Dataset columns:
query: string (length 9 to 3.4k)
document: string (length 9 to 87.4k)
metadata: dict
negatives: sequence (length 4 to 101)
negative_scores: sequence (length 4 to 101)
document_score: string (length 3 to 10)
document_rank: string (102 distinct values)
query: calls parse_sheet to each sheet in the given file
document:
def parse_files(self):
    """Parse every sheet of every Excel file in the folder into one DataFrame
    and write the combined result out with write_to_file."""
    df = pd.DataFrame()
    if not self.xlfnames:
        self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                      'data source not found or cannot be open')
        logging.error('error happened: no excel files found')
        return False
    for fileName in self.xlfnames:
        try:
            xlfname = self.folder + '/' + fileName
            xl = pd.ExcelFile(xlfname)
        except Exception as e:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                          'data source not found or cannot be open', e)
            return False
        try:
            # code for one file per sheet
            # for sheet in xl.sheet_names:
            #     self.parse_sheet(xl, sheet)
            # code for one file for all sheets
            for sheet in xl.sheet_names:
                df_tmp = self.parse_sheet_to_df(xl, sheet, df)
                df = df.append(df_tmp, ignore_index=True)
        except Exception as e:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                          'data source format is not as expected', e)
            return False
    return self.write_to_file(df)
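The method above delegates per-sheet work to self.parse_sheet_to_df, which is not included in this sample. A minimal sketch of what such a helper could look like, shown as a free function for brevity; the body (reading the sheet with xl.parse and tagging rows with their source sheet) and the usage path are assumptions for illustration, not the original implementation:

import pandas as pd

def parse_sheet_to_df(xl, sheet, df):
    """Read one worksheet of an already opened pd.ExcelFile into a DataFrame."""
    # xl.parse() reads the named sheet, using its first row as the header.
    df_tmp = xl.parse(sheet)
    # Tag each row with its source sheet so rows stay traceable after the
    # caller appends them to the combined DataFrame.
    df_tmp.insert(0, 'sheet', sheet)
    # df (the accumulating frame) is accepted only to match the caller's
    # signature; this sketch does not need it.
    return df_tmp

# Usage, mirroring the caller above (hypothetical path):
# xl = pd.ExcelFile('data/daily_yearly.xlsx')
# df = pd.DataFrame()
# df = df.append(parse_sheet_to_df(xl, xl.sheet_names[0], df), ignore_index=True)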
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_xlsx(self, filename):\n xlsx = pd.ExcelFile(filename)\n for sheet in xlsx.sheet_names:\n table_index_header = cfg.get_list(\"table_index_header\", sheet)\n self.input_data[sheet] = xlsx.parse(\n sheet,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in sheet),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def parse_xlsx_sheet(f, n=0):\n xl_file = pd.ExcelFile(f)\n dfs = xl_file.parse(xl_file.sheet_names[n], na_values=['n.a.', 'n.d.'])\n return dfs", "def parse_ra():\r\n fname = 'parse/File1.xlsx'\r\n sheet_name = 'Sheet1'\r\n\r\n ra_sheet = get_sheet(fname, sheet_name)\r\n return parse_sheet(ra_sheet)", "def parse_bloomberg_excel(filename, colnames_sheet, data_sheets, space=1,\n colnames=None, **kwargs):\n if isinstance(data_sheets, str):\n data_sheets = [data_sheets, ]\n flag_single_input = True\n else:\n flag_single_input = False\n\n # converter for date\n def converter(x):\n try:\n res = pd.to_datetime(x)\n except Exception:\n res = pd.NaT\n return res\n\n # colnames_sheet\n if colnames is None:\n colnames = pd.read_excel(filename, sheet_name=colnames_sheet, header=0)\n colnames = colnames.columns\n\n # float converters\n float_conv = {\n k: float for k in list(range(1, len(colnames)*(2+space), 2+space))\n }\n\n # if data_sheets is None, read in all sheets\n if data_sheets is None:\n data_dict_full = pd.read_excel(filename, sheet_name=None,\n converters=float_conv, verbose=True,\n **kwargs)\n\n # remove the sheet with colnames from this dict\n data_dict_full.pop(colnames_sheet, None)\n\n else:\n data_dict_full = pd.read_excel(filename, sheet_name=data_sheets,\n converters=float_conv, verbose=True,\n **kwargs)\n\n # loop over sheetnames\n all_data = dict()\n\n for s, data_df in data_dict_full.items():\n # loop over triplets, map dates, extract\n new_data_df = []\n for p in range((data_df.shape[1]+1)//(space+2)):\n # this triplet\n this_piece = data_df.iloc[1:, p*(space+2):(p+1)*(space+2)-space]\n\n # map date\n this_piece.iloc[:, 0] = this_piece.iloc[:, 0].map(converter)\n\n # drop nans (with dates)\n this_piece = this_piece.dropna()\n\n # extract date as index\n this_piece = this_piece.set_index(this_piece.columns[0])\n\n # rename\n this_piece.columns = [colnames[p]]\n this_piece.index.name = \"date\"\n\n # drop duplicates from the index\n this_piece = this_piece.loc[\n ~this_piece.index.duplicated(keep='first'), :]\n\n # store\n new_data_df += [this_piece, ]\n\n # concat\n all_data[s] = pd.concat(new_data_df, axis=1, join=\"outer\")\n\n # if only one sheet was asked for\n if flag_single_input:\n all_data = list(all_data.values())[0]\n\n return all_data", "def parse_xlsx(filename):\n from openpyxl import load_workbook\n\n workbook = load_workbook(filename=filename)\n worksheet = workbook.get_sheet_by_name(workbook.get_sheet_names()[0])\n row_it = worksheet.iter_rows()\n split_row_list = ([cell.value if cell.value is not None else \"\"\n for cell in row] for row in row_it)\n\n return parse_generic(split_row_list)", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def test_parse_sample_sheet(self):\n pass", "def process_xlsx(content):\n data = {}\n workbook = xlrd.open_workbook(file_contents=content)\n worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]\n for worksheet_name in worksheets:\n if worksheet_name.startswith('_'):\n continue\n\n worksheet = 
workbook.sheet_by_name(worksheet_name)\n\n merged_cells = worksheet.merged_cells\n if len(merged_cells):\n raise MergedCellError(worksheet.name, merged_cells)\n\n worksheet.name = slughifi(worksheet.name)\n headers = make_headers(worksheet)\n worksheet_data = make_worksheet_data(headers, worksheet)\n data[worksheet.name] = worksheet_data\n return data", "def __call__(self):\n if len(self.handle.sheet_names) > 1: self.multi_sheet()\n else: self.mono_sheet()", "def batch(self, xls):\n \n workbook = xlrd.open_workbook(xls)\n sheet = workbook.sheet_by_name(workbook.sheet_names()[0])\n self.headers = sheet.row(0)\n self.index = self._getHeaders([x.value for x in sheet.row(0)])\n self.__create_batch([self.parseRow(sheet.row(i)) \n for i in range(1, (sheet.nrows))])", "def parseSheet(self):\n self.log.info(\"Parsing {0} rows and {1} columns.\".format(self.rowns,self.colns))\n \n self.column_dimensions = {}\n self.property_dimensions = {}\n self.row_dimensions = {}\n self.rowhierarchy = {}\n\n # Get dictionary of annotations\n self.annotations = self.r_sheet.cell_note_map\n \n for i in range(0,self.rowns):\n self.rowhierarchy[i] = {}\n \n for j in range(0, self.colns):\n # Parse cell data\n self.source_cell = self.r_sheet.cell(i,j)\n self.source_cell_name = cellname(i,j)\n self.style = self.styles[self.source_cell].name\n self.cellType = self.getType(self.style)\n self.source_cell_qname = self.getQName(self.source_cell_name)\n \n self.log.debug(\"({},{}) {}/{}: \\\"{}\\\"\". format(i,j,self.cellType, self.source_cell_name, self.source_cell.value))\n\n # Try to parse ints to avoid ugly _0 URIs\n try:\n if int(self.source_cell.value) == self.source_cell.value:\n self.source_cell.value = int(self.source_cell.value)\n except ValueError:\n self.log.debug(\"(%s.%s) No parseable int\" % (i,j))\n\n \n # Parse annotation (if any)\n if self.config.get('annotations', 'enabled') == \"1\":\n if (i,j) in self.annotations:\n self.parseAnnotation(i, j)\n\n # Parse cell even if empty\n if self.cellType == 'Data':\n self.parseData(i, j)\n elif (self.cellType == 'HRowHeader') :\n self.updateRowHierarchy(i, j)\n elif self.cellType == 'ColHeader' :\n self.parseColHeader(i, j)\n elif self.cellType == 'RowProperty' :\n self.parseRowProperty(i, j)\n \n # If cell not empty, check for more types\n if not self.isEmpty(i,j) :\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],RDF.type,self.namespaces['tablink'][self.cellType]))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['cell'],Literal(self.source_cell_name)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['col'],Literal(colname(j))))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['row'],Literal(i+1)))\n #self.graph.add((self.namespaces['scope'][self.source_cell_qname] isrow row\n if self.cellType == 'Title' :\n self.parseTitle(i, j)\n \n elif self.cellType == 'RowHeader' :\n self.parseRowHeader(i, j)\n \n elif self.cellType == 'HRowHeader' :\n self.parseHierarchicalRowHeader(i, j)\n \n elif self.cellType == 'RowLabel' :\n self.parseRowLabel(i, j)\n \n # Add additional information about the hierarchy of column headers\n for value in self.column_dimensions.values():\n for index in range(1, len(value)):\n uri_sub = self.getColHeaderValueURI(value[:index+1])\n uri_top = self.getColHeaderValueURI(value[:index])\n self.graph.add((uri_sub, self.namespaces['tablink']['subColHeaderOf'], uri_top))\n 
self.graph.add((uri_sub, self.namespaces['tablink']['depth'], Literal(index)))\n self.graph.add((uri_top, self.namespaces['tablink']['depth'], Literal(index-1)))\n \n self.log.info(\"Done parsing...\")", "def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)", "def __init__(self, infile, sheet, header=True, date_format=\"%Y-%m-%d\"):\n from openpyxl import load_workbook\n \n wb = load_workbook(infile, data_only=True, use_iterators=True, keep_vba=False)\n \n try:\n sheet = wb.worksheets[int(sheet)-1]\n \n except:\n for ws in wb.worksheets:\n if ws.title == sheet:\n sheet = ws\n break\n else:\n raise Exception(\"No worksheet named {0}\".format(sheet))\n \n self.iter = sheet.iter_rows()\n self.date_format = date_format\n \n if header:\n self.fieldnames = self.format_excel_row(self.iter.next())\n self.aliases = dict(zip(\n list(\"col{0}\".format(idx+1) for idx in xrange(len(self.fieldnames))),\n self.fieldnames))\n else:\n self.fieldnames = list(\"col{0}\".format(idx+1) for idx in xrange(len(sheet.columns))),\n self.aliases = None", "def parse_tables_xlsx(inp):\n # --------------------------------------------------------------------------\n # Start\n # --------------------------------------------------------------------------\n raw_read = pd.read_excel(inp,sheet_name = None)\n indx = get_tab_index(raw_read)\n # --------------------------------------------------------------------------\n # Get the individual tables from the file\n # --------------------------------------------------------------------------\n tabdict = {}\n for i in indx['tab'].to_list():\n tabdict[i] = get_table_df(raw_read[i])\n # --------------------------------------------------------------------------\n # Finish\n # --------------------------------------------------------------------------\n out = {}\n out['indx'] = indx\n out['tabs'] = tabdict\n return out", "def parse_f_id():\r\n fname = 'parse/File2.xlsx'\r\n sheet_name = 'Sheet1'\r\n\r\n ran_sheet = get_sheet(fname, sheet_name)\r\n return parse_sheet(ran_sheet)", "def doLink(self):\n self.log.info('Starting TabLinker for all sheets in workbook')\n \n for n in range(self.rb.nsheets) :\n self.log.info('Starting with sheet {0}'.format(n))\n self.r_sheet = self.rb.sheet_by_index(n)\n self.w_sheet = self.wb.get_sheet(n)\n \n self.rowns, self.colns = self.getValidRowsCols()\n \n self.sheet_qname = urllib.quote(re.sub('\\s','_',self.r_sheet.name))\n self.log.info('Base for QName generator set to: {0}'.format(self.sheet_qname))\n \n self.log.debug('Starting parser')\n self.parseSheet()", "def process(workbook: Any, content: str) -> None:\n sheets = ['SMB', 'NFS', 'Multiprotocol']\n\n row_tuples = dict()\n row_tuples['SMB'] = [\n 'ArrayName', 'DataMover', 'ShareName', 'SharePath',\n 'RootPath', 'Type', 'umask', 'maxusr', 'netbios', 'comment'\n ]\n\n row_tuples['NFS'] = [\n 'Hostname', 'Server', 'MountedPath', 'FileSystem',\n 'Type', 'rw', 'root', 'access'\n ]\n\n row_tuples['Multiprotocol'] = [\n 'Hostname', 'Server',\n 'MountedPath', 'Type'\n ]\n\n server_export_out = run_parser_over(content, SERVER_EXPORT_TMPL)\n server_export_grouped = groupby(itemgetter(3), server_export_out)\n share, export, multi = classify_rows(server_export_grouped)\n\n for sheet, data_list in zip(sheets, [share, export, multi]):\n worksheet = 
workbook.get_sheet_by_name(sheet)\n build_header(worksheet, row_tuples[sheet])\n RowTuple = namedtuple('RowTuple', row_tuples[sheet])\n\n final_col, final_row = 0, 0\n for row_n, row_tuple in enumerate(map(RowTuple._make, data_list), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(column_format(col_n), row_n)]\n if isinstance(col_value, str):\n cell.value = str.strip(col_value)\n else:\n cell.alignment = Alignment(wrapText=True)\n cell.value = '\\n'.join(col_value)\n style_value_cell(cell)\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n '{}Table'.format(sheet),\n sheet,\n final_col,\n final_row)", "def start_parse(self, file_path, data=None):\n with open_xlsb(file_path) as wb:\n\n cols = []\n data_frame = []\n with wb.get_sheet(data) as sheet:\n for row in sheet.rows():\n l = [item.v for item in row]\n if not cols:\n cols = l\n data_frame.append(l)\n\n data_frame = pd.DataFrame(data_frame[1:], columns=data_frame[0])\n data = data_frame.to_dict(\"split\")[\"data\"]\n\n parsable = []\n for d in data:\n for i in d:\n if str(i) == \"nan\" and math.isnan(i):\n return None\n parsable.append(dict(zip(cols, d)))\n\n return json.dumps(parsable)", "def read_excel(f, file_contents=None):\n wb = openpyxl.load_workbook(f, read_only=True)\n\n for sheetname in wb.sheetnames:\n if sheetname == 'reference':\n return\n sheet = wb[sheetname]\n rowiter = sheet.rows\n organization_row = next(rowiter)\n\n label_row = next(rowiter)\n names_row = next(rowiter)\n\n yield (\n sheetname,\n organization_row[0].value,\n [c.value for c in names_row],\n _filter_bumf(rowiter))", "def parse(self, f):\n \n for line in f:\n self.parse_line(line)", "def shred_sheets(subdomain, audit_date, input_file, _format):\r\n name = extract_dir_name(input_file)\r\n fname = PurePath(input_file).name.__str__()\r\n try:\r\n os.makedirs(name)\r\n except:\r\n pass\r\n\r\n wb = pd.ExcelFile(input_file)\r\n for ws in wb.sheet_names:\r\n data = pd.read_excel(input_file, sheet_name=ws)\r\n # add constants\r\n data.index.names = ['ix']\r\n data['subdomin'] = subdomain\r\n data['audit_date'] = audit_date\r\n\r\n # strip chars we don't want in colum names\r\n cols = data.columns\r\n renamed = []\r\n for col in cols:\r\n col = re.sub('[^a-zA-Z0-9]', '', col)\r\n renamed.append(col)\r\n\r\n data.columns = renamed\r\n\r\n # build output formats\r\n if _format == 'mongo':\r\n client = MongoClient('mongodb://localhost:27017/')\r\n db = client.Sitebulb\r\n cl = db.August5\r\n\r\n try:\r\n cl.insert_many(data.to_dict('records'))\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'json' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.json')\r\n data.to_json(new_file, orient=\"records\")\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'csv' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\r\n data.to_csv(new_file)\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue", "def setUp(self):\n wb = open_workbook(filename=self.filename)\n\n self.port_values = {}\n\n # find sheets that contain cash\n sheet_names = wb.sheet_names()\n for sn in sheet_names:\n if len(sn) > 4 and sn[-4:] == '-BOC':\n # print('read from sheet {0}'.format(sn))\n ws = 
wb.sheet_by_name(sn)\n read_cash(ws, self.port_values)", "def __init__(self, file_path):\n self.file_path = file_path\n self.current_row = 0\n self.workbook = \"\"\n self.sheet = \"\"\n self.load_workbook()", "def set_rule_from_excel(self, rule_file_path, sheet_name=0):\n rules = pd.read_excel(rule_file_path,\n sheet_name=sheet_name)\n self._set_rule_tree(rules)", "def import_worksheet(filename, sheetname, range_start='A1'):\n app = xw.App(visible=False)\n book = xw.Book(filename)\n sheet = book.sheets(sheetname)\n excel_data = sheet.range(range_start).expand('table').value\n keys = [snake_case(key) for key in excel_data[0]]\n data = [dict(zip(keys, values)) for values in excel_data[1:]]\n book.close()\n app.quit()\n return data", "def read_stats_excel(filename):\n\n df_dict = {}\n xl = pd.ExcelFile(filename)\n\n for sh in xl.sheet_names:\n df = pd.read_excel(xl, sheet_name=sh, header=[0, 1, 2])\n\n # Use start date as index\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n df.index.name = \"Date\"\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n df_dict[sh] = df\n\n return df_dict", "def parse_pages():\n\n excel_filename = 'Result_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.xlsx'\n workbook = xlsxwriter.Workbook(excel_filename)\n worksheet_all = workbook.add_worksheet()\n\n create_headers(worksheet_all, workbook)\n\n row = 1\n col = 0\n\n cell_format = workbook.add_format()\n cell__wrapped_format = workbook.add_format()\n cell__wrapped_format.set_text_wrap()\n site_url = 'http://medsalltheworld.com/'\n for full_filename in get_html_filenames():\n with open(full_filename, \"r\", encoding=\"utf-8\") as html_file:\n try:\n soup = BeautifulSoup(html_file.read(), \"lxml\")\n product_name_elements = soup.find_all(\"li\", class_=\"col-xs-6 col-md-4\")\n for elem in product_name_elements:\n name = elem.select('h3')[0].text.replace('®', '')\n elem_url = site_url + elem.select('h3')[0].find('a')['href']\n\n worksheet_all.write(row, col, name, cell_format)\n worksheet_all.write(row, col + 1, elem_url, cell_format)\n worksheet_all.write(row, col + 2, full_filename, cell_format)\n row += 1\n\n except AttributeError:\n print(full_filename)\n\n workbook.close()", "def process_file(filename):\n print \"Reading and Parsing File: {}\".format(filename)\n parsed_entries = file_parser(filename)\n print \"Starting to Process Entries\"\n chunked_entires = chunk_entries(parsed_entries)\n return [process_entries(entry) for entry in chunked_entires]", "def parse():\n\n try:\n if \"excel_doc\" not in request.files:\n return bad_request()\n\n excel_doc = request.files[\"excel_doc\"]\n\n if not models.allowed_file(excel_doc.filename):\n return bad_request()\n\n file_path = models.save_excel_doc(excel_doc)\n\n students = models.parse_students(file_path)\n\n os.remove(file_path)\n except ValueError as e:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return success(students)", "def set_excel_file(self, excel_file: ExcelFile):\n self.clearContents()\n\n # all\n self.__init_all()\n\n # horizontal header\n with excel_file as excel:\n sheet_names: List[str] = 
[]\n for i, sheet in enumerate(excel.sheets):\n sheet_names.append(sheet.name)\n\n # cells\n self.setColumnCount(len(sheet_names) + 1)\n self.setHorizontalHeaderLabels(['All'] + sheet_names)\n for r, c in product(range(len(sheet_infos)), range(len(sheet_names))):\n self.setCellWidget(r, c + 1, self.__make_connected_cell(r, c))", "def parse_xlsx(schedule_file,col_to_keep=None):\n\n # Read schedule xlsx file\n df = _load_df(schedule_file,\"schedule\",col_to_keep)\n\n # Construct a dictionary of links from the \"links\" worksheet\n try:\n\n link_df = _load_df(schedule_file,\"links\")\n\n # Grab aliases\n try:\n aliases = [a.strip() for a in link_df[\"link_alias\"]]\n except KeyError:\n err = \"if a links sheet is given, it must have a link_alias column\\n\"\n raise ValueError(err)\n\n # Sanity checking on aliases\n if len(aliases) != len(set(aliases)):\n err = \"all link_alias entries must be unique\\n\"\n raise ValueError(err)\n\n disallowed = re.compile(\"[\\.\\,\\;\\:\\s]\")\n for a in aliases:\n if not a.startswith(\"_\"):\n err = \"all link_alias entries must start with '_'\\n\"\n raise ValueError(err)\n if disallowed.search(a):\n err = \"link_alias entries must not have whitespace or [,.:;]\\n\"\n raise ValueError(err)\n\n # Grab urls; \"\" if not specified\n try:\n urls = link_df[\"link_url\"]\n except KeyError:\n urls = [\"\" for _ in range(len(aliases))]\n\n # Grab link text; \"\" if not specified\n try:\n text = link_df[\"link_text\"]\n except KeyError:\n text = [\"\" for _ in range(len(aliases))]\n\n # Construct link dictionary\n link_dict = {}\n for i, a in enumerate(aliases):\n link_dict[a] = (urls[i],text[i])\n\n # If no links worksheet, link_dict = {}\n except XLRDError:\n link_dict = {}\n\n # Go through entry. Search and replace _link_alias entries with\n # rst-style links\n target_dict = {}\n for i, k in enumerate(df.columns):\n for j, v in enumerate(df[k]):\n\n df.iloc[j,i], new_target_dict = _replace_links(v,link_dict)\n if len(new_target_dict) > 0:\n for k in new_target_dict.keys():\n try:\n already_seen = target_dict[k]\n if already_seen != new_target_dict[k]:\n err = \"The same link_text '{}' corresponds to more than one url\\n\".format(k)\n raise ValueError(err)\n except KeyError:\n target_dict[k] = new_target_dict[k]\n\n return df, target_dict", "def upload_sheet(self, request):\n file = self.request.data['file']\n\n # validating requested payload.\n if not file:\n return Response(\"Got no file! 
Please hit me again with file.\")\n # Only .csv/xls format file are allowed\n if file.name.rsplit('.')[1] == 'csv':\n sheet_as_df = pd.read_csv(file)\n elif file.name.rsplit('.')[1] == 'xls':\n sheet_as_df = pd.read_excel(file)\n else:\n return Response(\"Only .csv/.xls format type allowed for now.\")\n\n # sheet uploading code\n # =============Logic Start================\n header = ['last_name', 'first_name', 'state', 'phone_number']\n df = sheet_as_df\n if not set(header).issubset(df.columns):\n return False, f'Please check uploading sheet matching headers as: {header}'\n # filling empty(NaN) of data-frame entry with 0.0\n df = df.fillna(0)\n from itertools import islice\n batch_size = 100\n while True:\n content_instance = [Content(\n first_name=record['first_name'],\n last_name=record['last_name'],\n state=record['state'],\n phone_number=record['phone_number']\n ) for record in islice(df.to_dict('records'), batch_size)]\n if not content_instance:\n logger.info('Unable to update PhoneBook model with entries.')\n break\n PhoneBook.objects.bulk_create(content_instance, batch_size)\n # =============Logic End==================\n\n return Response('Successfully updated order entry!')", "def get_sheet_names(file_path):\n with open_xlsb(file_path) as wb:\n return wb.sheets", "def loadData(self, file, sheet, cols, skip):\n df = pd.read_excel(io=file, sheetname=sheet, header=0, parse_cols=cols, skiprows=skip).values\n return df", "def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule match\"", "def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 8,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value 
for w in list1]\n list2 = sheet.col_slice(colx= 9,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # ,pd.DataFrame(list1)\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] # col_name_1, \n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))", "def read_sheet(self, native_sheet):\n sheet = XLSXSheet(native_sheet, **self._keywords)\n return {sheet.name: sheet.to_array()}", "def parse_sheet(spreadsheet_path):\n\tdef get_cols(sheet):\n\t\t\"\"\"Given a sheet, look at the first row and determine the index of each column.\n\t\t Returns a Reg with the column number set in each field.\"\"\"\n\t\tnames = {\"number\" : [\"Register Number\", -1],\n\t\t \"name\" : [\"Register Name\", -1],\n\t\t \"size\" : [\"Size\", -1],\n\t\t \"read\" : [\"R/W\", -1],\n\t\t \"write\" : [\"R/W\", -1],\n\t\t \"default\" : [\"Default Value\", -1],\n\t\t \"func\" : [\"Firmware Write Func\", -1],\n\t\t \"desc\" : [\"Description\", -1]}\n\t\tfor i in range(0, sheet.ncols):\n\t\t\tcell = sheet.cell(0, i).value\n\t\t\tfor k in names:\n\t\t\t\tif names[k][0] == cell:\n\t\t\t\t\t names[k][1] = i\n\t\treturn Reg(number=names[\"number\"][1], name=names[\"name\"][1], size=names[\"size\"][1], read=names[\"read\"][1], write=names[\"write\"][1], default=names[\"default\"][1], func=names[\"func\"][1], desc=names[\"desc\"][1])\n\t\n\tbook = xlrd.open_workbook(spreadsheet_path)\n\tsheet = book.sheet_by_index(0)\n\tcols = get_cols(sheet)\n\tregs = []\n\tfor i in range(1, sheet.nrows):\n\t\treg = Reg(number=sheet.cell(i, cols.number).value,\n\t\t name=sheet.cell(i, cols.name).value,\n\t\t size=sheet.cell(i, cols.size).value,\n\t\t read='r' in sheet.cell(i, cols.read).value.lower(),\n\t\t write='w' in sheet.cell(i, cols.write).value.lower(),\n\t\t default=sheet.cell(i, cols.default).value,\n\t\t func=sheet.cell(i, cols.func).value,\n\t\t desc=sheet.cell(i, cols.desc).value)\n\t\tregs.append(reg)\n\treturn regs", "def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n #mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n 
mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 6,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 7,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # pd.DataFrame(list1),\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] ## col_name_1,\n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))", "def parse_sheet(xlrd_sheet):\r\n res_dict = {}\r\n ra_item = None\r\n\r\n for row_slice in get_row_slice(xlrd_sheet, start_row=0):\r\n item_code, descr, f_id = row_slice\r\n\r\n # Instantiate appropriate sales items.\r\n # 0=Empty\r\n # 1=Text\r\n if item_code.ctype == 1:\r\n\r\n if check_code(item_code.value):\r\n ra_item = SalesItem(item_code.value, descr.value)\r\n res_dict[ra_item] = []\r\n else:\r\n continue\r\n\r\n if f_id.ctype == 0:\r\n continue\r\n elif f_id.ctype == 1:\r\n ran_item = SalesItem(f_id.value, descr.value)\r\n # Fill the dict.\r\n res_dict[ra_item].append(ran_item)\r\n\r\n return res_dict", "def read_data(filename):\n \n # Iterate over all X-values. 
Y-values are stored in colummns of particular worksheet\n for x in range(0,13):\n\n wb = xlrd.open_workbook(filename)\n ws = wb.sheet_by_index(0)\n\n # This position of metadata doesn't change its relative position from sheet-to-sheet\n n_energy = int(ws.cell_value(1,3))\n n_iter = int(ws.cell_value(4,3))\n Rows_to_Skip = 15\n\n # Rename columns\n column_names = [str(x) for x in range(0,n_iter)]\n column_names.insert(0,'nan')\n column_names.insert(0,'KE')\n\n # Read data using pandas\n df_data = pd.read_excel(io = filename,\n sheet_name=x,\n skiprows = Rows_to_Skip,\n names = column_names,\n index_col='KE'\n )\n # Drop the second column as it is always supposed to be false\n df_data.drop(columns=df_data.columns[0],inplace=True)\n \n # Get x_data as the index \n x_array = np.array(df_data.index).reshape(len(df_data.index),1)\n \n # If we encounter first sheet\n if x==0:\n y = df_data.to_numpy()\n \n # Stack with the cummulative y built till now\n else:\n y = np.hstack((y, df_data.to_numpy()))\n \n # Ideally x_array should be (481, 1), and y should be (481, 169)\n return x_array, y", "def parse(csv_file, xlsx_file):\n\n #Summary info\n no_dsa_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have Deep Security installed'\n disable_ips_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have Intrusion Prevention enabled'\n no_rule_pattern = r'\\((\\d+)/(\\d+)\\).*don\\'t have rules assigned'\n summary_patterns = re.compile('{}|{}|{}'.format(no_dsa_pattern, disable_ips_pattern, no_rule_pattern))\n no_dsa_count = None\n no_ips_count = None\n no_rule_count = None\n description = []\n summary = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n #Detail info\n no_dsa_list_title = r\"Hosts that don't have Deep Security installed\"\n disable_ips_list_title = r\"Hosts that have Deep Security installed but don't have Intrusion Prevention enabled\"\n no_rule_list_title = r\"Hosts that have Intrusion Prevention enabled but no rules assigned\"\n title_patterns = re.compile('({})|({})|({})'.format(no_dsa_list_title, disable_ips_list_title, no_rule_list_title))\n no_dsa_list = []\n no_ips_list = []\n no_rule_list = []\n host_list = [\n no_dsa_list,\n no_ips_list,\n no_rule_list\n ]\n\n #Help url\n action_pattern = re.compile('\\[(.+)\\: (.+)\\]')\n action_list = []\n\n\n #Parse csv file\n with open(csv_file, 'r', newline='') as csv_file:\n rows = csv.reader(csv_file, quotechar='\"', delimiter=',', skipinitialspace=True)\n for index, row in enumerate(rows):\n if not row:\n continue\n\n if row[0].startswith('#'):\n description.append(row[0].replace('# ', ''))\n continue\n\n search = summary_patterns.search(row[0])\n if search:\n current_pattern = next((i for i, x in enumerate(search.groups()) if x), None)\n summary[int(current_pattern/2)] = ( int(search.group(current_pattern+1)), int(search.group(current_pattern+2)) - int(search.group(current_pattern+1)) )\n continue\n\n search = action_pattern.search(row[0])\n if search:\n action_list.append((search.group(1), search.group(2)))\n if len(action_list) == 4:\n break\n\n current_pattern = -1\n for index, row in enumerate(rows):\n if not row or re.match('None', row[0]):\n continue\n\n search = title_patterns.search(row[0])\n if search:\n current_pattern = next((i for i, x in enumerate(search.groups()) if x), None)\n continue\n\n if current_pattern == -1:\n continue\n host_list[current_pattern].append(row[0])\n\n no_dsa_count, no_ips_count, no_rule_count = summary\n\n #Write xlsx file\n workbook = xlsxwriter.Workbook(xlsx_file)\n summary_worksheet = workbook.add_worksheet('Summary')\n 
rawdata_worksheet = workbook.add_worksheet('Data')\n no_dsa_worksheet = workbook.add_worksheet(\"Hosts that don't have DS\")\n no_ips_worksheet = workbook.add_worksheet(\"Hosts that don't have IPS\")\n no_rule_worksheet = workbook.add_worksheet(\"Hosts that don't have rules\")\n\n #Rawdata worksheet\n data = [\n ['', 'Deep Security installed', 'Intrusion Prevention enabled', 'rules assigned'],\n ['Yes', no_dsa_count[1], no_ips_count[1], no_rule_count[1]],\n ['No', no_dsa_count[0], no_ips_count[0], no_rule_count[0]]\n ]\n rawdata_worksheet.write_column('A1', data[0], workbook.add_format({'bold': True}))\n rawdata_worksheet.write_column('B1', data[1])\n rawdata_worksheet.write_column('C1', data[2])\n\n #No DSA install worksheet\n no_dsa_worksheet.write_row(0, 0, [no_dsa_list_title])\n no_dsa_worksheet.write_column(1, 0, no_dsa_list)\n\n #No IPS enable worksheet\n no_ips_worksheet.write_row(0, 0, [disable_ips_list_title])\n no_ips_worksheet.write_column(1, 0, no_ips_list)\n\n #No Rule worksheet\n no_rule_worksheet.write_row(0, 0, [no_rule_list_title])\n no_rule_worksheet.write_column(1, 0, no_rule_list)\n\n #Summary worksheet\n chart = workbook.add_chart({'type': 'column', 'subtype': 'percent_stacked'})\n chart.add_series({\n 'name': '=Data!$B$1',\n 'categories': '=Data!$A$2:$A$4',\n 'values': '=Data!$B$2:$B$4',\n 'fill': {'color': '#21BC3B'}\n })\n chart.add_series({\n 'name': '=Data!$C$1',\n 'categories': '=Data!$A$2:$A$4',\n 'values': '=Data!$C$2:$C$4',\n 'fill': {'color': '#C22828'}\n })\n chart.set_title ({'name': 'Protected hosts'})\n chart.set_x_axis({\n 'name': 'Status',\n })\n\n chart.set_y_axis({\n 'major_unit' : 0.2,\n 'min' : 0,\n 'max' : 1\n })\n\n summary_worksheet.insert_chart('A1', chart)\n summary_worksheet.write_column('A16', description)\n for index, (text, url) in enumerate(action_list):\n summary_worksheet.write_url('A'+str(index+20), url, string = text)\n summary_worksheet.activate()\n\n workbook.close()", "def my_reader(filename, sheetname='Sheet1', separ=','):\r\n global df_read\r\n filename_list = filename.split('.')\r\n extension = filename_list[-1]\r\n if extension == 'csv':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'data':\r\n df_read = pd.read_csv(filename, sep=separ, header=None)\r\n if extension == 'txt':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'json':\r\n df_read = pd.read_json(filename)\r\n if extension == 'html':\r\n df_read = pd.read_html(filename)\r\n if extension == 'xls':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'xlsx':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'feather':\r\n df_read = pd.read_feather(filename)\r\n if extension == 'parquet':\r\n df_read = pd.read_parquet(filename)\r\n if extension == 'msg':\r\n df_read = pd.read_msgpack(filename)\r\n if extension == 'dta':\r\n df_read = pd.read_stata(filename)\r\n if extension == 'sas7bdat':\r\n df_read = pd.read_sas(filename)\r\n if extension == 'pkl':\r\n df_read = pd.read_pickle(filename)\r\n return df_read", "def _read_excel(file: str, semester: str, users: set, courses: set, users_to_courses: dict):\n course_parts = file.split('/')[-1].split('_')\n course_name = '{}{}'.format(course_parts[0], course_parts[1])\n courses.add((course_name, semester))\n\n book = xlrd.open_workbook(file)\n sheet = book.sheet_by_index(0)\n skip = True\n\n for row in sheet.get_rows():\n first_column = row[0].value\n if skip:\n skip = not first_column.startswith(\"Name\")\n continue\n elif 
first_column == \"\" or first_column is None:\n continue\n\n full_name = first_column\n name_parts = [name.strip() for name in full_name.split(', ')]\n\n email = row[1].value + '@ithaca.edu'\n users.add((email, name_parts[0], name_parts[1]))\n\n users_to_courses[email].append([course_name, semester])", "def importItem(file_path):\n\n #Ouverture du fichier\n rb = open_workbook(file_path)\n r_sheet = rb.sheet_by_index(0)\n\n for row_index in range (1, r_sheet.nrows):\n #Hydratation or get Supplier Model\n item_supplier= r_sheet.cell(row_index, 4).value\n item_supplier, created = Supplier.objects.get_or_create(name=item_supplier)\n\n #Hydratation or get Category Model\n current_category = r_sheet.cell(row_index, 0).value\n item_category, created = Category.objects.get_or_create(name=current_category)\n\n #Hydratation Item\n item_name = r_sheet.cell(row_index, 1).value\n item_ref = current_supplier= r_sheet.cell(row_index, 3).value\n item_quantity = r_sheet.cell(row_index, 2).value\n item, created = Item.objects.get_or_create(ref=item_ref, name=item_name, category=item_category, supplier=item_supplier, quantity=item_quantity)", "def load(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'r') as rank_file:\n for record in rank_file:\n team, score = common.parse(record)\n self.score[team] = score", "def __xlsx_schema_generator(file):\n try:\n # Loads the temporary file into a workbook.\n workbook = openpyxl.load_workbook(file)\n\n # Gets the name of all the sheets in the workbook.\n sheet_names = workbook.sheetnames\n \n # The first row on the first sheet is then added into a list.\n metadata_list = list()\n for cell in workbook[sheet_names[0]][1]:\n metadata_list.append(str(cell.value))\n return SchemaGenerator.__build_schema(metadata_list)\n except Exception as e:\n logging.error('Failed to parse xlsx file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from xlsx file.\")", "def parse(self):\n for section in self.sections:\n section.parse()", "def convert_excel_file(excel_file: str, sheet_name: str) -> list:\n workbook = xlrd.open_workbook(excel_file)\n sheet = workbook.sheet_by_name(sheet_name)\n\n book_list = []\n for row in range(1, sheet.nrows):\n books = OrderedDict()\n row_values = sheet.row_values(row)\n books[\"author\"] = row_values[0]\n books[\"title\"] = row_values[1]\n books[\"publisher\"] = row_values[2]\n books[\"category\"] = row_values[4]\n books[\"subject\"] = row_values[5]\n book_list.append(books)\n\n if type(row_values[3]) != str:\n books[\"shelf\"] = str(int(row_values[3]))\n else:\n books[\"shelf\"] = str(row_values[3])\n\n return book_list", "def sw_interactive_mode(xl_file):\n dfs = {sheet: xl_file.parse(sheet, index_col=0)\n for sheet in xl_file.sheet_names if (sheet[0] != '_')}\n print(\"Which one do you want to process: \")\n sheet_list = list(dfs.keys())\n for sheet in sheet_list:\n print(sheet, \"\\t\", \"[\" + str(sheet_list.index(sheet)) + \"]\")\n algo = input(\"Select the sheet: \")\n print(algo)", "def printing_resident_sheets(entry, file):\n nhi = entry.get()\n if re.match(\"^[A-Za-z]{3}[0-9]{4}$\", nhi):\n pass\n else:\n popup_error(\"Incorrect NHI format entered, please try again\")\n\n if file_available(file):\n ecase_driver = ecase_downloader.ecase_login()\n ecase_downloader.resident_contacts(ecase_driver, nhi)\n ecase_downloader.preferred_name_and_image(ecase_driver, nhi)\n ecase_driver.quit()", "def check_sheet(path, sheet): \n xl = pd.ExcelFile(path)\n if sheet 
not in xl.sheet_names:\n raise ValueError(\"Invalid sheet name \\'\" + sheet +\"\\'\")", "def run(self, file):\n self.loadReport(file)\n self.findCountryCode()\n self.reformatAndSave()", "def parse(self, limit=None):\n if limit is not None:\n logger.info(\"Only parsing first %d rows of each file\", limit)\n logger.info(\"Parsing files...\")\n\n if self.testOnly:\n self.testMode = True\n\n # the following will provide us the hash-lookups\n self._process_dbxref()\n self._process_cvterm()\n self._process_genotypes(limit)\n self._process_pubs(limit)\n # do this before environments to get the external ids\n self._process_environment_cvterm()\n self._process_environments()\n self._process_organisms(limit) # must be done before features\n self._process_organism_dbxref(limit)\n self._process_features(limit)\n self._process_phenotype(limit)\n self._process_phenotype_cvterm()\n # gets external mappings for features (genes, variants, etc)\n self._process_feature_dbxref(limit)\n # do this after organisms to get the right taxonomy\n self._process_stocks(limit)\n # figures out types of some of the features\n self._get_derived_feature_types(limit)\n\n # These are the associations amongst the objects above\n self._process_stockprop(limit)\n self._process_pub_dbxref(limit)\n self._process_phendesc(limit)\n self._process_feature_genotype(limit)\n self._process_feature_pub(limit)\n self._process_stock_genotype(limit)\n self._process_phenstatement(limit) # these are G2P associations\n\n self._process_feature_relationship(limit)\n\n self._process_disease_models(limit)\n # TODO add version info from file somehow\n # (in parser rather than during fetching)\n\n logger.info(\"Finished parsing.\")\n logger.info(\"Loaded %d nodes\", len(self.graph))\n return", "def load_research_project(input_file, sheet_name=None):\n\n df = read_excel(input_file, sheet_name=sheet_name)\n\n for idx, project in df[[df.columns[4], df.columns[5]]].iterrows():\n th_name, en_name = project\n\n en_name = en_name.strip() if not isinstance(en_name, float) else None\n th_name = th_name.strip() if not isinstance(th_name, float) else None\n\n if not th_name: # None or empty string\n th_name = en_name\n\n if th_name:\n project_ = session.query(ResearchProject).filter(\n ResearchProject.title_th == th_name\n ).first()\n if not project_:\n p = ResearchProject(title_th=th_name, title_en=en_name)\n session.add(p)\n\n session.commit()", "def _read_workbook_2007(maldoc):\n\n # Read in the 2007+ cells.\n color_print.output('g', \"Analyzing Excel 2007+ file ...\")\n workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc) \n color_print.output('g', \"Extracted XLM from ZIP archive.\")\n if (workbook_info is None):\n return (None, None, None)\n if (len(workbook_info) == 0):\n color_print.output('y', \"WARNING: No XLM macros found.\")\n return (None, None, None)\n\n if debug:\n print(\"=========== START 2007+ CONTENTS ==============\")\n for sheet in workbook_info.keys():\n print(\"\\n------\")\n print(sheet)\n print(\"\")\n for c in workbook_info[sheet].keys():\n print(str(c) + \" ---> \" + str(workbook_info[sheet][c]))\n print(\"=========== DONE 2007+ CONTENTS ==============\")\n \n # Figure out which sheet probably has the XLM macros.\n xlm_sheet_name = None\n max_formulas = -1\n for sheet in workbook_info.keys():\n if (len(workbook_info[sheet]) > max_formulas):\n max_formulas = len(workbook_info[sheet])\n xlm_sheet_name = sheet\n\n # Parse each formula and add it to a sheet object.\n xlm_cells = {}\n for cell_index in 
workbook_info[xlm_sheet_name].keys():\n\n # Value only cell?\n row = cell_index[0]\n col = cell_index[1]\n if (row not in xlm_cells):\n xlm_cells[row] = {}\n raw_formula = workbook_info[xlm_sheet_name][cell_index][0]\n if (raw_formula is None):\n\n # Do we have a value?\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n\n # Just save the value in the cell.\n xlm_cells[row][col] = formula_val\n continue\n \n # Parse the formula into an XLM object.\n formula_str = b\"=\" + raw_formula\n formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)\n\n # Set the value of the formula if we know it.\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n formula.value = formula_val\n\n # Save the XLM object.\n formula.update_cell_id(cell_index)\n xlm_cells[row][col] = formula\n color_print.output('g', \"Parsed MS XLM macros.\")\n \n # Merge the XLM cells with the value cells into a single unified spereadsheet\n # object.\n workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)\n if (workbook is None):\n color_print.output('r', \"ERROR: Merging XLM cells failed. Emulation aborted.\")\n return (None, None, None)\n \n # Done.\n return (workbook, xlm_cell_indices, xlm_sheet)", "def process_file(f):\n\n header = f.readline()\n if header.startswith(\"\\t\"):\n header = header[1:]\n cell_file_names = header.split(FS)\n\n map(list, zip(*[(1, 2), (3, 4), (5, 6)]))\n\n [cell_names, donor_nums, tissue_types, cell_nums] = map(list, zip(\n *[ extract_cell_name_data(x) \n for x in cell_file_names\n ]\n ))\n\n for line in f:\n\n toks = line.split(FS)\n gene_string = toks[0]\n\n (ensembl_gene, gene) = extract_gene_and_ensembl(gene_string)\n\n expr_vals = toks[1:len(toks)]\n\n for i in range(len(expr_vals)):\n if float(expr_vals[i]) == 0:\n continue\n\n # non-zero value\n output_line(cell=cell_names[i],\n donor_num=donor_nums[i],\n tissue_type=tissue_types[i],\n cell_num=cell_nums[i],\n ensembl_gene=ensembl_gene,\n gene=gene,\n expression=expr_vals[i])", "def process(workbook: Any, contents: Iterable) -> None:\n worksheet_name = 'SAN Hosts'\n worksheet = workbook.get_sheet_by_name(worksheet_name)\n headers = get_parser_header(SYSTEM_NAME_TMPL)\n\n headers += [\n 'cluster_id', 'host_id', 'volume_id', 'map_id', 'creator', 'Hostname',\n 'cluster', 'fc_ports', 'type', 'iscsi_chap_name', 'perf_class'\n ]\n\n RowTuple = namedtuple('RowTuple', headers)\n build_header(worksheet, headers)\n headers = [\n 'cluster_id/@value', 'host_id/@value', 'volume_id/@value',\n 'creator/@value', 'name/@value', 'cluster/@value',\n 'fc_ports/@value', 'type/@value',\n 'iscsi_chap_name/@value', 'perf_class/@value'\n ]\n\n hosts_rows, lun_rows = [], [] # type: list\n for sys_content, all_content, host_content in contents:\n system_name = run_parser_over(sys_content, SYSTEM_NAME_TMPL)[0]\n all_map_content = '\\n'.join(all_content.split('\\n')[1:])\n host_content = '\\n'.join(host_content.split('\\n')[1:])\n\n doc_map = xmltodict.parse(all_map_content)\n map_details = search_tag_value(doc_map, 'map')\n maps = luns_occurrences(map_details, headers)\n lun_rows = [system_name + row for row in maps]\n\n doc_host = xmltodict.parse(host_content)\n host_details = search_tag_value(doc_host, 'host')\n flat_data_host = [flatten_dict(data) for data in host_details]\n hosts = ordered_jsons(flat_data_host,\n [headers[0], 'id/@value'] + headers[3:])\n\n hosts_rows += [system_name + host_row for host_row in hosts]\n\n no_cluster = filter(lambda x: x[1] 
== '-1', lun_rows)\n no_hosts = filter(lambda x: x[2] == '-1', lun_rows)\n clusters_hosts = filter(lambda x: x[2] != '-1' and x[1] != '-1', lun_rows)\n\n common_columns = (0, 2)\n rows_cluster = multiple_join(\n common_columns, [no_cluster, hosts_rows])\n\n common_columns = (0, 1)\n row_hosts = multiple_join(\n common_columns, [no_hosts, hosts_rows])\n\n common_columns = (0, 1, 2)\n row_all = multiple_join(\n common_columns, [clusters_hosts, hosts_rows])\n\n sub_rows = list(rows_cluster) + list(row_hosts) + list(row_all)\n rows = expand_rows(sub_rows, 3)\n\n final_col, final_row = 0, 0\n for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(column_format(col_n), row_n)]\n cell.value = str.strip(col_value)\n style_value_cell(cell)\n if chr(col_n) != 'D':\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n 'SANHostsTable',\n 'SANHosts',\n final_col,\n final_row)", "def parse_file(file_name, barcode_map=barcode_map):\n\n with open(file_name) as file_handle:\n results = defaultdict(Counter)\n try:\n while True:\n name = file_handle.next()\n seq = file_handle.next()\n plus = file_handle.next()\n qual = file_handle.next()\n handle_seq(seq, barcode_map, results)\n except StopIteration:\n pass\n return pd.DataFrame(results).T.fillna(0)", "def check_workbook_sheets(workbook):\n sheets_in_file = workbook.sheetnames\n valid_sheets_template = []\n for sheet in sheets_in_file:\n # check if a template with that name exists in templates folder\n if os.path.isfile(os.path.join(cd,'templates', sheet + '.xml')):\n logger.info('{} is a valid template'.format(sheet))\n valid_sheets_template.append(sheet)\n else:\n logger.error('{} is not a valid template. 
Skipping'.format(sheet))\n return valid_sheets_template", "def test_parse_samplesheet(self):\n run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n run = MinIONqc(run_dir, None, None)\n run.lims_samplesheet = 'data/nanopore_samplesheets/2020/DELIVERY_SQK-LSK109_AAU644_Samplesheet_24-594126.csv'\n run._parse_samplesheet()\n self.assertTrue(filecmp.cmp(run.nanoseq_sample_sheet, 'data/nanopore_samplesheets/expected/SQK-LSK109_sample_sheet.csv'))\n self.assertTrue(filecmp.cmp(run.anglerfish_sample_sheet, 'data/nanopore_samplesheets/expected/anglerfish_sample_sheet.csv'))", "def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)", "def nodes_data_excel_parser(excel_path,**kwargs):\n excel_parser_engine = kwargs.get(\"engine\",\"xlrd\")\n\n # Check if excel file exists\n if not excel_path or not os.path.isfile(excel_path):\n raise FileNotFoundError(\n \"Excel data file {} not found.\".format(excel_path)\n )\n\n xls = pd.ExcelFile(excel_path,engine=excel_parser_engine)\n\n try:\n # TODO for sheet in xls.sheet_names:\n # nodes_data[sheet] = xls.parse(sheet)\n nodes_data = {\n \"buses\": xls.parse(\"buses\").replace({np.nan:None}),\n \"commodity_sources\": xls.parse(\"commodity_sources\").replace({np.nan:None}),\n \"transformers\": xls.parse(\"transformers\").replace({np.nan:None}),\n \"transformers_chp\": xls.parse(\"transformers_chp\").replace({np.nan:None}),\n \"renewables\": xls.parse(\"renewables\").replace({np.nan:None}),\n \"demand\": xls.parse(\"demand\").replace({np.nan:None}),\n \"storages\": xls.parse(\"storages\").replace({np.nan:None}),\n \"powerlines\": xls.parse(\"powerlines\").replace({np.nan:None}),\n \"timeseries\": xls.parse(\"time_series\").replace({np.nan:None}),\n \"financial\":xls.parse(\"financial\").replace({np.nan:None})\n }\n except KeyError:\n err_msg = \"Excel file must contains: [buses, commodity_sources, transformers, renewables, demand, storages, powerlines, financial and timeseries].\\n\\\n The following sheets are found: {}\".format(xls.sheet_names)\n raise Exception(err_msg)\n\n # set datetime index\n nodes_data[\"timeseries\"].set_index(\"timestamp\", inplace=True)\n nodes_data[\"timeseries\"].index = pd.to_datetime(\n nodes_data[\"timeseries\"].index\n )\n\n logger.info(\"Data from Excel file {} imported in as nodes data.\".format(excel_path))\n\n return nodes_data", "def _load_excel(self, file_path):\n if not os.path.exists(file_path):\n self._logger.error(\"File not exists {0}\".format(file_path))\n return False\n\n # Load in the workbook file\n self._workbook = openpyxl.load_workbook(file_path)\n\n if not self._workbook:\n self._logger(\"Failed to load excel file {0}\".format(file_path))\n return False\n\n self.sheet_names = self._workbook.sheetnames\n return True", "def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)", "def test_assemble_xml_file(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n worksheet.str_table = SharedStringTable()\n worksheet.select()\n cell_format1 = Format({\"xf_index\": 1})\n cell_format2 = Format({\"xf_index\": 2})\n\n worksheet.merge_range(\"B3:C3\", \"Foo\", cell_format1)\n 
worksheet.merge_range(\"A2:D2\", \"\", cell_format2)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"A2:D3\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"1:4\">\n <c r=\"A2\" s=\"2\"/>\n <c r=\"B2\" s=\"2\"/>\n <c r=\"C2\" s=\"2\"/>\n <c r=\"D2\" s=\"2\"/>\n </row>\n <row r=\"3\" spans=\"1:4\">\n <c r=\"B3\" s=\"1\" t=\"s\">\n <v>0</v>\n </c>\n <c r=\"C3\" s=\"1\"/>\n </row>\n </sheetData>\n <mergeCells count=\"2\">\n <mergeCell ref=\"B3:C3\"/>\n <mergeCell ref=\"A2:D2\"/>\n </mergeCells>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def find_all_spreadsheets(self, callback, root_id=ROOT_FOLDER_ID, folder_name=\"\"):\n fnames = []\n\n for f in self.get_folder_children(root_id):\n\n fname = f[\"name\"]\n fnames.append(fname)\n\n if f[\"mimeType\"] == FOLDER_MIME_TYPE:\n if fname in FOLDERS_TO_SKIP:\n print(f\"[INFO] Skipping folder '{fname}'\")\n continue\n \n if fname in self.version:\n print(f\"[INFO] Found and using '{fname}'\")\n else:\n print(f\"[INFO] Skipping folder with '{fname}' as we want '{self.version}'\")\n continue\n\n new_folder = os.path.join(folder_name, fname)\n\n # Make the recursive call if we have found a sub-folder\n self.find_all_spreadsheets(callback, root_id=f[\"id\"], folder_name=new_folder)\n\n elif f[\"mimeType\"] in SPREADSHEET_MIME_TYPES:\n # Process the spreadsheet\n callback(fname, f[\"id\"], folder_name)\n\n # Check valid content was found\n if len([item for item in fnames if item.endswith(\".xlsx\")]) > 5:\n expected_xlsx = {xlsx for xlsx in workflow_data if xlsx.endswith(\".xlsx\")}\n if not expected_xlsx.issubset(set(fnames)):\n diff = expected_xlsx.difference(fnames)\n raise ValueError(f\"[ERROR] The following expected spreadsheets were not found on \"\n f\"Google Drive: {diff}\")\n\n if len(fnames) < PRODUCT_COUNT_MINIMUM:\n raise Exception(f\"[ERROR] The number of product spreadsheets found is less than \"\n f\"the minimum expected: {len(fnames)} < {PRODUCT_COUNT_MINIMUM}.\"\n f\" Please investigate.\")", "def from_excel(self, path, worksheet=0):\n reader, release_resources = _from_excel(path, worksheet=worksheet)\n return Reader(reader, closefunc=release_resources)", "def fromxlsx(filename, sheet=None, range=None, **kwargs):\n \n return XLSXView(filename, sheet=sheet, range=range, **kwargs)", "def parse_file(self, file):\n return self.parse(file.read())", "def parse_rows(wb, merged_ws):\r\n number_of_sheets = len(wb.worksheets) - 9 # Number of sheets\r\n for sheet_number in range(1, 1 + number_of_sheets): # parse through each sheet\r\n sheet = wb[\"%s%s\" % (sheet_number, \"월\")] # determine sheet by number and Korean word for month\r\n for row_number in range(header_row_number + 1, sheet.max_row + 1): # parse through each row (using the row number)\r\n row = sheet[row_number] # set 'row_number's corresponding row to the row variable\r\n # start checking if it is an empty row.\r\n if row[3].value is None: # if the cell is empty, move towards deleting the row\r\n if row[14].value is None: # double 
check with another cell\r\n sheet.delete_rows(row_number, 1) # if both checkpoints are empty, delete that row\r\n else: # otherwise, if the cell is not empty, move towards copying and pasting\r\n current_row = merged_ws.max_row + 1 # find the first empty row\r\n paste_row(row, merged_ws, current_row) # paste the selected row into the merged Excel file at the current row\r\n print(row_number)", "def ParseFile(self):\r\n ### ################\r\n imp = PrayerTimeExcelParser();\r\n\r\n # Import the timetable in the correct year\r\n # We can't import into the past\r\n now = datetime.datetime.now();\r\n curr_month = now.month;\r\n curr_year = now.year;\r\n if self.month < curr_month:\r\n imp.year = curr_year + 1;\r\n\r\n imp.month = self.month;\r\n imp.year = curr_year;\r\n\r\n try:\r\n mosque = Mosque.objects.get(id=self.mosque_id);\r\n except Exception as e:\r\n return False, \"No mosque was found with the given ID {}\".format(self.mosque_id);\r\n\r\n try:\r\n prayer_times = imp.ImportFromFile(self.file_path);\r\n except ParsingError as e:\r\n return False, \"Parsing Error: {}\".format(str(e));\r\n\r\n for idx, pt in enumerate(prayer_times):\r\n pt_model = PrayerTimes(mosque=mosque,\r\n date=datetime.date(imp.year, imp.month, idx+1),\r\n fajr_jamaa=pt[FajrPrayerName.name],\r\n duhr_jamaa=pt[DuhrPrayerName.name],\r\n asr_jamaa=pt[AsrPrayerName.name],\r\n maghrib_jamaa=pt[MaghribPrayerName.name],\r\n ishaa_jamaa=pt[IshaPrayerName.name]);\r\n pt_model.save();\r\n\r\n return True, \"Added {} days from the timetable\".format(len(prayer_times));", "def get_xls(xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls", "def parseFile(self, filename):\n\n f = open(filename, \"r\")\n s = f.read()\n f.close()\n\n logging.log(10, 'parsing filename %s: %d lines' % (filename, len(s)))\n\n self.parseString(s)", "def import_data(path=None, sheet_names=None):\n\n # process an import of a single sheet as well as several sheets,\n # which will be concatenated with an continuous index\n if type(sheet_names) == list:\n data = pd.DataFrame()\n _data = pd.read_excel(io=path, sheet_name=sheet_names, header=0, index_col=None)\n for sheet in sheet_names:\n data = data.append(_data[sheet], sort=False)\n data = data.reset_index(drop=False)\n data[\"index\"] = data[\"index\"] + 2 # sync the index with the excel index\n else:\n data = pd.read_excel(io=path, sheet_name=sheet_names, header=0, index_col=0)\n\n # Cut of leading or tailing white spaces from any string in the dataframe\n data = data.applymap(lambda x: x.strip() if type(x) is str else x)\n\n # Convert every N/A, nan, empty strings and strings called N/a, n/A, NAN,\n # nan, na, Na, nA or NA to np.nan\n data = data.replace(\n [\"\", \"N/a\", \"n/A\", \"NAN\", \"nan\", \"na\", \"Na\", \"nA\", \"NA\"], np.nan, regex=True\n )\n data = data.fillna(np.nan)\n\n return data", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def get_entities(self, xlsx_file):\n \n self.logger.info(\"Loading workbook: {}\".format(xlsx_file))\n\n # report on total rows.\n total_rows = sum(1 for row in self._get_rows(xlsx_file))\n self.logger.info(\"Found {} 
rows.\".format(total_rows))\n \n # get row data and modified checksum.\n entity_rows = self._get_rows(xlsx_file)\n hash_prefix = self._get_hash_prefix(xlsx_file)\n\n # get header.\n row = next(entity_rows)\n header = [cell.value for cell in row]\n header= tuple(header)\n\n # if header is invalid, return empty generator.\n if not self._validate_header(header):\n msg = \"Invalid header row: {}.\".format(header)\n raise self.SchemaError(msg)\n\n # create generator for each row.\n def entities():\n \n # start numbering at 2 because the header has already been read.\n row_number = 2\n \n # yield a dict for each non-header row.\n header_range = range(0,len(header))\n for row in entity_rows:\n\n self.logger.info(\"Processing row {}.\".format(row_number))\n \n # get row values.\n row = [cell.value for cell in row]\n row = [cell.strip() if isinstance(cell, str) else cell for cell in row]\n row = [(header[i], row[i]) for i in header_range]\n row = dict(row)\n\n # run row validator.\n row_valid = self._validate_row(row)\n if not row_valid:\n self.logger.warning(\"Skipping row {}; row is invalid.\".format(\n row_number))\n row_number += 1\n continue\n \n # alter data as needed and create dict for row.\n row[\"identifier\"] = hash_prefix + row[\"identifier\"]\n manifestations = self.get_manifestations(row[\"pattern\"], \n row[\"case_sensitive\"], row_number)\n row[\"manifestations\"] = [\"\".join(m) for m in manifestations]\n \n # yield row as dict.\n row_number += 1\n yield(row)\n\n return entities()", "def parse_file(self, parse_all=False, file=None):\n if not parse_all:\n input()\n for line in sys.stdin:\n self.parse_and_add(line)", "def read_schedules(use, x):\n # read schedules from excel file\n occ = [x['Weekday_1'].values[:24], x['Saturday_1'].values[:24], x['Sunday_1'].values[:24]]\n el = [x['Weekday_2'].values[:24], x['Saturday_2'].values[:24], x['Sunday_2'].values[:24]]\n dhw = [x['Weekday_3'].values[:24], x['Saturday_3'].values[:24], x['Sunday_3'].values[:24]]\n month = x['month'].values[:12]\n\n if use == \"INDUSTRIAL\":\n pro = [x['Weekday_4'].values[:24], x['Saturday_4'].values[:24], x['Sunday_4'].values[:24]]\n else:\n pro = [np.zeros(24), np.zeros(24), np.zeros(24)]\n\n # read area per occupant\n area_per_occupant = x['density'].values[:1][0]\n\n return occ, el, dhw, pro, month, area_per_occupant", "def excel_fun_read(file_name, template_name, template_location, counter) :\r\n for list_number in range(1, 4) :\r\n inputWorkbook = xlrd.open_workbook(file_name)\r\n inputWorksheet = inputWorkbook.sheet_by_index(list_number)\r\n rows = inputWorksheet.nrows\r\n cols = inputWorksheet.ncols\r\n print(f'{rows} Rows in the file\\t') # <- get rows number starts from 0\r\n print(f'{cols} Cols in the file\\n') # <- get coloms number starts from 0\r\n dictionary = {1 : 'ATR', 2 : 'ESS Hot cycle 1', 3 : 'ESS Cold cycle 1', 4 : 'ESS Hot cycle 2',\r\n 5 : 'ESS Cold cycle 2'}\r\n if cols == 9 :\r\n print('next file')\r\n if cols == 12 or cols == 9 :\r\n cols = 8\r\n sub = 2\r\n else :\r\n cols = 12\r\n sub = 3\r\n for excel_row in range(1, sub) :\r\n sn = int(inputWorksheet.cell_value(0, cols))\r\n print(f'working on 000{sn}.xlsx') # <- Indicates which file is open\r\n TestLocation_list = [] # <- Creation of list\r\n PassFail_col_list = [] # <- Creation of list\r\n for i in range(rows) :\r\n # Follow the H colom check if there is 'PASS'/'FAIL' or empty cell\r\n # If empty cell skip it until the end of the excel file\r\n if inputWorksheet.cell_value(i, cols - excel_row) == 'PASS' or 
inputWorksheet.cell_value(i,\r\n cols - excel_row) == 'FAIL' or inputWorksheet.cell_value(\r\n i, cols - excel_row) == 'N/T' :\r\n TestLocation_list.append(i)\r\n PassFail_col_list.append(str(inputWorksheet.cell_value(i, cols - excel_row)))\r\n\r\n location_list, len_of_every_test_list = Create_2_lists_of_locations(TestLocation_list, list_number)\r\n pass_fail_list = sort_list_of_pass_and_fail(len_of_every_test_list, location_list, file_name, list_number,\r\n cols, PassFail_col_list, excel_row)\r\n\r\n # print(f'''It's the end of {list_number} in file 000{sn} excel_row = {excel_row}''')\r\n print(f'''It's the end of {dictionary.pop(counter)} in file 000{sn}\\n''')\r\n write_to_excel(sn, template_location, pass_fail_list, counter)\r\n counter += 1\r\n\r\n print('''it's the end of the loop''')", "def _parse(self, infile):\n raise NotImplementedError()", "def __init__(self, filepath):\n self._column_names = []\n self._row_list = []\n self._workbook = None\n\n try:\n self._workbook = xlrd.open_workbook(filepath)\n except:\n raise Exception('Nie można otworzyć pliku lub nieprawidłowy rodzaj pliku.')", "def _add_sheet(self, name):\n\n pages = [x for x in self._pages if x.startswith(name)]\n\n if len(pages) == 0:\n self._pages[name] = self.workbook.add_worksheet(name)\n self._pages[name].set_default_row(hide_unused_rows=True)\n return self._pages[name]", "def main():\n\n era = dt.datetime.now()\n\n parser = xlslisp_compile_argdoc()\n args = parser.parse_args()\n\n space = os.path.splitext(args.file)[0]\n\n # Import the Values of Sheets of one Xlsx File\n\n sheet_by_name = openpyxl.load_workbook(args.file, data_only=True)\n sheet_by_name_keys_list = sheet_by_name.sheetnames\n\n stderr_print(\n \"xlslisp: reading {} sheets from: {}\".format(\n len(sheet_by_name_keys_list), args.file\n )\n )\n\n # Option to quit early\n\n if not args.force:\n stderr_print(\n \"xlslisp.py: Xlsx imported, run again with --force to replace Csv's\"\n )\n\n sys.exit(1)\n\n # Visit each Sheet\n\n for (index, sheetname) in enumerate(sheet_by_name_keys_list):\n sheet = sheet_by_name[sheetname]\n\n csv_name = \"{space}-{dashed_sheet}.csv\".format(\n space=space, dashed_sheet=sheetname.replace(\" \", \"-\")\n ).lower()\n\n # Collect Rows of String Values\n\n csv_ragged_rows = list()\n for row_index in range(sheet.max_row):\n row_mark = 1 + row_index\n\n csv_cells = list()\n\n for col_index in range(sheet.max_column):\n cell = sheet.cell(1 + row_index, 1 + col_index)\n col_mark = cell.column_letter\n assert col_mark == excel_az_mark(col_index)\n\n if False:\n if (col_mark, row_mark) == (\"C\", 89):\n pdb.set_trace()\n\n csv_cells.append(cell.value)\n\n # Warn of trailing spaces\n\n if str(csv_cells[-1]).endswith(\" \"):\n stderr_print(\n \"xlslisp: Warning: \"\n \"could rstrip cell at: {!r}!{}{} {}\".format(\n sheetname, col_mark, row_mark, csv_cells[-1]\n )\n )\n\n csv_ragged_rows.append(csv_cells)\n\n # Format as rectangular Csv to please GitHub\n #\n # per GitHub > Rendering CSV and TSV data\n # flagging ragged as \"we can make this file beautiful and searchable\"\n #\n\n csv_rows = rows_complete(csv_ragged_rows, cell=None)\n\n charstream = io.StringIO()\n csv_writer = csv.writer(charstream)\n for csv_cells in csv_rows:\n csv_writer.writerow(csv_cells)\n\n charstream.seek(0)\n csv_chars = charstream.read()\n\n # Write the lines with local \"os.linesep\" line-ending's\n # specifically Not the mix of \"\\r\\n\" and \"\\n\" from multi-line Excel cells\n # but without rstrip'ping the lines # TODO: poor choice to skip 
rstrip?\n\n csv_lines = csv_chars.splitlines()\n csv_joined = \"\\n\".join(csv_lines) + \"\\n\"\n\n stderr_print(\n \"xlslisp: writing {} chars of {} rows to: {}\".format(\n len(csv_joined), sheet.max_row, csv_name\n )\n )\n\n with open(csv_name, \"w\") as csv_writing:\n csv_writing.write(csv_joined)\n\n now = dt.datetime.now()\n stderr_print(\"xlslisp: elapsed time of\", (now - era), \"since\", era)\n\n sys.exit(0)", "def normalize_excelSheet(self, sheetname, conversion_dictionary):\n\n sheet = self.wb.sheet_by_name(sheetname)\n\n ami_data = []\n\n date_headers = [\"bibliographic.date\", \"technical.dateCreated\"]\n time_headers = [\"technical.durationHuman\"]\n\n #copy everything from the 3rd row to the last row with a filename\n for rownum in range(2, sheet.nrows):\n if sheet.cell(rownum, 0):\n ami_data.append(sheet.row_values(rownum))\n\n for i in range(0, sheet.ncols):\n #normalize header\n header_entry = self.get_headerEntryAsString(sheetname, i)\n ami_data[0][i] = self.normalize_headerEntry(\n header_entry,\n conversion_dictionary)\n\n #convert excel dates\n if ami_data[0][i] in date_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"date\")\n\n #convert excel times\n if ami_data[0][i] in time_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"time\")\n\n ami_df = self.normalize_values(ami_data)\n\n return ami_df", "def load_data_from_xsl(file_name):\n\tnlp_data = pd.read_excel(file_name, sheet_name=0, header=0, usecols=[1, 2, 3],\n\t converters={'bug_id': str, 'summary': str, 'description': str})\n\tnlp_data.fillna(' ', inplace=True)\n\n\t# nlp_data['description'] = nlp_data['description'].map(lambda x: clean_str(x+''))\n\n\treturn nlp_data", "def Excel_Load_Data( self, ExcelFilename ):\n pass", "def parseRules(self, filename):\n\n try:\n with open(filename) as fd:\n\n lineNo = 1\n \n for line in fd:\n if '#' in line:\n line = line.split('#', 1)[0]\n if line.strip() != '':\n try:\n rule = self.parseRule(line, lineNo)\n if rule != None:\n self._rules.append(rule)\n else:\n eprint(\"Error in line {0} of rule file '{1}' ignoring rule:\\n{2}\\n\".format(lineNo, filename, line), end='')\n \n except Exception as ex:\n eprint(\"Error in line {0} of rule file '{1}': {2} ignoring rule:\\n{3}\\n\".format(lineNo, filename, ex, line), end='')\n lineNo = lineNo + 1\n except:\n # error opening file\n eprint(\"Error in parseRules: traceback info: {0}\".format(traceback.format_exc()))", "def parse(self, fn, board):\n with open(fn) as f:\n return [(board.get_region(i['name']), i['base']) for i in json.loads(f.read())]", "def load_excel_file(fname, sheet_name='Sheet1', cell_range=None):\n #DEPRICATED CODE\n #d = cell_range.split(:))\n #row_size = int(d[1][1:]) - int(d[0][1:]) + 1\n #col_size = (ord(d[1][0].lower()) - 96) - (ord(d[0][0].lower()) - 96) + 1\n #cell_data = np.zeros([row_size, col_size])\n\n wb = openpyxl.load_workbook(fname)\n sheet = wb[sheet_name]\n\n big_list = []\n for row in sheet[cell_range]:\n small_list = []\n for cell in row:\n #print(cell.coordinate, cell.value)\n small_list.append(cell.value)\n big_list.append(small_list)\n\n return np.array(big_list)", "def read_lines(the_file, worksheet, w):\n ListOfData = []\n Cpu_Usage = 'usr', 'sys', 'idl', 'wai', 'hiq', 'siq'\n Cpu_bool = False\n Mem_Usage = 'used', 'buff', 'cach', 'free'\n Mem_bool = False\n 
Disk_Usage = 'read', 'writ'\n Disk_bool = False\n Net_Usage = 'recv', 'send'\n Net_bool = False\n rowNum = 7\n lineCount = 8\n\n #with open(the_file) as input_data:\n for line in the_file:\n if \"failed\" in line:\n error_mssg = line\n print \"Error message found in log: \", error_mssg\n worksheet[w].write(0, 0, 'Error Message in log: ' + error_mssg)\n continue\n if \"total-cpu-usage\" in line:\n ListOfData.append(Cpu_Usage)\n worksheet[w].write(6, 0, 'Total CPU Usage')\n Cpu_bool = True\n if \"memory-usage\" in line:\n ListOfData.append(Mem_Usage)\n worksheet[w].write(6, 6, 'Memory Usage')\n Mem_bool = True\n if \"dsk/sd\" in line:\n ListOfData.append(Disk_Usage)\n worksheet[w].write(6, 10, 'Disk Usage')\n Disk_bool = True\n if \"net/eth\" in line:\n ListOfData.append(Net_Usage)\n worksheet[w].write(6, 12, 'Network Usage')\n Net_bool = True\n continue\n continue\n else:\n line = line.replace(\"|\", \" \")\n line = line.replace(\"\\n\", \"\")\n line = re.sub(\" +\", \" \", line)\n lineCount = lineCount + 1\n if line[0] == \" \":\n line = line[1:]\n else:\n line = line[0:]\n #print line\n for a in ListOfData:\n colNum = 0\n a = [[y] for y in line.split(\" \")]\n #print \"This is an a: \", a\n for i in a:\n #print \"This is i:\", i[0]\n if \"M\" in i[0]:\n i[0] = (float(i[0].replace(\"M\", \"\"))*1048576)\n elif \"k\" in i[0] and not \"dsk\" in i[0] and not \"ticks\" in i[0]:\n i[0] = (float(i[0].replace(\"k\", \"\"))*1024)\n elif \"B\" in i[0]:\n i[0] = (float(i[0].replace(\"B\", \"\")))\n elif \"G\" in i[0]:\n i[0] = (float(i[0].replace(\"G\", \"\"))*1073741824)\n try:\n i = float(i[0])\n #print \"Converting to float!\"\n worksheet[w].write_number(rowNum, colNum, i)\n colNum = colNum + 1\n except:\n worksheet[w].write_row(rowNum, colNum, i)\n colNum = colNum + 1\n rowNum = rowNum + 1\n continue\n return Cpu_bool, Mem_bool, Disk_bool, Net_bool, worksheet, lineCount", "def _fillinSheet(self,sheet,data,startrow=2):\n i = startrow-1\n for r in data:\n j = 0\n for c in r:\n sheet.write(i,j,c)\n j+=1\n i += 1", "def parse_file(filename):\n\n f = open(filename, 'r')\n BoardSize = int( f.readline())\n NumVals = int(f.readline())\n\n #initialize a blank board\n board= [ [ 0 for i in range(BoardSize) ] for j in range(BoardSize) ]\n\n #populate the board with initial values\n for i in range(NumVals):\n line = f.readline()\n chars = line.split()\n row = int(chars[0])\n col = int(chars[1])\n val = int(chars[2])\n board[row-1][col-1]=val\n\n return board", "def _fill_workbook_data(self, workbook, record, data_dict):\n if not record or not data_dict:\n return\n try:\n # variable to store data range of each worksheet\n worksheet_range = {}\n for sheet_name in data_dict:\n ws = data_dict[sheet_name]\n st = False\n if isinstance(sheet_name, str):\n st = get_sheet_by_name(workbook, sheet_name)\n elif isinstance(sheet_name, int):\n st = workbook.worksheets[sheet_name - 1]\n if not st:\n raise ValidationError(\n _('Sheet %s not found!') % sheet_name)\n # ================ HEAD ================\n self._fill_head(ws, st, record)\n # ============= Line Items =============\n # Check for groupby directive\n groupbys = {key: ws[key] for key in\n filter(lambda l: l[0:9] == '_GROUPBY_', ws.keys())}\n all_rc, max_row, tail_fields = self._fill_lines(ws, st, record,\n groupbys)\n # ================ TAIL ================\n self._fill_tail(ws, st, record, tail_fields)\n\n # prepare worksheet data range, to be used in BI funtions\n if all_rc:\n begin_rc = min(all_rc)\n col, row = split_row_col(\n max(sorted(all_rc, 
reverse=True), key=len))\n end_rc = '%s%s' % (col, max_row)\n worksheet_range[sheet_name] = '%s:%s' % (begin_rc, end_rc)\n\n # ================ BI Function ================\n self._fill_bi(workbook, data_dict, worksheet_range)\n\n except KeyError, e:\n raise except_orm(_('Key Error!'), e)\n except IllegalCharacterError, e:\n raise except_orm(\n _('IllegalCharacterError!\\n'\n 'Some exporting data may contain special character'), e)\n except Exception, e:\n raise except_orm(_('Error filling data into excel sheets!'), e)", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def _data_reader(file):\n # Create a dictionary so that filename matches a site name.\n site_dict = {'D05536000': 'NB Niles', 'D05536101': 'NS Channel-Wilmette',\n 'D05536105': 'NB Albany', 'D05536118': 'NB Grand Avenue',\n 'D05536121': 'CH River-Lock', 'D05536123': 'CH River-Columbus',\n 'D05536137': 'CSSC-Western Avenue', 'D05536140': 'CSSC-Stickney',\n 'D05536275': 'Thorn Creek', 'D05536290': 'Little Calument',\n 'D05536340': 'Midlothian Creek', 'D05536343': 'Natalie Creek',\n 'D05536357': 'Grand Calumet', 'D05536500': 'Tinley Creek',\n 'D05536700': 'Calumet-Sag Channel', 'D05536890': 'CSSC-Lemont',\n 'D05536995': 'CSSC-Romeoville'}\n df_raw = pd.read_csv(file)\n df_raw['dateTime'] = pd.to_datetime(df_raw['dateTime'])\n # Creating a dataframe with the data we only need.\n df = df_raw[['dateTime', 'X_00065_00000']]\n df = df.set_index(df_raw['dateTime'])\n\n # Retrieve site information to be used in saved excel filenames.\n site_code = file[-9:]\n site_name = [v for v in site_dict.items() if site_code in v][0]\n site = site_code + '_' + site_name[1].replace(' ', '-')\n\n # Convert index into a datetime index for easier indexing.\n df.index = pd.to_datetime(df.index)\n return df_raw, df, site, site_code", "def process(workbook: Any, contents: list) -> None:\n worksheet_name = 'Storage Inventory'\n worksheet = workbook.get_sheet_by_name(worksheet_name)\n\n headers = list(concat([\n ['Hostname', 'Model', 'OS', 'Nodes'],\n get_parser_header(DEDUPE_TMPL)\n ]))\n RowTuple = namedtuple('RowTuple', headers)\n build_header(worksheet, headers)\n\n rows = []\n for content in contents:\n doc = xmltodict.parse(content)\n component_details = search_tag_value(doc, 'component_details')\n command_details = search_tag_value(doc, 'command_details')\n\n dedupe, nodes = [], 0 # type: (list, int)\n for entry in command_details:\n nodes_content = collected_data(\n entry, 'cmd', 'isi storagepool nodepools list')\n nodes = max(map(compose(int, itemgetter(0)),\n run_parser_over(\n nodes_content,\n NODES_TMPL))) if nodes_content else nodes\n\n dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')\n dedupe = run_parser_over(\n dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe\n\n dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]\n rows.append([\n component_details['hostname'],\n component_details['model'],\n component_details['os'], str(nodes), *dedupe[0]\n ])\n\n final_col, final_row = 0, 0\n for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(chr(col_n), row_n)]\n cell.value = str.strip(col_value)\n 
style_value_cell(cell)\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n 'StorageInventoryTable',\n 'Storage Inventory',\n final_col,\n final_row)", "def download_all_sheets(self, sheet_id, sheet_name):\n # Get spreadsheet as a whole and iterate through each sheet\n results = self.get_spreadsheet(sheet_id)\n\n print(\"[INFO] Saving {} sheets to {}...\".format(len(results[\"sheets\"]), self.out_dir))\n sheet_name_no_xlsx = sheet_name[:-5]\n \n # Validate sheet name\n if sheet_name_no_xlsx + '.xlsx' != sheet_name:\n raise Exception(f\"[ERROR] Sheet does not have expected name with '.xlsx' extension: {sheet_name}\")\n\n prod_def_dir = os.path.join(self.out_dir, 'product-definitions')\n tsv_dir = os.path.join(prod_def_dir, 'tsv', sheet_name_no_xlsx)\n spreadsheet_dir = os.path.join(prod_def_dir, 'spreadsheet')\n \n for sdir in (tsv_dir, spreadsheet_dir):\n if not os.path.isdir(sdir):\n os.makedirs(sdir)\n\n print('[INFO] Saving TSV files to: {}...'.format(tsv_dir))\n worksheets = set()\n\n for sheet in results[\"sheets\"]:\n name = sheet[\"properties\"][\"title\"]\n worksheets.add(name)\n\n # Check worksheet name is valid\n if name not in ALLOWED_WORKSHEET_NAMES:\n print('[ERROR] Worksheet name not recognised: {}'.format(name))\n\n cell_range = \"'{}'!A1:Z{}\".format(name, NROWS_TO_PARSE)\n out_file = os.path.join(tsv_dir, \"{}.tsv\".format(name))\n\n if os.path.isfile(out_file) and not self.regenerate:\n print(f\"[WARNING] Not regenerating TSV...file already exists: {out_file}\")\n else:\n self.write_values_to_tsv(self.get_sheet_values(sheet_id, cell_range), out_file)\n\n # Check the expected worksheet files were processed\n # For general (relating to all products) spreadsheets\n if sheet_name.startswith(\"_\"):\n if not set(workflow_data[sheet_name]) == worksheets:\n raise Exception(f\"[ERROR] Could not find/process all expected worksheets for \"\n f\"spreadsheet '{sheet_name}'. Difference is:\\n\"\n f\"\\tExpected: {sorted(workflow_data[sheet_name])}\\n\"\n f\"\\tFound: {sorted(worksheets)}\")\n\n # For product-specific spreadsheets\n else:\n required = {wsheet for wsheet in workflow_data[\"per-product\"] if \"*\" not in wsheet}\n\n if not required.issubset(worksheets): \n raise Exception(f\"[ERROR] Could not find/process product-specific worksheets \"\n f\"for '{sheet_name}'. Missing: {required.difference(worksheets)}\") \n \n # Now download the raw spreadsheet \n spreadsheet_file = os.path.join(spreadsheet_dir, sheet_name)\n\n if os.path.isfile(spreadsheet_file) and not self.regenerate:\n print(f\"[WARNING] Download not initiated...file already exists: {spreadsheet_file}\")\n return\n else:\n print(f\"[INFO] Saving spreadsheet to: {spreadsheet_file}...\")\n self.save_raw_spreadsheet(sheet_id, spreadsheet_file)", "def load_sheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_obj = wb[sheet_name]\n return sheet_obj, wb", "def read(f):\n book = openpyxl.load_workbook(f) \n sheet = book.active \n items_list =[]\n n = sheet.cell(row = 2, column = 1).value\n c = sheet.cell(row = 2, column = 2).value\n for i in range(2,n+2): \n cell_obj = sheet.cell(row = i, column = 3)\n items_list.append(cell_obj.value) \n return items_list,c" ]
[ "0.6666606", "0.64332336", "0.6416094", "0.5954307", "0.59148884", "0.5886274", "0.58763015", "0.5842564", "0.57256263", "0.5725077", "0.5706473", "0.57055527", "0.5680622", "0.56387514", "0.563543", "0.5528164", "0.55189323", "0.5491748", "0.54883724", "0.54661864", "0.53742313", "0.5372789", "0.5371306", "0.5337673", "0.53245294", "0.5299063", "0.52953976", "0.529531", "0.5286549", "0.52786183", "0.52451736", "0.5228252", "0.52189285", "0.51948875", "0.51735", "0.5125878", "0.5091176", "0.50861865", "0.5080209", "0.50667113", "0.5056067", "0.5047683", "0.5019365", "0.50184757", "0.49856922", "0.49635324", "0.49487868", "0.49406183", "0.4935323", "0.49343738", "0.49265963", "0.49240783", "0.48986307", "0.48972607", "0.4884963", "0.48751935", "0.48723462", "0.48684835", "0.4861397", "0.48583308", "0.4856983", "0.48523316", "0.48345402", "0.48341063", "0.4832413", "0.48186785", "0.4799679", "0.47968516", "0.47929826", "0.47861096", "0.4781739", "0.4779973", "0.4779009", "0.47617757", "0.47566944", "0.47551247", "0.4753825", "0.47505587", "0.4741928", "0.47329766", "0.47285023", "0.47098133", "0.46969464", "0.4682283", "0.4679544", "0.4677661", "0.4665919", "0.46597135", "0.46596682", "0.46576834", "0.4645339", "0.4637437", "0.46367642", "0.46333188", "0.46217877", "0.4613143", "0.4612458", "0.46098414", "0.46086854", "0.46077824" ]
0.54823756
19
This function sends data to the Kafka bus
def producer(self, topic, msg, e=None): producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP'] ,api_version=(2, 2, 1),security_protocol='SSL', ssl_check_hostname=True, ssl_cafile='/home/oulu/certs/ca-cert', ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt', ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key') msg_b = str.encode(msg) producer.send(topic, msg_b).get(timeout=30) if (e): logging.exception('exception happened')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));", "def callback(self, data):\n\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n # Tony changed to 'topic' to work with Kuilin's group\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='topic',\n auto_delete=True)\n\n #TONY WAS HERE\n #CONVERT THE DATA BEFORE SENDING\n #this extracts the data to a tuple\n data_tuple = struct.unpack(\"<hddhdddddddddddd\", data)\n #convert tuple to string and remove the parentheses on the ends\n data_to_send = str(data_tuple).strip(\"()\")\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key=self.RoutingKey,\n body=data_to_send) #used to be body=data (from Pilot)\n\n #tony was here\n #print(\"Sending: %r via %r and %r\" % (data,self.logName,self.RoutingKey))\n\n self.connection.close()", "def sendMessage(topic, data, key, producer):\n producer.poll(0)\n producer.produce(topic, data.encode('utf-8'), key, callback=delivery_report)\n producer.flush()", "def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)", "def _publish(self, data):\n json_data = json.dumps(data)\n self._udp_socket.sendto(json_data, (self._hsflowd_addr, self._hsflowd_port))", "async def _send_message(producer, event_data):\n batch = await producer.create_batch()\n batch.add(EventData(_serialize_event_data_as_json(event_data)))\n await producer.send_batch(batch)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "def send(self, data):", "def send_to_kafka(correct_data):\n\n # init kafka producer\n kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BROKERS,\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n\n sent = 0\n\n for elem in correct_data:\n try:\n # try to send data to kafka\n message = elem['content']\n future = kafka_producer.send(KAFKA_TOPIC, message)\n future.get(timeout=5)\n sent += 1\n except:\n LOGGER.error('An error occurred on id {}'.format(elem['id']))\n\n # finally flush data\n kafka_producer.flush()\n LOGGER.info(\n ' {}/{} messages have been sent to Kafka, Kafka topic: {}'.format(sent, len(correct_data),\n KAFKA_TOPIC))", "def send_stream_to_kafka(self, df: pd.DataFrame) -> None:\n for row in df.values.tolist():\n value = dict(zip(self.get_value_structure(), row))\n key = dict(zip(self.get_key_structure(), [row[13]]))\n # key = {\"npi\": int(row[0])}\n # key = str(uuid.uuid4())\n self.producer.producer.produce(topic=self.topic_name, key=key, value=value)\n logger.info(f\"sent event to kafka with key: {key} and value: {value}\", class_name=self.__class__.__name__)", "def send_msg_to_kafka(self, msg: dict) -> None:\n producer_kafka_connection = self.connect_to_kafka()\n kafka_topic = self.topic_name\n url_as_key = bytes(self.source_url, 'utf-8')\n # Send message to Kafka topic\n try:\n logging.info(f'Sending to Kafka message -> {msg}')\n kafka_host = str(self.kafka_bootstrap_server).split(':')[0]\n kafka_port = str(self.kafka_bootstrap_server).split(':')[1]\n if not conn.dns_lookup(kafka_host, int(kafka_port)):\n logging.error(f'Unable to connect to 
{self.kafka_bootstrap_server}.'\n f' Please check if Kafka server is alive')\n sys.exit(1)\n meta = producer_kafka_connection.send(topic=kafka_topic, key=url_as_key, value=msg)\n # Make all messages in buffer ready to the sending\n producer_kafka_connection.flush()\n except Errors.BrokerNotAvailableError as e:\n producer_kafka_connection.close()\n logging.exception(f'{e}. Please check if config contains correct Kafka connection params or topic name')\n sys.exit(1)", "def send_message(self, data):\n self.transport.write(data)", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "def send_mqtt(self, data_type, data):\n try:\n client = mqtt.Client(\"rpi1_qnas\")\n client.on_connect = self.on_connect\n client.on_message = self.on_message\n client.connect(MQTT_BROKER_ADDRESS)\n client.loop_start()\n client.publish(MQTT_TOPIC + \"/{}\".format(data_type), data)\n client.disconnect()\n client.loop_stop()\n except Exception:\n msg = \"{} \\nMQTT error\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.loggers[\"log_stdout\"].warning(msg)\n self.loggers[\"log_errors\"].warning(msg)\n self.verbose(msg)", "def send(self, msg):\n raise NotImplementedError(\"DataStream does not implement send.\")", "def kafka_commit(self):\n self.kf_producer.flush()", "def send_to_bus(self, data: str):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((self.__address[0], BusController.PASSENGERS_PORT))\n data = str(data).encode()\n s.send(data)\n s.close()\n return True\n except:\n print(\"Failed to send message to the bus\")\n return False", "def send(self, bytes: bytes):\n assert self.status == Status.Active, \"Connection not established yet. Use sendto instead.\"\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n\n # 将要发送数据拆分放入transmit queue等待transmit thread处理\n u = 0\n v = DATA_LEN\n while u < len(bytes):\n self.transmit_queue.put({'data': bytes[u:v], 'is_end': v >= len(bytes)})\n u = v\n v += DATA_LEN\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def send_messages(self, partition, *msg):\n if self.async:\n for m in msg:\n self.queue.put((partition, create_message(m)))\n resp = []\n else:\n messages = [create_message(m) for m in msg]\n req = ProduceRequest(self.topic, partition, messages)\n try:\n resp = self.client.send_produce_request([req], acks=self.req_acks,\n timeout=self.ack_timeout)\n except Exception as e:\n log.exception(\"Unable to send messages\")\n raise e\n return resp", "def send_msg(self, my_queue, my_msg):", "def send(self, data: bytes):", "def send_data(queue, data):\n for obj in data:\n queue.put(dumps(obj, protocol=-1))", "def send_data(self, **kwargs):", "def send(self, data):\n self.sent.put(data)", "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n msg = self.kernel_client.session.msg(msg_type, content=content, parent=self.parent_header, metadata=metadata)\n self.kernel_client.shell_channel.send(msg)", "def _send_data(self):\n pass", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n 
self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def send_to_kafka(self, kafka_client, is_multi_partitioner, noOfMsgs, topic, msg_interval=1):\n\n kafka = KafkaClient(kafka_client)\n #logger.debug(\"Arguments : %s %s %s %s %s\" % (kafka_client, is_multi_partitioner, noOfMsgs, topic, msg_interval))\n\n if is_multi_partitioner is True:\n self.producer = KeyedProducer( kafka, partitioner=RoundRobinPartitioner )\n if ( noOfMsgs == -1 ):\n x=1\n while True:\n self.producer.send_messages(topic, \"key\"+str(x), \"Message sent from Keyed Producer : \" + str(datetime.now().time()))\n x += 1\n time.sleep(msg_interval)\n else:\n for i in range(0, noOfMsgs):\n self.producer.send_messages(topic, \"k\" + str(i), \"Message sent from Keyed Producer : \" + str(datetime.now().time()) )\n\n else:\n self.producer = SimpleProducer(kafka)\n if ( noOfMsgs == -1 ):\n x=1\n while True:\n self.producer.send_messages(topic, \"Message sent from Simple Producer : \" + str(datetime.now().time()))\n x += 1\n time.sleep(msg_interval)\n else:\n for i in range(0, noOfMsgs):\n self.producer.send_messages(topic, \"Message sent from Simple Producer : \" + str(datetime.now().time()) )", "def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")", "def _send(self, msg, adb_info):\n packed = msg.pack()\n _LOGGER.debug(\"bulk_write(%d): %r\", len(packed), packed)\n self._transport.bulk_write(packed, adb_info.transport_timeout_s)\n\n if msg.data:\n _LOGGER.debug(\"bulk_write(%d): %r\", len(msg.data), msg.data)\n self._transport.bulk_write(msg.data, adb_info.transport_timeout_s)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m", "def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')", "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n self.kernel.session.send(\n self.kernel.iopub_socket,\n msg_type,\n content,\n metadata=json_clean(metadata),\n parent=self.kernel._parent_header,\n ident=self.topic,\n buffers=buffers,\n )", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n 
self.endpoint.sendall(stream)", "def publish_data(data):\n redis_db.publish(DATA_CHANNEL, json.dumps(data))", "def send_to_kafka(rows):\n producer = connect_kafka_producer()\n for row in rows:\n print(row.asDict())\n producer.send(TOPIC_NAME, value=row.asDict())\n producer.flush()", "def emit(self, record):\n try:\n topic, record.msg = record.msg.split(TOPIC_DELIM,1)\n except Exception:\n topic = \"\"\n try:\n bmsg = cast_bytes(self.format(record))\n except Exception:\n self.handleError(record)\n return\n \n if isinstance(topic, str):\n btopic = cast_bytes(topic)\n else:\n print(\"Exception: topic is not string:{topic}\".format(topic=topic))\n btopic = b'Debug' \n\n self.socket.send_multipart([btopic, bmsg])", "def send(self, data):\n pass", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "def sendToSplunk(self,\n splunk_hec):\n\n # Initialize and start consumer if down\n if(not self.consumer_started):\n self.consumer = self.getConsumer(self.client.topics[self.topic])\n\n # Attempt to send messages to Splunk\n status_code = splunk_hec.writeToHec(self.messages)\n\n # clear messages\n self.messages = []\n\n # Check for successful delivery\n if(status_code == 200):\n # commit offsets in Kafka\n self.consumer.commit_offsets()\n return\n else:\n # Stop consumer and mark it down\n self.consumer.stop()\n self.consumer_started = False\n\n # Raise exception for retry\n logging.error(\"Failed to send data to Splunk HTTP Event Collector - check host, port, token & channel\")\n raise Exception('Failed to send data to Splunk HTTP Event Collector - Retrying')", "def trace_callback(msg):\n # Construct topic\n msg_topic = 'modbus/msg/trace/{}/{}/{}'.format(node_id, msg.address, msg.function)\n # Send message as JSON\n logging.debug('Publishing message on {}, address={}, function={}'.format(msg_topic, msg.address, msg.function))\n client.publish(topic = msg_topic, payload = msg.to_JSON())", "def send_message(self, msg):\n if msg is not None:\n try:\n self.node.write(msg.encode(encoding='UTF-8'))\n time.sleep(self.delay)\n except serial.serialutil.SerialTimeoutException:\n self.handle_congestion()\n self.send_message(msg)\n except serial.SerialException:\n self.handle_disconnection()\n self.send_message(msg)\n except:\n print(\"\\n!!!Unexpected error occurred in send_message()!!!\\n\")\n finally:\n return False\n return True", "def send_data(self, data):\n self._transport.write(data)", "def send_bytes(self, data: bytes) -> None:", "def handle_write(self):\n self.initiate_send()", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "def send(self, data):\r\n\r\n self._serial_object.write(data)", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending message to Kafka for visualizing\")", "def handle_write(self):\n #send_types = \" + \".join(\n # messages.get_message_type(message) for message in self.buffer)\n for message in self.buffer:\n if isinstance(message, str):\n self.send(message)\n else:\n self.send(message.pack())\n self.buffer = []\n #print \"To %s:%s sent: \" % (self.address, self.port), send_types", "def send_message(self,data):\n num_bytes = len(data)\n message = WriteMessage()\n message.write_uint32(num_bytes)\n message.data.extend(data)\n self.socket.sendall(message.data)", 
"def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def process_data():\n if ARG.WRITE:\n producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n key_serializer=lambda v: json.dumps(v).encode('utf-8'),\n bootstrap_servers=BROKERS)\n datestruct = dict()\n fetch_counts(datestruct)\n uts = dict()\n for sub in CAT_SUBGROUPS:\n for user in CAT_SUBGROUPS[sub]:\n uts[user] = sub\n epoch_seconds = time.time()\n for user in sorted(datestruct):\n payload = {'time': epoch_seconds}\n wuser = user.split('@')[0]\n workday = call_responder('config', 'config/workday/' + wuser)\n payload['user'] = wuser\n if 'config' in workday:\n payload['organization'] = workday['config']['organization']\n if payload['organization'] == 'Connectome Annotation Team':\n payload['subgroup'] = uts[wuser] if wuser in uts else ''\n else:\n LOGGER.warning(\"Could not find user %s\", wuser)\n payload['organization'] = 'unknown'\n for key in OPERATIONS:\n payload['operation'] = key\n payload['count'] = datestruct[user][key]\n if ARG.WRITE:\n LOGGER.debug(json.dumps(payload))\n future = producer.send(ARG.TOPIC, payload, str(datetime.datetime.now()))\n try:\n future.get(timeout=10)\n except KafkaError:\n LOGGER.critical(\"Failed publishing to %s\", ARG.TOPIC)\n else:\n LOGGER.info(json.dumps(payload))", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def send(self, data: Union[ActionEvent, TurnEvent], compression=None):\n # pause_receive is irrelevant now\n # self._pause_receive.set()\n self._send_queue.append(data)\n # super(MastermindClientUDP, self).send(JSONSerializer.serialize(data), compression)\n # self._pause_receive.clear()\n return", "def send_msg(self, type, data):\n data = json.dumps(\n {\n \"job\": self._job_id,\n \"idx\": self._job_idx,\n \"tool\": self._tool,\n \"type\": type,\n \"data\": data\n },\n\n # use this so that users don't run into errors with ObjectIds not being\n # able to be encodable. 
If using bson.json_util.dumps was strictly used\n # everywhere, could just use that dumps method, but it's not, and I'd rather\n # keep it simple for now\n cls=FriendlyJSONEncoder\n )\n\n self._connected.wait(2 ** 31)\n\n data_len = struct.pack(\">L\", len(data))\n if not self._dev:\n try:\n with self._send_recv_lock:\n self._sock.send(data_len + data)\n except:\n # yes, just silently fail I think???\n pass", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "def send_data_with_pika(self, category, check_data_type=False):\n data_to_send = self.prepare_data_to_send(category, get_data_config(\"identifier\"), check_data_type)\n publisher.publish_message(data_to_send)\n print(data_to_send)", "def test_topic_bytes(self):\n self.failureResultOf(self.producer.send_messages(b\"topic\", msgs=[b\"\"]), TypeError)", "def _send_data_to_wbt(self,nnData):\n\t\tnnData += \"END\\n\"\n\t\tself._conn.send(nnData)", "def sendData(self):\n\n while self.keep_running:\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='fanout')\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key='',\n body=self.message)\n\n self.connection.close()\n\n time.sleep(self.loopTime)", "def send(self, msg):\n if self.isConnected():\n pmsg = pickle.dumps(msg)\n if COMPRESS:\n pmsg = zlib.compress(pmsg)\n buffer = QByteArray()\n stream = QDataStream(buffer, QIODevice.WriteOnly)\n stream.setVersion(QDataStream.Qt_5_3)\n stream.writeUInt32(len(pmsg))\n stream.writeRawData(pmsg)\n bytesWritten = self.tcpsocket.write(buffer)\n self.tcpsocket.flush()\n self.tcpsocket.waitForBytesWritten()\n # qApp.processEvents() # send data immediately and don't wait for next mainloop\n logging.debug(\"Bytes written: %i\", bytesWritten)\n if bytesWritten > 0:\n return True\n else:\n logging.debug(\"Message not send. 
Not connected\")\n return False", "def send_message(self, msg):\n self.logger.debug(msg)\n self.writer.send(json.dumps(msg))", "def publish(self, data):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n # Data must be a bytestring\n logger.info(\"publishing message %s\" % data)\n data = data.encode('utf-8')\n self.publisher.publish(self.topic_path, data=data)\n\n logger.info('Published messages: {}'.format(data))\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def send_data(data):\n\n # In order for the data to be transmitted, it has to be in bytes format\n pickled_data = pickle.dumps(data)\n # Actual length of the data (for example 3) \n data_length = len(pickled_data)\n # Padded length of the data (for example '3 ')\n padded_length = pickle.dumps(data_length)\n padded_length += b' ' * (HEADER_SIZE - len(padded_length))\n\n # Send the padded length and then the data right after\n conn.send(padded_length)\n conn.send(pickled_data)", "def sendDataMessage(iTag, clsName, msgID, msg): #@NoSelf", "def send(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('spi.send(data, timeout=50000)\\r\\n'.encode('utf-8'))\n sleep(1)", "def publish(self, topic, partition, data):\n c = self._connctions.get('__runpub__')\n\n if c and c.ws is not None:\n sm = SocketMessage('pub', topic=topic, partitions=partition, entries=data)\n c.ws.send(sm.toJSON())\n return True\n else:\n return False", "def send_data(self, msg):\n totalsent = 0\n # tt= struct.unpack('c'*len(msg), msg)\n # print(tt)\n while totalsent < len(msg):\n try:\n sent = self.sockfd.send(msg)\n except:\n print(f'{self.ip} socket failed')\n break\n if sent == 0:\n raise RuntimeError(\"Socket connection broken\")\n totalsent = totalsent + sent", "def send(self, topic, msg):\n with self.mutex:\n length = len(msg)\n checksum = 255 - ( ((topic&255) + (topic>>8) + (length&255) + (length>>8) + sum([ord(x) for x in msg]))%256 )\n data = '\\xff\\xff'+ chr(topic&255) + chr(topic>>8) + chr(length&255) + chr(length>>8)\n data = data + msg + chr(checksum)\n self.port.write(data)", "def _send_data(self, data):\n if isinstance(data, int):\n data = [data]\n self._spi_write(_SPI_DATA, data)", "def produce_messages(self, device_id, measurements):\n\n msg = {'device_id': device_id,\n 'measurements': measurements}\n\n self.channel.basic_publish(exchange='',\n routing_key=self.queue_name,\n body=json.dumps(msg),\n properties=pika.BasicProperties(content_type='application/json'))", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "async def send(self, websocket, payload) -> None:\n if isinstance(payload, list):\n data_size: int = 0\n\n for data in payload:\n _data = pickle.dumps(data)\n await websocket.send(_data)\n data_size += sys.getsizeof(_data)\n else:\n _data = pickle.dumps(payload)\n await websocket.send(_data)\n data_size = sys.getsizeof(_data)\n\n logging.info(\"[Client #%d] Sent %s MB of payload data to the server.\",\n self.client_id, round(data_size / 1024**2, 2))", "def send(self, topic, msg):\n out = \"%s %s\" % (topic, msg)\n self.topics[topic].send(bytes(out, 'utf-8'))", "def producer():\n\n connection = 
pika.BlockingConnection(pika.ConnectionParameters('rabbit'))\n channel = connection.channel()\n\n channel.queue_declare(queue=QUEUE_NAME)\n\n # Create two unique device ids to provide more example data\n timestamp = arrow.now().timestamp\n device_name = b'A' if timestamp % 2 == 0 else b'B'\n '''\n This creates the same hash value each time so we can use the Raspberry Pi\n serial number to create a unique ID for each device\n '''\n device_id = hashlib.sha1(device_name).hexdigest()\n\n # Currently a python dict\n data = {\n 'device_id': device_id,\n 'timestamp': timestamp,\n 'data': {\n 'key': 'value'\n }\n }\n\n channel.basic_publish(exchange='',\n routing_key=QUEUE_NAME,\n body=json.dumps(data)) # Encode as a JSON string\n msg = f' [x] Sent {data}'\n print(msg)\n logging.info(msg)\n connection.close()", "def write(self, msg):\n # Transmit messages using the serial connection. Encodes strings to byte-arrays\n self.Serial.write(msg.encode('ascii'))", "def _send_data(self, data, time):\n pass", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def send(self, data=None, metadata=None, buffers=None):\n self._publish_msg(\n 'comm_msg', data=data, metadata=metadata, buffers=buffers,\n )", "def send(self, ard: Arduino_functions.Arduino, write_msg_str):\n self.worker_send.queue.put((ard, write_msg_str))\n\n # Trigger processing the worker_send queue.\n self.worker_send.qwc.wakeAll()", "def run(self):\n data = ''\n while not rospy.is_shutdown():\n if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):\n rospy.logerr(\"Lost sync with device, restarting...\")\n self.requestTopics()\n self.lastsync = rospy.Time.now() \n \n flag = [0,0]\n flag[0] = self.port.read(1)\n if (flag[0] != '\\xff'):\n continue\n flag[1] = self.port.read(1)\n if ( flag[1] != '\\xff'):\n rospy.loginfo(\"Failed Packet Flags \")\n continue\n # topic id (2 bytes)\n header = self.port.read(4)\n if (len(header) != 4):\n #self.port.flushInput()\n continue\n \n topic_id, msg_length = struct.unpack(\"<hh\", header)\n msg = self.port.read(msg_length)\n if (len(msg) != msg_length):\n rospy.loginfo(\"Packet Failed : Failed to read msg data\")\n #self.port.flushInput()\n continue\n chk = self.port.read(1)\n checksum = sum(map(ord,header) ) + sum(map(ord, msg)) + ord(chk)\n\n if checksum%256 == 255:\n if topic_id == TopicInfo.ID_PUBLISHER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.senders[m.topic_id] = Publisher(m.topic_name, m.message_type)\n rospy.loginfo(\"Setup Publisher on %s [%s]\" % (m.topic_name, m.message_type) )\n except Exception as e:\n rospy.logerr(\"Failed to parse publisher: %s\", e)\n elif topic_id == TopicInfo.ID_SUBSCRIBER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.receivers[m.topic_name] = [m.topic_id, Subscriber(m.topic_name, m.message_type, self)]\n rospy.loginfo(\"Setup Subscriber on %s [%s]\" % (m.topic_name, m.message_type))\n except Exception as e:\n rospy.logerr(\"Failed to parse subscriber. 
%s\"%e)\n elif topic_id == TopicInfo.ID_SERVICE_SERVER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n\t\t\tservice = ServiceServer(m.topic_name, m.message_type, self)\n self.receivers[m.topic_name] = [m.topic_id, service]\n self.senders[m.topic_id] = service\n rospy.loginfo(\"Setup ServiceServer on %s [%s]\"%(m.topic_name, m.message_type) )\n except:\n rospy.logerr(\"Failed to parse service server\")\n elif topic_id == TopicInfo.ID_SERVICE_CLIENT:\n pass\n \n elif topic_id == TopicInfo.ID_PARAMETER_REQUEST:\n self.handleParameterRequest(msg)\n \n elif topic_id == TopicInfo.ID_LOG:\n self.handleLogging(msg)\n \n elif topic_id == TopicInfo.ID_TIME:\n t = Time()\n t.data = rospy.Time.now()\n data_buffer = StringIO.StringIO()\n t.serialize(data_buffer)\n self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )\n self.lastsync = rospy.Time.now()\n elif topic_id >= 100: # TOPIC\n try:\n self.senders[topic_id].handlePacket(msg)\n except KeyError:\n rospy.logerr(\"Tried to publish before configured, topic id %d\" % topic_id)\n else:\n rospy.logerr(\"Unrecognized command topic!\")\n rospy.sleep(0.001)", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def callback_serial_write(data):\n serial_write(data.data)", "def transmit(self, message):\n pass", "def send_message_to_server(self, key, value):\n if self.from_kivy_queue is None:\n return\n self.from_kivy_queue.put((key, value))", "def produce():\n # argument parsing\n args = parse_args()\n broker = args.broker_host + ':9092'\n topic = args.kafka_topic\n print 'Starting up ... Broker: ' + broker\n # connect to Kafka\n producer = KafkaProducer(bootstrap_servers=broker)\n counter = 1\n while True:\n # send messages\n for user in users:\n user_activity = generate_activity(user)\n producer.send(topic, user_activity)\n print 'Message ' + str(counter) + ' send...'\n time.sleep(0.5)\n counter += 1", "def transmit_mqtt(form_obj):\n # Print to console for debug\n print(form_obj)\n # Create a message to send\n topic = 'Testdevice/team2_module/RECEIVE'\n send_me = [topic,\n form_obj['sender'],\n form_obj['angle'],\n form_obj['brightness'],\n form_obj['resistance']\n ]\n # debug output to console\n print(\"før!!\")\n print(send_me)\n send_me = str(send_me)\n print(\"Efter!!\")\n\n # Send it\n\n # The donothing callback function\n def donothing(client, userdata, message):\n pass\n\n # Callback on publishing - After handshakes\n def on_publish_callback(client, userdata, mid):\n global sending\n sending = False\n\n # Create client\n publisher = MqttClient(\"Team2ModuleMessageSender\", donothing, on_publish_callback)\n\n # Send and disconnect\n rc = publisher.publish(topic, send_me)\n\n publisher.loop_start()\n global sending\n sending = True\n # Wait for the handshaking to end\n while sending:\n pass\n publisher.loop_stop()\n\n publisher.disconnect()\n\n return rc", "def send_msg():\n\tmessage = \"%s %s %d\\n\" % (metric, activeDAHDIChannels, int(time.time()))\n\t# print 'sending message:\\n%s' % message\n\tcarbonSocket = socket.socket()\n\tcarbonSocket.connect((CARBON_HOST, CARBON_PORT))\n\tcarbonSocket.sendall(message)\n\tcarbonSocket.close()\n\tlast_send = int(time.time())", "def generate_data(self):\n print(\"generate_data - init\")\n with open(self.input_file, \"r\") as f:\n\n # read JSON data from input file\n data = json.loads(f.read())\n\n for idx, row in enumerate(data): \n # serialize Python dict to string\n msg = self.serialize_json(row)\n #print(f\"Linha: 
{row}\")\n self.send(self.topic, msg)\n self.flush()\n #print(\"Sleeping\")\n time.sleep(1)", "def run(self, topic: str):\n while self.events:\n wait = self.get_wait_time()\n self.logger.debug('sleeping for %s seconds', wait)\n time.sleep(wait)\n\n event = self.events.pop(0)\n self.send(topic, event)\\\n .add_callback(self.on_send_success, event=event)\\\n .add_errback(self.on_send_failure, event=event)\n\n self.flush()", "def data_sync(self, broker):\n\n # publish the data values that this program is sourcing\n for attr in self.parm_list:\n if attr.direction == attr.PUB:\n self.logging.debug(\"From data_sync() \" + \"Publishing: \"+attr.label)\n self.mqtt_client.publish(attr.topic, attr.value)\n\n # note that the subscribed values are updated asynchronously by on_message()", "def transfer(self, event_data, callback=None):\n if event_data.partition_key and self.partition:\n raise ValueError(\"EventData partition key cannot be used with a partition sender.\")\n if callback:\n event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c))\n self._handler.queue_message(event_data.message)" ]
[ "0.68818235", "0.6817525", "0.6561873", "0.6313053", "0.6285782", "0.62410563", "0.6211618", "0.6198164", "0.61639136", "0.61561596", "0.6113484", "0.60954404", "0.60725266", "0.6064175", "0.60134137", "0.59805125", "0.59797984", "0.5979177", "0.5969394", "0.5968888", "0.5966107", "0.59551746", "0.5946582", "0.5945592", "0.5939317", "0.5936175", "0.5934959", "0.5926427", "0.59223586", "0.59133506", "0.5912351", "0.59008753", "0.59008753", "0.5897973", "0.5894882", "0.5886061", "0.5885628", "0.58822036", "0.5879032", "0.5841839", "0.58405983", "0.58333635", "0.5828563", "0.5821171", "0.5816843", "0.5809288", "0.5799428", "0.579092", "0.57815987", "0.5776892", "0.5774215", "0.576806", "0.5767668", "0.57631207", "0.5752584", "0.5744364", "0.5743749", "0.57404745", "0.5738955", "0.5720459", "0.57185376", "0.5709131", "0.56986505", "0.5691962", "0.56858027", "0.5677523", "0.56711936", "0.56702787", "0.5665228", "0.5644441", "0.56440336", "0.5642834", "0.56345165", "0.56328213", "0.5630528", "0.5628278", "0.5628264", "0.56228316", "0.56228316", "0.5614911", "0.56135035", "0.5611539", "0.56093913", "0.5607991", "0.5607898", "0.5607495", "0.55974996", "0.5584734", "0.5582737", "0.5573101", "0.5572047", "0.5570036", "0.5569051", "0.55609465", "0.55606955", "0.55586225", "0.55570066", "0.5549084", "0.5546781", "0.55453813" ]
0.58462435
39
Builds and returns (in the form returned by decoderawtransaction) a transaction that spends the given utxo, pays CHI to some output
def build_tx (self, utxo, chiOut, name, nameAddr, value): nameData = self.nodes[0].name_show (name) inputs = [nameData, utxo] outputs = {nameAddr: Decimal ('0.01')} outputs.update (chiOut) tx = self.nodes[0].createrawtransaction (inputs, outputs) nameOp = { "op": "name_update", "name": name, "value": value, } tx = self.nodes[0].namerawtransaction (tx, 0, nameOp) res = self.nodes[0].decoderawtransaction (tx["hex"]) res["hex"] = tx["hex"] return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sochain_utxo_to_xchain_utxo(utxo):\n hash = utxo['txid']\n index = utxo['output_no']\n \n value = round(float(utxo['value']) * 10 ** 8)\n script = bytearray.fromhex(utxo['script_hex']) #utxo['script_hex']\n witness_utxo = Witness_UTXO(value, script)\n return UTXO(hash, index, witness_utxo)", "def select_utxo_and_create_tx(transaction_input: TransactionInput) -> (TransactionOutput, str):\n\ttry:\n\t\tunspent = Utxo.get_unspent_outputs(transaction_input.source_address)\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to get unspent outputs: {e}\")\n\t\treturn None, \"There was a problem trying to get unspent outputs\"\n\n\ttotal_unspent = sum([u['value'] for u in unspent])\n\n\tbest_selected = SelectedInfo(total_unspent, \"\", list(unspent), dict(transaction_input.outputs))\n\t# It checks which selector gives the best results in terms of lower fees\n\tfor selector in [BiggerFirst(), SmallerFirst(), FirstFit(), BestFit()]:\n\t\toutputs = dict(transaction_input.outputs)\n\t\ttotal_outputs = sum([u for u in outputs.values()])\n\n\t\tselected, err = create_transaction_with_change(\n\t\t\tselector, outputs, total_outputs, unspent, total_unspent,\n\t\t\ttransaction_input.source_address, transaction_input.fee_kb)\n\n\t\tif err is not None:\n\t\t\treturn None, err\n\n\t\t# Case it's found a smaller fee or less UTXO are used or less no change is necessary\n\t\tbest_selected = min(best_selected, selected)\n\n\tif len(best_selected.selected) == 0:\n\t\treturn None, \"It was unable the select the UTXO for creating the transaction\"\n\n\tresp = TransactionOutput(best_selected.raw, [])\n\tfor utxo in best_selected.selected:\n\t\tresp.inputs += [Utxo.to_tx_output_item(utxo)]\n\n\treturn resp, None", "def create_tx(self, coin, account, to, amount):\n if coin is ETH:\n gasEstimate = self.w3.eth.estimateGas(\n {\"from\": account.address, \"to\": to, \"value\": amount}\n )\n return {\n \"from\": account.address,\n \"to\": to,\n \"value\": self.w3.toWei(amount, 'ether'),\n \"gasPrice\": self.w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": self.w3.eth.getTransactionCount(account.address),\n }\n elif coin is BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(to, amount, BTC)])\n elif coin is BTC:\n return PrivateKey.prepare_transaction(account.address, [(to, amount, BTC)])\n else:\n return None", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), b\"\", 0xffffffff))\n for addr, amount in outputs.items():\n if addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")", "def create_tx(coin, account, recipient, amount):\n if coin ==ETH:\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": recipient, \"value\": amount})\n return{\n \"to\": recipient,\n \"from\": account.address,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address)\n }\n if coin == BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(recipient, amount, BTC)])", "def send_tx(args):\n 
kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()", "def transact(self, args):\n private_key = os.environ.get('private_key')\n if private_key:\n set_gas_prices(self.w3, args)\n tx = send(self.w3, private_key, args)\n return self.w3.toHex(tx)", "def mk_simple_transaction(self, from_addr, to_addr, send_value):\n transaction = dict(\n nonce=self.web3.eth.get_transaction_count(from_addr),\n gasPrice=self.web3.eth.gasPrice,\n # there must be an automated way to automatically set the gas price\n # based off of the gas strategy\n gas=100000,\n to=to_addr,\n value=self.web3.toWei(send_value, 'wei')\n )\n return transaction", "def __create_transaction(self):\n log.debug(\"Displaying __create_transaction\")\n # Make the admin select an user\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Create an inline keyboard with a single cancel button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cmd_cancel\")]])\n # Request from the user the amount of money to be credited manually\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_credit\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(-? ?[0-9]{1,3}(?:[.,][0-9]{1,2})?)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Convert the reply to a price object\n price = self.Price(reply)\n # Ask the user for notes\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_transaction_notes\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(.*)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Create a new transaction\n transaction = db.Transaction(user=user,\n value=int(price),\n provider=\"Manual\",\n notes=reply)\n self.session.add(transaction)\n # Change the user credit\n user.recalculate_credit()\n # Commit the changes\n self.session.commit()\n # Notify the user of the credit/debit\n self.bot.send_message(user.user_id,\n self.loc.get(\"notification_transaction_created\",\n transaction=transaction.text(w=self)))\n # Notify the admin of the success\n self.bot.send_message(self.chat.id, self.loc.get(\"success_transaction_created\",\n transaction=transaction.text(w=self)))", "def call_contract(w3, account, func):\n tx = func.buildTransaction({\n 'nonce': w3.eth.getTransactionCount(account.address),\n 'gas': func.estimateGas()\n })\n signed_tx = w3.eth.account.signTransaction(tx, account.privateKey)\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n return tx_hash", "def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import 
NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest", "def create_transaction(inputs: list, outputs: dict) -> ((str, int), str):\n\ttry:\n\t\tc = Bitcoin(testnet=bitcoin_is_testnet)\n\t\touts = []\n\t\tfor outk, outv in outputs.items():\n\t\t\touts += [{'value': outv, 'address': outk}]\n\t\ttx = c.mktx(inputs, outs)\n\t\ttx_serialize = serialize(tx)\n\n\t\t# Signing each input to predict the transaction size\n\t\tpriv = sha256('a big long brainwallet password')\n\t\ttx_signed = tx.copy()\n\t\tfor i in range(len(inputs)):\n\t\t\ttx_signed = c.sign(tx_signed, i, priv)\n\n\t\t# The serialization uses one char per nibble so in order the get the number of bytes it's necessary to\n\t\t# divide the size of the string serialization by 2\n\t\treturn (str(tx_serialize), len(str(serialize(tx_signed))) // 2), None\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to create the transaction: {e}\")\n\t\treturn (None, None), \"There was a problem trying to create the transaction\"", "def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200", "def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = 
CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}", "def add_tx(self, txid, tx):\n outputs = tx.outputs()\n so = outputs and outputs[0][1]\n # Note: ScriptOutput here is the subclass defined in this file, not\n # address.ScriptOutput\n if not isinstance(so, ScriptOutput):\n return\n transaction_type = so.message.transaction_type\n try:\n if transaction_type == \"GENESIS\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"MINT\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"SEND\":\n self._add_send_tx(so, outputs, txid, tx)\n elif transaction_type == \"COMMIT\":\n return # ignore COMMIT, they don't produce any tokens\n else:\n raise InvalidOutputMessage(\"Bad transaction type\")\n except (AssertionError, ValueError, KeyError, TypeError, IndexError) as e:\n self.print_error(f\"ERROR: tx {txid}; exc =\", repr(e))", "def testnet_receive_coin(self):\n try:\n datas = get_transaction_details(tx_hash=self.tx_hash,\n coin_symbol='bcy')\n except:\n raise ValidationError('Hash da transacao invalido ou nao '\n 'identificado.')\n if datas.get('error'):\n raise ValidationError('Transacao nao encontrada.')\n vals = {'name': datas.get('hash')}\n if datas.get('confirmations') >= 2:\n vals.update({'confirmation': datas.get('confirmations'),\n 'date_time': str(datas.get('confirmed')),\n 'state': 'D',\n 'satoshi': datas.get('outputs')[0].get('value')})\n self.write(vals)\n return datas.get('hash')", "def CreateTx(self, request, context):\n channel_name = request.channel or conf.LOOPCHAIN_DEFAULT_CHANNEL\n utils.logger.info(f\"peer_outer_service::CreateTx request({request.data}), channel({channel_name})\")\n\n channel_stub = StubCollection().channel_stubs[channel_name]\n result_hash = asyncio.run_coroutine_threadsafe(\n channel_stub.async_task().create_tx(request.data),\n self.peer_service.inner_service.loop\n ).result()\n\n return loopchain_pb2.CreateTxReply(\n response_code=message_code.Response.success,\n tx_hash=result_hash,\n more_info='')", "def makeTx(self):\n new_tx = transaction.Tx(self.simulation.tick, self.id, self.id_bag.getNextId(), [])\n self.simulation.all_tx.append(new_tx)\n return new_tx", "def createrawtransaction(\n self,\n outpoints: List[Dict[str, Any]],\n send_to: Dict[str, float],\n locktime: Optional[int] = None,\n ) -> str:\n assert type(outpoints) == list\n assert type(send_to) == dict\n assert locktime is None or type(locktime) == int\n return self.rpc_call(\"createrawtransaction\", outpoints, send_to, locktime)", "async def new_tx(request: Request) -> dict:\n peer = request.client.host\n tx = await request.json()\n tx = Transaction(**tx)\n chain.mempool.put_nowait(tx)\n return {\"sender\": peer, \"receipt\": tx.receipt()}", "def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)", "def create_raw_transaction(amount, network_fee, from_address, 
to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and network fees are subtracted\n # from input_total. Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()", "def get_utxo_in_wallet(env):\n command = build_command(env.cardano_cli, \"query\", \"utxo\", \"--address\", \"$(cat \" + env.wallet_payment_addr + \")\",\n \"--testnet-magic\", env.magic)\n success, message = run_command(command)\n lines = message.split(\"\\\\n\")\n if len(lines) <= 2:\n raise Exception(\"Could not find utxo with \" + command + \" in\\n \" + format_shell_error(command, message))\n\n # TODO: smarter selection than just using the first one\n # parse while handling multiple whitespaces\n first_utxo_line = re.sub(\" +\", \" \", lines[2]).split(\" \")\n utxo = first_utxo_line[0] + \"#\" + first_utxo_line[1]\n return utxo", "def create_god_transaction(to_pk):\n\n god_pk, god_sk = signature.generate_keys()\n tx = Transaction(god_pk, to_pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n return tx", "def send_unsigned_transaction(self, tx: Dict[str, Any], private_key: Optional[str] = None,\n public_key: Optional[str] = None, retry: bool = False,\n block_identifier: Optional[str] = 'pending') -> bytes:\n if private_key:\n address = self.private_key_to_address(private_key)\n elif public_key:\n address = public_key\n else:\n logger.error('No ethereum account provided. 
Need a public_key or private_key')\n raise ValueError('Ethereum account was not configured or unlocked in the node')\n\n if tx.get('nonce') is None:\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n\n number_errors = 5\n while number_errors >= 0:\n try:\n if private_key:\n signed_tx = self.w3.eth.account.sign_transaction(tx, private_key=private_key)\n logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])\n try:\n return self.send_raw_transaction(signed_tx.rawTransaction)\n except TransactionAlreadyImported as e:\n # Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's\n # processed\n tx_hash = signed_tx.hash\n logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))\n return tx_hash\n elif public_key:\n tx['from'] = address\n return self.send_transaction(tx)\n except ReplacementTransactionUnderpriced as e:\n if not retry or not number_errors:\n raise e\n current_nonce = tx['nonce']\n tx['nonce'] = max(current_nonce + 1, self.get_nonce_for_account(address,\n block_identifier=block_identifier))\n logger.error('Tx with nonce=%d was already sent for address=%s, retrying with nonce=%s',\n current_nonce, address, tx['nonce'])\n except InvalidNonce as e:\n if not retry or not number_errors:\n raise e\n logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',\n address, tx['nonce'])\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n number_errors -= 1", "def generate_privacy_transaction(self):\n\n # recharge transaction sending wallet\n self.recharge_account()\n\n # recharge transaction receiving wallet, (recharging because we need to use this wallet in otaRefund)\n self.privacy_transaction_receiving_wallet = Recharge()\n self.privacy_transaction_receiving_wallet.recharge_account()\n\n # wait for some time, recharge takes time to reflect.\n time.sleep(commonUtil.default_wait_after_recharge)\n\n child = pexpect.spawn('node privacyTransaction', cwd='../src/')\n if commonUtil.show_logs:\n child.logfile = sys.stdout\n\n commonUtil.check_expect(\"Input file name\", child, test_name, \"'Input file name' prompt not found\")\n child.sendline(self.get_file_name())\n\n commonUtil.check_expect(\"Input password\", child, test_name, \"'Input password' prompt not found\")\n child.sendline(self.get_password())\n\n commonUtil.check_expect(\"wallet has been unlocked\", child, test_name,\n \"'wallet has been unlocked' message not found\")\n child.sendline(\"Y\")\n\n commonUtil.check_expect(\"Input receiver\\'s waddress\", child, test_name,\n \"'Input receiver\\'s waddress' prompt not found\")\n child.sendline(self.privacy_transaction_receiving_wallet.get_wan_address());\n\n commonUtil.check_expect(\"Input\", child, test_name, \"Input eth address prompt not found\")\n child.sendline(commonUtil.default_eth_privacy_transaction)\n\n child.expect(commonUtil.default_eth_privacy_transaction)\n\n result = child.read()\n\n if result.find(\"value: \" + commonUtil.default_stamp_value) == -1:\n commonUtil.exit_test(\"'value: \" + commonUtil.default_stamp_value +\n \"' not found in summary\", test_name, child)\n\n if result.find(\"otaDestAddress\") == -1:\n commonUtil.exit_test(\"'otaDestAddress' text not found \", test_name, child)\n\n ota_address_start = result.find('0x', result.find(\"otaDestAddress\"))\n if ota_address_start == -1:\n commonUtil.exit_test(\"'otaDestAddress' value not found\", test_name, child)\n\n 
self.ota_address = result[ota_address_start + 2:ota_address_start + 135]\n\n if result.find(commonUtil.default_stamp_value) == -1:\n commonUtil.exit_test(\"stamp value \" + commonUtil.default_stamp_value + \" not found\", test_name, child)\n\n child.expect(pexpect.EOF)", "def tx(self):\n cmd = Command()\n cmd.set_num(0x1C)\n cmd.set_subcmd_num(0x00)\n cmd.set_data([0x01])\n self.connection.send_cmd(cmd.render())", "def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)", "def decoderawtransaction_asm_sighashtype(self):\n\n self.log.info(\"- various mainnet txs\")\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])\n\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc\n # verify that we have not altered scriptPubKey decoding.\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])\n assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])\n assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', 
rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n txSave = tx_from_hex(tx)\n\n self.log.info(\"- tx not passing DER signature checks\")\n # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type\n tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])\n\n self.log.info(\"- tx passing DER signature checks\")\n # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n\n # some more full transaction tests of varying specific scriptSigs. 
used instead of\n # tests in decodescript_script_sig because the decodescript RPC is specifically\n # for working on scriptPubKeys (argh!).\n push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]\n signature = push_signature[2:]\n der_signature = signature[:-2]\n signature_sighash_decoded = der_signature + '[ALL]'\n signature_2 = der_signature + '82'\n push_signature_2 = '48' + signature_2\n signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'\n\n self.log.info(\"- P2PK scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n # make sure that the sighash decodes come out correctly for a more complex / lesser used case.\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- multisig scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- scriptSig that contains more than push operations\")\n # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.\n txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])", "def transaction(self, uuid):\r\n return tx.Transaction(self, uuid)", "def prepare_raw_tx(self, mn_address, change_address, inputs, total, fee=0.00001):\n raw_tx = {mn_address: self.send_amount, change_address: total - self.send_amount - fee}\n return self.rpc.createrawtransaction(inputs, raw_tx)", "def create_from_transaction(tx, prev_hash):\n\n tx_hash = HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)", "def encode(self):\n packet = (\n\n str(self.pos_number) + # 2 octets 0:3\n\n ('%.0f' % (self.amount * 100)).zfill(8) + # 8 octets 3:11\n\n self.answer_flag + # 1 octet 11:12\n\n self.payment_mode + # 1 octet 12:13\n\n self.transaction_type + # 1 octet 13:14\n\n self.currency_numeric + # 3 octet 14:17\n\n self.private + # 10 octet 17:27\n\n self.delay + # 4 octet 27:31\n\n self.authorization) # 4 octet 31:35\n\n packet_len = len(packet)\n\n if packet_len != TERMINAL_ASK_REQUIRED_SIZE:\n raise SequenceDoesNotMatchLengthException('Cannot create ask payment sequence with len != {0} octets. 
'\n 'Currently have {1} octet(s).'.format\n (TERMINAL_ASK_REQUIRED_SIZE, packet_len))\n\n return TeliumData.framing(packet)", "def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}", "def create_transactions(self, revocation_address):\n self.tree.make_tree()\n\n spendables = self.connector.get_unspent_outputs(self.issuing_address)\n if not spendables:\n error_message = 'No money to spend at address {}'.format(self.issuing_address)\n logging.error(error_message)\n raise InsufficientFundsError(error_message)\n\n last_input = spendables[-1]\n\n op_return_value = unhexlify(self.tree.get_merkle_root())\n\n tx_outs = self.build_recipient_tx_outs()\n tx_outs.append(tx_utils.create_transaction_output(revocation_address,\n self.tx_cost_constants.get_minimum_output_coin()))\n\n transaction = tx_utils.create_trx(\n op_return_value,\n self.total,\n self.issuing_address,\n tx_outs,\n last_input)\n\n transaction_data = TransactionData(uid=self.batch_id,\n tx=transaction,\n tx_input=last_input,\n op_return_value=hexlify(op_return_value),\n batch_metadata=self.batch_metadata)\n\n return [transaction_data]", "def generate_transaction(recipient_id: int, amount: float, mute: bool = False) -> bool:\n logging.debug(\"Transaction requested: %f NBC to node %d\", amount, recipient_id)\n sender = wallet.get_public_key().dumpb()\n recipient = wallet.get_public_key(recipient_id).dumpb()\n r = util.get_db()\n inputs: List[TransactionInput] = []\n input_amount = 0.0\n with r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-tx:lock\"):\n for ib, ob in r.hgetall(\"blockchain:utxo-tx\").items():\n o = TransactionOutput.loadb(ob)\n if o.recipient == sender:\n inputs.append(TransactionInput.loadb(ib))\n input_amount += o.amount\n if input_amount >= amount:\n t = Transaction(recipient=recipient,\n amount=amount,\n inputs=inputs,\n input_amount=input_amount)\n # Add to transaction pool\n r.hset(\"blockchain:tx_pool\", t.id, t.dumpb())\n # \"Add to wallet if mine\"\n r.hdel(\"blockchain:utxo-tx\", *(i.dumpb() for i in t.inputs))\n r.hmset(\"blockchain:utxo-tx\", {TransactionInput(t.id, o.index).dumpb(): \\\n o.dumpb() for o in t.outputs})\n break\n else:\n # Not enough UTXOs\n logging.error(\"Cannot send %f NBC to node %d (not enough coins)\", amount, recipient_id)\n return False\n\n logging.debug(\"Generated transaction %s\", util.bintos(t.id))\n _check_for_new_block()\n if not mute:\n logging.debug(\"Broadcasting transaction %s\", util.bintos(t.id))\n chatter.broadcast_transaction(t, util.get_peer_ids())\n return True", "def create_transaction_with_change(\n\t\tselector: SelectUtxo, outputs: dict, total_outputs: int, unspent: list, 
total_unspent: int,\n\t\tsource_address: str, fee_kb: int) -> (SelectedInfo, str):\n\t# Selecting UTXO without fee just to estimate the transaction size\n\tselected_utxo, _ = selector.select(unspent, total_outputs)\n\testimated_size = guess_transaction_size(selected_utxo, outputs)\n\testimated_fee = estimate_fee(estimated_size, fee_kb)\n\n\tif total_unspent < total_outputs + estimated_fee:\n\t\treturn None, \"The output cannot be greater than the input\"\n\n\tselected_utxo, total_selected = selector.select(unspent, total_outputs + estimated_fee)\n\t# Create transaction and calculate the fee\n\t(raw_transaction, estimated_size), err = create_transaction(selected_utxo, outputs)\n\tif err is not None:\n\t\treturn None, err\n\testimated_fee = estimate_fee(estimated_size, fee_kb)\n\n\toutputs, change = create_change(\n\t\toutputs, total_selected, source_address, total_outputs, estimated_fee\n\t)\n\ttotal_outputs += change\n\t# If a change was added then it needs to create the transaction again\n\tif change != 0:\n\t\t(raw_transaction, _), err = create_transaction(selected_utxo, outputs)\n\t\tif err is not None:\n\t\t\treturn None, err\n\n\tfee_value = total_selected - total_outputs\n\treturn SelectedInfo(fee_value, raw_transaction, selected_utxo, outputs), None", "def make_transaction():\n account_id = request.json['account_id']\n aux_account = [account for account in accounts if account['id'] == account_id]\n if len(aux_account) == 0:\n abort(404)\n account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))\n transaction = request.json['transaction']\n transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))\n\n if not request.json:\n abort(400)\n if transaction not in ['withdrawal', 'deposit']:\n abort(400, f'Invalid transaction name: {transaction}')\n if transaction == 'withdrawal':\n transaction_amount = transaction_amount*-1\n\n # the user can't withdraw more than the account has\n validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n if validation_sum >= 0:\n for real_account in accounts:\n if real_account.get('id') == account_id:\n real_account['balance'] = round(float(validation_sum),2)\n else:\n abort(400, {'error':'Not enough funds for this transaction'})\n\n return json.dumps({f'{transaction.capitalize()} Done. 
New balance': str(validation_sum)}, ensure_ascii=False), 200", "def buildTxOut (self, addr, amount):\n\n addrData = self.nodes[0].validateaddress (addr)\n addrScript = bytes.fromhex (addrData[\"scriptPubKey\"])\n\n return CTxOut (int (amount * COIN), addrScript)", "def _handle_icx_send_transaction(self,\n context: 'IconScoreContext',\n params: dict) -> 'TransactionResult':\n tx_result = TransactionResult(context.tx, context.block)\n\n try:\n to: Address = params['to']\n tx_result.to = to\n\n if context.get_revision() >= 3:\n # Check if from account can charge a tx fee\n self._icon_pre_validator.execute_to_check_out_of_balance(context, params,\n step_price=context.step_counter.step_price)\n else:\n # Check if from account can charge a tx fee\n self._icon_pre_validator.execute_to_check_out_of_balance(None, params,\n step_price=context.step_counter.step_price)\n\n # Every send_transaction are calculated DEFAULT STEP at first\n context.step_counter.apply_step(StepType.DEFAULT, 1)\n input_size = self._get_byte_length(params.get('data', None))\n\n context.step_counter.apply_step(StepType.INPUT, input_size)\n self._transfer_coin(context, params)\n\n if to.is_contract:\n tx_result.score_address = self._handle_score_invoke(context, to, params)\n\n tx_result.status = TransactionResult.SUCCESS\n except BaseException as e:\n tx_result.failure = self._get_failure_from_exception(e)\n trace = self._get_trace_from_exception(context.current_address, e)\n context.tx_batch.clear()\n context.traces.append(trace)\n context.event_logs.clear()\n finally:\n # Revert func_type to IconScoreFuncType.WRITABLE\n # to avoid DatabaseException in self._charge_transaction_fee()\n context.func_type = IconScoreFuncType.WRITABLE\n\n # Charge a fee to from account\n final_step_used, final_step_price = \\\n self._charge_transaction_fee(\n context,\n params,\n tx_result.status,\n context.step_counter.step_used)\n\n # Finalize tx_result\n context.cumulative_step_used += final_step_used\n tx_result.step_used = final_step_used\n tx_result.step_price = final_step_price\n tx_result.cumulative_step_used = context.cumulative_step_used\n tx_result.event_logs = context.event_logs\n tx_result.logs_bloom = self._generate_logs_bloom(context.event_logs)\n tx_result.traces = context.traces\n\n return tx_result", "def gettxout(self, txid, index, mempool=True):\n tx = self.proxy.gettxout(txid, index, mempool)\n if tx != None:\n return TransactionInfo(**tx)\n else:\n return TransactionInfo()", "def make_unsigned(cls, outpoints, outputs, tx_fee=TRANSACTION_FEE, testnet=False, out_value=None):\n # build the inputs from the outpoints object\n SelectParams(\"testnet\" if testnet else \"mainnet\")\n txins = []\n in_value = 0\n for outpoint in outpoints:\n in_value += outpoint[\"value\"]\n txin = CMutableTxIn(COutPoint(lx(outpoint[\"txid\"]), outpoint[\"vout\"]))\n txin.scriptSig = CScript(x(outpoint[\"scriptPubKey\"]))\n txins.append(txin)\n\n # build the outputs\n txouts = []\n if isinstance(outputs, list):\n for output in outputs:\n value = output[\"value\"]\n address = output[\"address\"]\n txouts.append(CMutableTxOut(value, CIoncoinAddress(address).to_scriptPubKey()))\n else:\n value = out_value if out_value is not None else (in_value - tx_fee)\n txouts.append(CMutableTxOut(value, CIoncoinAddress(outputs).to_scriptPubKey()))\n\n # make the transaction\n tx = CMutableTransaction(txins, txouts)\n\n return IoncoinTransaction(tx)", "def coinbase_transaction(self):\n return self.txns[0]", "def get_tx(txid):\n return 
requests.get(BASE+f'/api/tx/{txid}').json()", "def exec_eth_transfer(self, timestamp=-1):\n ticker = self.ticker\n gdax_trading_account = self.trading_acc1\n cex_trading_account = self.trading_acc2\n gdax_eth_amount = gdax_trading_account.get_balance(ticker)\n cex_deposit_address = cex_trading_account.get_crypto_deposit_address(ticker)\n\n # transfer\n gdax_trading_account.transfer_crypto(ticker, cex_deposit_address, gdax_eth_amount)\n\n # audit info\n audit_info = {\n 'ticker': ticker,\n 'action_type': 'exec_eth_transfer',\n 'gdax_eth_amount': gdax_eth_amount,\n 'cex_deposit_address': cex_deposit_address\n }\n return audit_info", "def sign (self, node, tx):\n\n signed = node.signrawtransactionwithwallet (tx[\"hex\"])\n\n res = node.decoderawtransaction (signed[\"hex\"])\n res.update (signed)\n\n return res", "def _generate_transaction(\n payment: Payment,\n kind: str,\n amount: Decimal,\n *,\n id='',\n is_success=True,\n **data) -> Transaction:\n transaction = create_transaction(\n payment=payment,\n kind=kind,\n amount=amount,\n currency=data.pop('currency', payment.currency),\n gateway_response=data,\n token=id,\n is_success=is_success)\n return transaction", "def add_UI_transaction(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\t_type = read_type()\n\tadd_transaction(_day, _amount, _type, account)", "async def build_tx(\n self, priv: str, addrs: List[Tuple[str, D]], split_fee=True\n ):\n addr = Key.from_text(priv).address()\n\n spendables = await self.get_spendable_list_for_addr(addr)\n addrs.append((addr, D(0)))\n\n txs_out = []\n for payable in addrs:\n bitcoin_address, coin_value = payable\n coin_value *= COIN\n script = standard_tx_out_script(bitcoin_address)\n txs_out.append(TxOut(coin_value, script))\n\n txs_in = [spendable.tx_in() for spendable in spendables]\n tx = Tx(version=1, txs_in=txs_in, txs_out=txs_out, lock_time=0)\n\n tx.set_unspents(spendables)\n\n fee = await self.calc_fee(tx)\n\n total_coin_value = sum(\n spendable.coin_value\n for spendable in tx.unspents\n )\n coins_allocated = sum(tx_out.coin_value for tx_out in tx.txs_out)\n\n if split_fee:\n fee_per_tx_out, extra_count = divmod(fee, len(tx.txs_out) - 1)\n\n if coins_allocated > total_coin_value:\n raise NotEnoughAmountError(\n 'Coins allocated exceeds total spendable: '\n f'allocated: {coins_allocated}, '\n f'spendable: {total_coin_value}'\n )\n\n for tx_out in tx.txs_out:\n if tx_out.address(netcode=self.NETCODE) == addr:\n tx_out.coin_value = total_coin_value - coins_allocated\n else:\n tx_out.coin_value -= fee_per_tx_out\n if extra_count > 0:\n tx_out.coin_value -= 1\n extra_count -= 1\n if tx_out.coin_value < 1:\n raise NotEnoughAmountError(\n 'Not enough in each output to spread fee evenly: '\n f'{tx_out.address} allocated too little'\n )\n else:\n if (coins_allocated + fee) > total_coin_value:\n raise NotEnoughAmountError(\n 'Coins allocated exceeds total spendable: '\n f'allocated: {coins_allocated}, '\n f'fee: {fee}, '\n f'spendable: {total_coin_value}'\n )\n for tx_out in tx.txs_out:\n if tx_out.address(netcode=self.NETCODE) == addr:\n tx_out.coin_value = (\n total_coin_value - coins_allocated - fee\n )\n break\n return tx", "def payment_transaction(self, acquirer_id):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n transaction_obj = request.registry.get('payment.transaction')\n order = request.website.sale_get_order(context=context)\n\n if not order or not order.order_line or acquirer_id is None:\n return 
request.redirect(\"/shop/checkout\")\n\n assert order.partner_id.id != request.website.partner_id.id\n\n # find an already existing transaction\n tx = request.website.sale_get_transaction()\n if tx:\n tx_id = tx.id\n if tx.sale_order_id.id != order.id or tx.state in ['error', 'cancel'] or tx.acquirer_id.id != acquirer_id:\n tx = False\n tx_id = False\n elif tx.state == 'draft': # button cliked but no more info -> rewrite on tx or create a new one ?\n tx.write(dict(transaction_obj.on_change_partner_id(cr, SUPERUSER_ID, None, order.partner_id.id, context=context).get('values', {}), amount=order.amount_total))\n if not tx:\n tx_id = transaction_obj.create(cr, SUPERUSER_ID, {\n 'acquirer_id': acquirer_id,\n 'type': 'form',\n 'amount': order.amount_total,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'partner_id': order.partner_id.id,\n 'partner_country_id': order.partner_id.country_id.id,\n 'reference': request.env['payment.transaction'].get_next_reference(order.name),\n 'sale_order_id': order.id,\n }, context=context)\n request.session['sale_transaction_id'] = tx_id\n tx = transaction_obj.browse(cr, SUPERUSER_ID, tx_id, context=context)\n\n # update quotation\n request.registry['sale.order'].write(\n cr, SUPERUSER_ID, [order.id], {\n 'payment_acquirer_id': acquirer_id,\n 'payment_tx_id': request.session['sale_transaction_id']\n }, context=context)\n\n return payment_obj.render(\n cr, SUPERUSER_ID, tx.acquirer_id.id,\n tx.reference,\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=order.partner_shipping_id.id or order.partner_invoice_id.id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=dict(context, submit_class='btn btn-primary', submit_txt=_('Оформить')))", "def push_tx(self, crypto, tx_hex):\n raise NotImplementedError(\n \"This service does not support pushing transactions to the network. \"\n \"Or rather it has no defined 'push_tx' method.\"\n )", "def _add_genesis_or_mint_tx(self, so, outputs, txid, tx):\n token_type = so.message.token_type\n is_genesis = so.message.transaction_type == \"GENESIS\"\n token_id_hex = txid if is_genesis else so.message.token_id_hex\n assert token_type in valid_token_types, \"Invalid token type: FIXME\" # paranoia\n r_type, r_addr, _dummy = outputs[1] # may raise\n\n # Not clear here if we should be rejecting the whole message or\n # just the output. 
Comment this out when that becomes clear.\n # For now I'm doing what the EC-SLP wallet did rejecting this\n # genesis message here.\n assert (\n r_type == bitcoin.TYPE_ADDRESS\n ), \"Token genesis/mint: output 1 != TYPE_ADDRESS, ignoring tx\"\n\n # neither of the below 2 can ever be negative due to how we read the bytes\n baton_vout = so.message.mint_baton_vout\n token_qty = (\n so.message.initial_token_mint_quantity\n if is_genesis\n else so.message.additional_token_quantity\n )\n if baton_vout is not None:\n b_type, b_addr, _dummy = outputs[baton_vout] # may raise\n # SLP wallet silently ignored non-TYPE_ADDRESS, so we do same here.\n # assert b_type == bitcoin.TYPE_ADDRESS, f\"Token baton vout ({baton_vout}) != TYPE_ADDRESS, ignoring tx\"\n self._add_mint_baton(\n token_id_hex, txid, baton_vout, b_addr\n ) # this silently ignores non-TYPE_ADDRESS\n self._add_txo(token_id_hex, txid, 1, r_addr, token_qty)", "def save_transaction(**kwargs):\n if not 'user_id' in kwargs:\n raise AttributeError(\"Cannot create a transaction without user_id\")\n\n\n return History.create(\n user_id=kwargs['user_id'],\n from_curr=kwargs['currencyFrom'],\n to_curr=kwargs['currencyTo'],\n amount=kwargs['amountTo'],\n address_in=kwargs['payinAddress'],\n address_out=kwargs['payoutAddress'],\n extraid=kwargs['payinExtraId'],\n transaction_id=kwargs['id'],\n exchange_status=kwargs['status'],\n )", "def encode_txs(self, fee=3000):\r\n if len(self.df) == 0:\r\n self.update_unique_tokens()\r\n \r\n # TODO: Something more like this (not using apply)\r\n \"\"\"\r\n self.df[\"encoded_out\"] = self.contract.functions.quoteExactInputSingle(\r\n self.WETH, self.df[\"address\"], fee, self.ETH_SIZE, 0\r\n )._encode_transaction_data()\r\n \"\"\"\r\n self.df[\"encoded_in\"] = self.df[\"address\"].apply(\r\n lambda x: self.contract.functions.quoteExactInputSingle(\r\n self.WETH, x, fee, self.ETH_SIZE, 0\r\n )._encode_transaction_data()\r\n )\r\n\r\n self.df[\"encoded_out\"] = self.df[\"address\"].apply(\r\n lambda x: self.contract.functions.quoteExactOutputSingle(\r\n x, self.WETH, fee, self.ETH_SIZE, 0\r\n )._encode_transaction_data()\r\n )\r\n\r\n #TODO: Logger WARN\r\n #assert len(q.df) == q.df[\"encoded_out\"].nunique(), \"Duplicate encodings\"\r\n #assert len(q.df) == q.df[\"encoded_in\"].nunique(), \"Duplicate decodings\"\r\n\r\n #self.df[\"padded_idx\"] = self.df.index + len(self.df)\r\n \r\n self.df[\"rpc_in\"] = self.df.apply(\r\n lambda x: rpc_encoder.create_rpc_call(\r\n x.name,\r\n rpc_encoder.create_rpc_params(\r\n self.quoter_address,\r\n x[\"encoded_in\"]\r\n )\r\n ), axis=1\r\n )\r\n\r\n self.df[\"rpc_out\"] = self.df.apply(\r\n lambda x: rpc_encoder.create_rpc_call(\r\n x.name + len(self.df),\r\n rpc_encoder.create_rpc_params(\r\n self.quoter_address,\r\n x[\"encoded_out\"]\r\n )\r\n ), axis=1\r\n )\r\n \r\n self._update_call_list()", "def getTransaction(hash, config):\n platon = get_eth_obj(config)\n try:\n # 交易信息\n transaction = dict(platon.getTransaction(hash))\n HexBytes_to_str(transaction)\n # 交易回执信息\n transaction_receipt = dict(platon.getTransactionReceipt(hash))\n HexBytes_to_str(transaction_receipt)\n except Exception as e:\n cust_print('Failed to query transaction information,error message:{}.'.format(e))\n sys.exit(1)\n cust_print('query transaction information successful!', fg='g')\n info = \"transaction:\\n\"\n info += \"{}\\n\".format(json.dumps(dict(transaction), indent=2))\n info += \"\\n\\ntransaction receipt:\\n\"\n info += \"{}\".format(json.dumps(dict(transaction_receipt), indent=2))\n 
cust_print('{}'.format(info), fg='g')", "def send_transaction(self, signd_txn):\n return self.web3.eth.send_raw_transaction(signd_txn.rawTransaction).hex()", "def get_payu_transaction_id():\n hash_object = sha256(str(int(time.time() * 1000)).encode('utf-8'))\n txnid = hash_object.hexdigest().lower()[0:32]\n return txnid", "def make_txout(amount=None, address=None, counter=None):\n passphrase_template = \"correct horse battery staple txout {counter}\"\n\n if not counter:\n counter = random.randrange(0, 2**50)\n\n if not address:\n passphrase = passphrase_template.format(counter=counter)\n address = make_address_from_passphrase(bytes(passphrase, \"utf-8\"))\n\n if not amount:\n maxsatoshis = (21 * 1000 * 1000) * (100 * 1000 * 1000) # 21 million BTC * 100 million satoshi per BTC\n amount = random.randrange(0, maxsatoshis) # between 0 satoshi and 21 million BTC\n\n txout = CMutableTxOut(amount, CBitcoinAddress(address).to_scriptPubKey())\n\n return txout", "def _add_send_tx(self, so, outputs, txid, tx):\n token_type = so.message.token_type\n token_id_hex = so.message.token_id_hex\n assert token_type in valid_token_types, \"Invalid token type: FIXME\" # paranoia\n amounts = so.message.token_output\n amounts = amounts[\n : len(outputs)\n ] # truncate amounts to match outputs -- shouldn't we reject such malformed messages?\n for n, qty in enumerate(amounts):\n if qty <= 0: # safely ignore 0 qty as per spec\n continue\n _type, addr, _dummy = outputs[\n n\n ] # shouldn't raise since we truncated list above\n self._add_txo(token_id_hex, txid, n, addr, qty)", "def getunspent_command(chat, message, args):\n\n total = 0\n msg = \"\"\n get_last = os.popen(path_to_bin + \"/bitcanna-cli listunspent\").read()\n loaded_json = json.loads(get_last)\n chat.send (\"Unspent inputs to transfer\\n======================\")\n for tx in loaded_json:\n if tx['amount'] == 3.30000000: # > 2.6 for fullnode\n msg = msg + \"Mint: \" + str(tx['spendable']) + \" BCNA: \" + str(tx['amount']) + \"\\n\"\n total = total + tx['amount']\n else:\n msg = msg + 'Other: ' + str(tx['spendable']) + \" BCNA: \" + str(tx['amount']) + \"\\n\"\n print(msg) #Is printed in console , could be saved in a file and sent by telegram\n #chat.send (msg) #if there are a lot of inputs Telegram can't handle in a message\n chat.send (\"Make your transfer with \" + (str(total)) + \" BCNA\")", "def encode(self):\n\n packet = (\n\n str(self.pos_number) + # 2 octets\n\n str(self.transaction_result) + # 1 octet\n\n ('%.0f' % (self.amount * 100)).zfill(8) + # 8 octets\n\n str(self.payment_mode) + # 1 octet\n\n str(self.repport) + # 55 octets\n\n str(self.currency_numeric) + # 3 octets\n\n str(self.private) # 10 octets\n\n )\n\n packet_len = len(packet)\n\n if packet_len not in [TERMINAL_ANSWER_COMPLETE_SIZE - 3, TERMINAL_ANSWER_LIMITED_SIZE - 3]:\n raise SequenceDoesNotMatchLengthException(\n 'Cannot create response payment sequence with len != {0} or {1} octet(s) '\n 'Currently have {2} octet(s).'\n .format(TERMINAL_ANSWER_COMPLETE_SIZE - 3, TERMINAL_ANSWER_LIMITED_SIZE - 3, packet_len))\n\n return TeliumData.framing(packet)", "def send_or_transact(args):\n if current_app.config.get('TESTING') == True:\n tx = transact(args)\n else:\n tx = send(g.w3, current_app.config['PRIVATE_KEY'], args)\n\n return tx", "def _santander_generate_digital_sign(self,type, acquirer,time_stamp,merchant_id,reference,amount,cur,secret, result=False,\n message=False,\n pasref=False,\n authcode=False,\n\t\t\t\t\t\t\t\t\t\t\t\t order_id=False):\n assert acquirer.provider == 
'santander'\n\n '''\n def get_value(key):\n if values.get(key):\n return values[key]\n return ''\n\n if inout == 'out':\n keys = ['Ds_Amount',\n 'Ds_Order',\n 'Ds_MerchantCode',\n 'Ds_Currency',\n 'Ds_Response']\n else:\n keys = ['Ds_Merchant_Amount',\n 'Ds_Merchant_Order',\n 'Ds_Merchant_MerchantCode',\n 'Ds_Merchant_Currency',\n 'Ds_Merchant_TransactionType',\n 'Ds_Merchant_MerchantURL']\n sign = ''.join('%s' % (get_value(k)) for k in keys)\n # Add the pre-shared secret key at the end of the signature\n sign = sign + acquirer.santander_secret_key\n if isinstance(sign, str):\n sign = urlparse.parse_qsl(sign)\n shasign = sha1(sign).hexdigest().upper()\n '''\n if type == 'in':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(reference) + '.' + str(amount) + '.' + str(cur))\n clave2 = sha1(str(clave1.hexdigest()) + '.' + str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())\n if type == 'out':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(order_id) + '.' + str(result) + '.' + str(message) + '.' + str(pasref) + '.' + str(authcode))\n clave2 = sha1(str(clave1.hexdigest()) + '.' + str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())", "def transact(self, transaction_type, digicoins_No):\n\n #Raise an exception of digicoins_No is not multiple of 10.\n try:\n if digicoins_No % 10 != 0:\n raise MyError.MyError(digicoins_No)\n except Exception as inst:\n print \"\\nYou can only transact multiples of 10 of digicoins.\\nTransaction Failed!\"\n return\n\n lowest_price = 0\n digicoins_remain = digicoins_No\n while digicoins_remain > 0:\n if digicoins_remain > 100:\n digicoins_No_to_be_transacted = 100\n else:\n digicoins_No_to_be_transacted = digicoins_remain\n\n A_price = self.Broker1.offered_price(digicoins_No_to_be_transacted)\n B_price = self.Broker2.offered_price(digicoins_No_to_be_transacted)\n\n if A_price < B_price:\n self.Broker1.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += A_price\n else:\n self.Broker2.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += B_price\n digicoins_remain -= 100\n\n if transaction_type == \"BUY\":\n print self.name, \"buys\", digicoins_No_to_be_transacted, \"at\", lowest_price\n #update the clients list with a pair [price, digicoins]\n self.transactions.append([lowest_price, digicoins_No])\n else:\n print self.name, \"sells\", digicoins_No_to_be_transacted, \"at\", lowest_price\n self.transactions.append([lowest_price, -digicoins_No])", "def get_transaction(tx):\n global INVOKE_COUNTER\n INVOKE_COUNTER = INVOKE_COUNTER + 1\n if INVOKE_COUNTER % 3 == 0:\n return \"\"\n else:\n raise_connection_error()", "def CreateTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def sign_transaction_with_cosignatories(\n self,\n initiator: Account,\n gen_hash: typing.AnyStr,\n cosignatories: typing.Optional[typing.Sequence[Account]] = None,\n fee_strategy: util.FeeCalculationStrategy = util.FeeCalculationStrategy.MEDIUM,\n ) -> SignedTransaction:\n\n transaction = self.to_catbuffer(fee_strategy=fee_strategy)\n\n if (cosignatories):\n COSIGNATURE_SIZE = 96\n new_fee = util.calculate_fee(\n fee_strategy,\n self.catbuffer_size() + COSIGNATURE_SIZE * len(cosignatories),\n self.max_fee,\n )\n\n if (self.max_fee != new_fee):\n transaction = transaction[0:106] + new_fee.to_bytes(8, 'little') + transaction[114:]\n\n payload = initiator.sign(transaction, gen_hash) # type: ignore\n hash = 
self.transaction_hash(payload, gen_hash) # type: ignore\n\n if (cosignatories):\n for cosignatory in cosignatories:\n payload += util.decode_hex(cosignatory.public_key)\n payload += cosignatory.sign_data(hash)\n\n new_size = len(payload)\n payload = new_size.to_bytes(4, 'little') + payload[4:]\n\n return SignedTransaction( # type: ignore\n payload,\n hash,\n initiator.public_key,\n self.type,\n self.network_type\n )", "def actual_ico_funding_goal():\n return to_wei(0, \"ether\")", "def buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.buy(symbol, quantity, in_force, extended)", "def push_tx(tx, network='testnet', fee=False):\n\n if network in ['testnet', 'main']:\n if network is 'testnet':\n if fee:\n url = 'http://tbtc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/test3/txs/push'\n elif network is 'main':\n if fee:\n url = 'http://btc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/main/txs/push'\n\n if fee:\n data = {'hex': tx}\n else:\n data = {'tx': tx}\n\n response = post(url, data=json.dumps(data))\n else:\n response = 'Bad network'\n\n r_code = response.status_code\n r_reason = response.reason\n\n if r_code is 200:\n # blockr server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['data'])\n elif r_code is 201:\n # blockcyper server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['tx']['hash'])\n else:\n tx_hash = None\n\n return r_code, r_reason, tx_hash", "def do_buy(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.get_coins_from_faucet(args)", "def make_transaction(self):\n if self.pk:\n raise CannotRecreateTransactionOnRecurredCost(\n 'The transaction for this recurred cost has already been created. You cannot create it again.'\n )\n\n amount = self.recurring_cost.get_amount(self.billing_cycle)\n\n # It is quite possible that there will be nothing to bill, in which\n # case we cannot create a transaction with no legs, nor can we create\n # legs with zero values. 
Therefore we don't create any transaction.\n if not amount:\n return None\n\n self.transaction = Transaction.objects.create(\n description='Created by recurring cost',\n date=self.billing_cycle.date_range.lower\n )\n\n # Use the SplitManager's custom queryset's split() method to get the\n # amount to be billed for each split\n splits = self.recurring_cost.splits.all().split(amount)\n\n # Create the transaction leg for the outbound funds\n # (normally to an expense account)\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(amount, self.recurring_cost.currency),\n account=self.recurring_cost.to_account,\n ))\n\n for split, split_amount in splits:\n # Create the transaction legs for the inbound funds\n # (from housemate accounts)\n if split_amount:\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(split_amount * -1, self.recurring_cost.currency),\n account=split.from_account,\n ))\n\n return self.transaction", "def tx_create(self,\n keypair,\n amount=1,\n deposit=CONTRACT_DEFAULT_DEPOSIT,\n init_state=\"()\",\n gas=CONTRACT_DEFAULT_GAS,\n gas_price=CONTRACT_DEFAULT_GAS_PRICE,\n fee=DEFAULT_FEE,\n vm_version=CONTRACT_DEFAULT_VM_VERSION,\n tx_ttl=DEFAULT_TX_TTL):\n try:\n call_data = self.encode_calldata(\"init\", init_state)\n\n # get the transaction builder\n txb = TxBuilder(self.client, keypair)\n # create spend_tx\n tx, sg, tx_hash, contract_id = txb.tx_contract_create(self.bytecode, call_data, amount, deposit, gas, gas_price, vm_version, fee, tx_ttl)\n # post the transaction to the chain\n txb.post_transaction(tx, tx_hash)\n if self.client.blocking_mode:\n txb.wait_tx(tx_hash)\n # store the contract address in the instance variabl\n self.address = contract_id\n return tx\n except OpenAPIClientException as e:\n raise ContractError(e)", "def create_transaction(self, receiver, amount, comment=\"\"):\n new_tx = Transaction.new(sender=self.pubkey, receiver=receiver,\n amount=amount, privkey=self.privkey,\n comment=comment)\n tx_json = new_tx.to_json()\n msg = \"t\" + json.dumps({\"tx_json\": tx_json})\n self.add_transaction(tx_json)\n self.broadcast_message(msg)\n return new_tx", "def create_gas_command(gas_amount, idx):\n # enable_bit = 1 << 7\n # max_gas = 176.526\n # offset = -83.3\n # scaled_offset = offset / 1023.0 * max_gas\n # offset_raw = 21\n # offset2_raw = 11\n # print(offset_raw)\n # print(scaled_offset)\n # gas_amount_ = (float(gas_amount) / 1023.0) * max_gas\n # print(gas_amount_)\n # gas_amount_2 = (float(gas_amount) / 1023.0) * max_gas / 2.0\n # gas_amount_ = gas_amount_ + offset_raw\n # print(gas_amount_)\n # gas_amount_2 = gas_amount_2 + offset2_raw\n\n # gas_amount_ = int(gas_amount_ / max_gas * 1023.0)\n # gas_amount_2 = int(gas_amount_2 / max_gas * 1023.0)\n # print(\"gas 1 gas 2\")\n # print(gas_amount_)\n # print(gas_amount_2)\n\n offset1_raw = 328 # 21\n offset2_raw = 656 # 11\n gas_amount_1 = gas_amount + offset1_raw\n gas_amount_2 = gas_amount * 2 + offset2_raw\n if gas_amount <= 0:\n enable_bit = 0\n else:\n enable_bit = 1 << 7\n\n if gas_amount <= 0:\n enable_bit = 0\n msg = struct.pack(\"!HHB\", gas_amount_1, gas_amount_2, enable_bit)\n return make_can_msg(0x200, msg, idx, 0)", "def signrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to 
signrawtransaction.\")\n\n transaction = CMutableTransaction.deserialize(given_bytes)\n transaction_hex = b2x(transaction.serialize())\n return {\"hex\": transaction_hex}", "def Transaction(self,OutBuffer, read = 0):\n ReceiveBuffer = create_string_buffer(chr(0) * 0x80)\n TransmitBuffer= create_string_buffer(OutBuffer)\n Transaction = spi_ioc_transfer()\n Transaction.speed_hz = c_uint32(self.Speed)\n Transaction.tx_buf=addressof(TransmitBuffer)\n Transaction.rx_buf=addressof(ReceiveBuffer)\n Transaction.delay_usecs = self.Delay\n if read > 0 and self.Speed!= self.ReadSpeed: # Slow down speed for reading\n Transaction.speed_hz = self.ReadSpeed\n elif read==0 and self.Speed!=self.WriteSpeed:\n Transaction.speed_hz = self.WriteSpeed\n if self.Speed != Transaction.speed_hz:\n self.Speed = Transaction.speed_hz\n self.SetSpeed()\n if read > len(OutBuffer):\n Transaction.len=read\n else:\n Transaction.len= len(OutBuffer)\n Transaction.bits_per_word = self.Bits\n Transaction.cs_change = 0\n Transaction.pad = 0\n # print type(addressof(Transaction))\n ret = ioctl(self.File,SPI_IOC_MESSAGE(1), addressof(Transaction))\n return ret, ReceiveBuffer", "def create_transaction(conn, transaction):\n sql = ''' INSERT INTO transactions(date, value, currency, desc, categ)\n VALUES(?, ?, ?, ?, ?) '''\n cur = conn.cursor()\n cur.execute(sql, transaction)", "def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))", "def emit_tag(self):\n root = etree.Element('CdtTrfTxInf')\n pmtid = etree.SubElement(root, 'PmtId')\n # etree.SubElement(pmtid, 'InstrId').text = self.tx_id\n # etree.SubElement(pmtid, 'EndToEndId').text = self.eeid\n etree.SubElement(pmtid, 'EndToEndId').text = self.creditor.name\n # info = etree.SubElement(root, 'PmtTpInf')\n # purp = etree.SubElement(info, 'CtgyPurp')\n # etree.SubElement(purp, 'Cd').text = self.category\n amt = etree.SubElement(root, 'Amt')\n etree.SubElement(\n amt, 'InstdAmt', attrib={'Ccy': 'EUR'}).text = str(self.amount)\n if hasattr(self, 'ultimate_debtor'):\n root.append(self.ultimate_debtor.__tag__('UltmtDbtr'))\n if self.account.is_foreign() or self.bic:\n agt = etree.SubElement(root, 'CdtrAgt')\n agt.append(Bank(bic=self.bic).__tag__())\n root.append(self.creditor.__tag__('Cdtr'))\n root.append(self.account.__tag__('CdtrAcct'))\n if hasattr(self, 'ultimate_creditor'):\n root.append(self.ultimate_creditor.__tag__('UltmtCdtr'))\n rmtinf = etree.SubElement(root, 'RmtInf')\n if hasattr(self, 'rmtinfo'):\n etree.SubElement(rmtinf, 'Ustrd').text = self.rmtinfo\n else:\n etree.SubElement(rmtinf, 'Ustrd').text = ''.join(\n [str(doc) for doc in self.docs])\n return root", "def to_raw_tx(self):\n return b2x(self.tx.serialize())", "def _add_utxo(self, item: UtxoIndexItem) -> None:\n raise NotImplementedError", "def _query_transaction():\n # if not mode or mode not in mode_list:\n # raise ValidationError('Mode must be one of the following {}.'\n # .format(', '.join(mode_list)))\n\n tx_dict = \"gautham=awesome\"\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n \"method\": \"abci_query\",\n \"jsonrpc\": \"2.0\",\n #\"params\": [None, tx_dict, None, None],\n \"params\": [None, encode_transaction(tx_dict), None, False],\n #\"params\": [None, encode_transaction(tx_dict), None],\n \"id\": str(uuid4())\n }\n\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)", "def buildAsk (self, node, name, value, 
price):\n\n nameData = node.name_show (name)\n namePrevOut = node.gettxout (nameData[\"txid\"], nameData[\"vout\"])\n nameValue = namePrevOut[\"value\"]\n addr = node.getnewaddress ()\n\n tx = CTransaction ()\n nameOut = COutPoint (int (nameData[\"txid\"], 16), nameData[\"vout\"])\n tx.vin.append (CTxIn (nameOut))\n tx.vout.append (self.buildNameUpdate (name, value, addr, nameValue + price))\n\n txHex = tx.serialize ().hex ()\n\n signed = node.signrawtransactionwithwallet (txHex, [],\n \"SINGLE|ANYONECANPAY\")\n assert signed[\"complete\"]\n return signed[\"hex\"]", "def _generate_cybersource_sa_payload(*, order, receipt_url, cancel_url, ip_address):\n # http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_WM/Secure_Acceptance_WM.pdf\n # Section: API Fields\n\n # NOTE: be careful about max length here, many (all?) string fields have a max\n # length of 255. At the moment none of these fields should go over that, due to database\n # constraints or other reasons\n\n coupon_redemption = CouponRedemption.objects.filter(order=order).first()\n coupon_version = (\n coupon_redemption.coupon_version if coupon_redemption is not None else None\n )\n\n line_items = {}\n total = 0\n for i, line in enumerate(order.lines.all()):\n product_version = line.product_version\n unit_price = get_product_version_price_with_discount(\n coupon_version=coupon_version, product_version=product_version\n )\n line_items[f\"item_{i}_code\"] = str(product_version.product.content_type)\n line_items[f\"item_{i}_name\"] = str(product_version.description)[:254]\n line_items[f\"item_{i}_quantity\"] = line.quantity\n line_items[f\"item_{i}_sku\"] = product_version.product.content_object.id\n line_items[f\"item_{i}_tax_amount\"] = \"0\"\n line_items[f\"item_{i}_unit_price\"] = str(unit_price)\n\n total += unit_price\n\n # At the moment there should only be one line\n product_version = order.lines.first().product_version\n product = product_version.product\n content_object = product.content_object\n readable_id = get_readable_id(content_object)\n\n merchant_fields = {\n \"merchant_defined_data1\": str(product.content_type),\n \"merchant_defined_data2\": readable_id,\n \"merchant_defined_data3\": \"1\",\n }\n\n if coupon_version is not None:\n merchant_fields[\"merchant_defined_data4\"] = coupon_version.coupon.coupon_code\n merchant_fields[\"merchant_defined_data5\"] = ( # company name\n coupon_version.payment_version.company.name\n if coupon_version.payment_version.company\n else \"\"\n )\n merchant_fields[\"merchant_defined_data6\"] = (\n coupon_version.payment_version.payment_transaction or \"\"\n )\n merchant_fields[\"merchant_defined_data7\"] = (\n coupon_version.payment_version.payment_type or \"\"\n )\n\n return {\n \"access_key\": settings.CYBERSOURCE_ACCESS_KEY,\n \"amount\": str(total),\n \"consumer_id\": order.purchaser.username,\n \"currency\": \"USD\",\n \"locale\": \"en-us\",\n **line_items,\n \"line_item_count\": order.lines.count(),\n **merchant_fields,\n \"reference_number\": order.reference_number,\n \"profile_id\": settings.CYBERSOURCE_PROFILE_ID,\n \"signed_date_time\": now_in_utc().strftime(ISO_8601_FORMAT),\n \"override_custom_receipt_page\": receipt_url,\n \"override_custom_cancel_page\": cancel_url,\n \"transaction_type\": \"sale\",\n \"transaction_uuid\": uuid.uuid4().hex,\n \"unsigned_field_names\": \"\",\n \"customer_ip_address\": ip_address if ip_address else None,\n }", "def blockchain_set_tx_detail(transaction):\n info_endpoint = \"address/%s?format=json\" % 
transaction.to_address\n try:\n info = json.loads(util.call_api(info_endpoint))\n except:\n return\n\n transaction.txid = info['txs'][0]['hash']\n transaction.amount_paid = round(info['total_received'] * SATOSHI, 8)\n\n if transaction.amount_paid >= transaction.amount_btc:\n transaction.status = Transaction.STATUS_CONFIRMED\n send_webhook.apply_async(kwargs={'transaction_id': transaction.id})\n\n transaction.save()", "def sendrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to sendrawtransaction.\")\n transaction = CMutableTransaction.deserialize(given_bytes)\n return b2lx(transaction.GetHash())", "def trade(self, send_Wallet, recv_Wallet, amount):\n # send_balance = self.web3.eth.get_balance(send_Wallet.address)\n # recv_balance = self.web3.eth.get_balance(recv_Wallet.address)\n # Transaction sequence moving from send_Wallet to rcv_Wallet\n print('{s:{c}^{n}}'.format(s=' creating transaction data ', n=80, c='.'))\n txn = self.mk_simple_transaction(send_Wallet.address, recv_Wallet.address, amount)\n print(txn)\n print('{s:{c}^{n}}'.format(s=' signing transaction ', n=80, c='.'))\n signed_txn = self.sign_transaction(txn, send_Wallet.prvkey)\n print(\"signed transaction hash = {}\".format(signed_txn))\n print('{s:{c}^{n}}'.format(s=' sending transaction ', n=80, c='.'))\n txn_hash = self.send_transaction(signed_txn)\n print(\"transaction hash = {}\".format(txn_hash))\n print('{s:{c}^{n}}'.format(s=' getting transaction receipt ', n=80, c='.'))\n receipt = self.wait_for_receipt(txn_hash)\n # pdb.set_trace()\n print(receipt)\n print('{s:{c}^{n}}'.format(s=' getting block transaction was a part of ', n=80, c='.')) \\\n # realistically this part of confirming the status of the block & transaction (mined or not)\n # might be able to be checked using the reciept? 
Not sure though\n # Answer : Looks like once we get a receipt from the transaction, the transaction will have\n # been completed and added to the ledger (aka block is mined i believe)\n block = self.get_tnx_block(receipt.blockNumber)\n # above line for getting transactino block is flakey...\n # not sure why, but the error that gets raised is as follows\n # raise BlockNotFound(f\"Block with id: {block_identifier} not found.\")\n # web3.exceptions.BlockNotFound: Block with id: 0x9c5b5d not found.\n print(block)", "def put_ask(curr, depth, asset, price, price2):\n #Transaction 1\n tmp_list = bitty.buy_limit(asset+\"-\"+curr, depth, price)\n time.sleep(5) #wait for network latency\n wait = 0\n while wait < 15:\n oList = bitty.get_open_orders(asset + \"-\" + curr)['result']\n if oList: #if there are orders open, wait until 15\n wait += 1\n print(\"Alt order outstanding\")\n else:#order is filled, switch liquidity assets\n break\n time.sleep(1)\n print(wait)\n if wait == 15: #if it's been 15 seconds and the order is not filled, cancel it\n\n for o in oList:\n orderId = o['OrderUuid']\n bitty.cancel(orderId)\n time.sleep(5)\n if asset == \"BTC\":\n asset = \"ETH\"\n elif asset == \"ETH\":\n asset = \"BTC\"\n bal_result = bitty.get_balance(curr)['result'] # gets exact balance of the altcoin, including dust\n depth_to_main = bal_result['Balance']\n print(\"Order canceled, submitting sell order for any quantity filled.\")\n bitty.sell_limit(asset + \"-\" + curr, depth_to_main, price2)\n return(asset) #back to searching\n\n if asset == \"BTC\":\n asset = \"ETH\"\n elif asset == \"ETH\":\n asset = \"BTC\"\n\n #Transaction 2\n bal_result = bitty.get_balance(curr)['result'] # gets exact balance of the altcoin, including dust\n depth_to_main = bal_result['Balance']\n print(depth_to_main)\n print(\"Submitting transaction 2, please wait, this may take a while.\")\n tmp_list = bitty.sell_limit(asset + \"-\" + curr, depth_to_main, price2)\n while tmp_list['success'] == False:\n print(\"Order failed.\")\n time.sleep(5)\n tmp_list = bitty.sell_limit(asset + \"-\" + curr, depth_to_main, price2)\n\n time.sleep(15)#wait for latency\n wait = 5\n oList= []\n while wait < 86400: #wait ten minutes\n oList = bitty.get_open_orders(asset + \"-\" + curr)['result']\n if oList:\n wait += 5\n if wait % 60 == 0:\n price2 = recast_lower_sell(oList, asset, curr, price2)\n #elif wait > 675:\n # price2 = recast_lower_sell(oList, asset, curr, depth_to_main, price2)\n print(\"Main order outstanding\")\n else:\n return(asset)\n time.sleep(5)\n if wait == 86400:\n return(\"timeout\")", "def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def create_outgoing_transaction(transaction: OutgoingTransactionCreate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):\n try:\n transaction = transaction_service.create(db, auth_user, transaction)\n return transaction\n except ItensNotFound as err:\n\t raise HTTPException(status_code=404, detail=f\"Os seguintes produtos não foram encontrados no sistema: {str(err)}\")\n except ProductsNotFound as err:\n raise HTTPException(status_code=400, detail=\"A movimentação a ser registrada deve 
conter no minimo um produto.\")\n except InvalidStockQuantity as err:\n products_missing = transaction_service.make_response(db, str(err))\n raise HTTPException(status_code=400, detail={\n \"message\": \"A quantidade informada para os seguintes produtos deve ser maior do que zero.\",\n \"products_missing\": products_missing\n })\n except NotEnoughStockQuantity as err:\n products_missing = transaction_service.make_response(db, str(err))\n raise HTTPException(status_code=422, detail={\n \"message\": \"Os produtos informados não possuem quantidade em estoque suficiente para a saída.\",\n \"products_missing\": products_missing\n })", "def test_05_transaction_create_buy_stock(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n buy_stock_aapl = Transaction.buy_stock(\n portfolio=portfolio,\n asset=\"AAPL\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=256,\n unit_price=162.94,\n user=user\n )\n\n buy_stock_ibm = Transaction.buy_stock(\n portfolio=portfolio,\n asset=\"IBM\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=128,\n unit_price=145.56,\n user=user\n )\n\n buy_stock_msft = Transaction.buy_stock(\n portfolio=portfolio,\n asset=\"MSFT\",\n t_currency=TRANSACTION_CURRENCY_EUR,\n amount=64,\n unit_price=76.22,\n user=user\n )\n\n self.assertTrue(isinstance(buy_stock_aapl, Transaction),\n msg=\"Transaction is NOT returning a valid object while buying an APPLE stock\")\n print(\"Transaction buy_stock method is returning a valid APPLE transaction: {}\".format(\n buy_stock_aapl))\n\n self.assertTrue(isinstance(buy_stock_ibm, Transaction),\n msg=\"Transaction is NOT returning a valid object while buying an IBM stock\")\n print(\"Transaction buy_stock method is returning a valid IBM transaction: {}\".format(\n buy_stock_ibm))\n\n self.assertTrue(isinstance(buy_stock_msft, Transaction),\n msg=\"Transaction is NOT returning a valid object while buying an MICROSOFT stock\")\n print(\"Transaction buy_stock method is returning a valid MICROSOFT transaction: {}\".format(\n buy_stock_msft))", "def send_contract_tx(contract: Web3Contract, function_name: str, from_acc: str, private_key: bytes,\n gas: int = 0, gas_price: int = 0, value: int = 0, args: Tuple = ()):\n\n tx = getattr(contract.functions, function_name)(*args). 
\\\n buildTransaction(\n {\n 'from': from_acc,\n 'chainId': w3.eth.chainId,\n # gas_price is in gwei\n 'gasPrice': gas_price * 1e9 if gas_price else estimate_gas_price(),\n 'gas': gas or None,\n 'nonce': w3.eth.getTransactionCount(from_acc, block_identifier='pending'),\n 'value': value\n })\n signed_txn = w3.eth.account.sign_transaction(tx, private_key)\n return w3.eth.sendRawTransaction(signed_txn.rawTransaction)", "def spend_sh_fund(tx_ins, wif_keys, tx_outs):\n _txs_in = []\n _un_spent = []\n for tx_id, idx, balance, address, _ in tx_ins:\n # must h2b_rev NOT h2b\n tx_id_b = h2b_rev(tx_id)\n _txs_in.append(TxIn(tx_id_b, idx))\n\n _un_spent.append(Spendable(balance, network.contract.for_address(address),\n tx_id_b, idx))\n\n _txs_out = []\n for balance, receiver_address in tx_outs:\n _txs_out.append(TxOut(balance, network.contract.for_address(receiver_address)))\n\n version, lock_time = 1, 0\n tx = Tx(version, _txs_in, _txs_out, lock_time)\n tx.set_unspents(_un_spent)\n\n # construct hash160_lookup[hash160] = (secret_exponent, public_pair, compressed) for each individual key\n hash160_lookup = build_hash160_lookup([network.parse.wif(wif_key).secret_exponent() for wif_key in wif_keys],\n [secp256k1_generator])\n\n for i in range(0, len(tx_ins)):\n # you can add some conditions that if the input script is not p2sh type, not provide p2sh_lookup,\n # so that all kinds of inputs can work together\n p2sh_lookup = build_p2sh_lookup([binascii.unhexlify(tx_ins[i][-1])])\n r = BitcoinSolver(tx).solve(hash160_lookup, i, hash_type=SIGHASH_ALL, p2sh_lookup=p2sh_lookup)\n if isinstance(r, bytes):\n tx.txs_in[i].script = r\n else:\n tx.txs_in[i].script = r[0]\n tx.set_witness(i, r[1])\n\n return tx.as_hex(), tx.id()", "def test_sign_tx_fetchai(self):\n tx_hash = Web3.keccak(text=\"some_bytes\")\n\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n ledger_id=self.ledger_id,\n info=self.info,\n signing_payload={\"tx_hash\": tx_hash},\n )\n\n tx_signature = self.decision_maker._sign_tx(tx_message)\n assert tx_signature is not None", "def get_withdrawal_transaction(contract, first_valid, gh, fee):\n address = logic.address(contract)\n _, ints, bytearrays = logic.read_program(contract)\n if not (len(ints) == 7 and len(bytearrays) == 2):\n raise error.WrongContractError(\"periodic payment\")\n amount = ints[5]\n withdrawing_window = ints[4]\n period = ints[2]\n max_fee = ints[1]\n lease_value = bytearrays[0]\n receiver = encoding.encode_address(bytearrays[1])\n\n if first_valid % period != 0:\n raise error.TemplateInputError(\n \"first_valid must be divisible by the period\")\n txn = transaction.PaymentTxn(\n address, fee, first_valid, first_valid + withdrawing_window, gh,\n receiver, amount, lease=lease_value)\n\n if txn.fee > max_fee:\n raise error.TemplateInputError(\n \"the transaction fee should not be greater than \"\n + str(max_fee))\n\n lsig = transaction.LogicSig(contract)\n stx = transaction.LogicSigTransaction(txn, lsig)\n return stx", "def make_fake_transaction_data(date=None,\n transaction_type=None,\n transaction_type_raw=None,\n spent_currency=None,\n spent_amount=None,\n source_peer=None,\n acquired_currency=None,\n 
acquired_amount=None,\n target_peer=None,\n fee_currency=None,\n fee_amount=None,\n tags=None):\n # pylint: disable=E1101\n\n fake = Faker()\n transaction_data = schema.TransactionData()\n transaction_data.date = date or fake.date_time_between(\n start_date=\"-30y\", end_date=\"now\",\n tzinfo=None).strftime(\"%d.%m.%Y %H:%m:%S\")\n transaction_data.transaction_type = transaction_type or \"exchange\"\n transaction_data.transaction_type_raw = transaction_type_raw or \"Buy\"\n transaction_data.spent_currency = \\\n spent_currency or fake.cryptocurrency_code()\n transaction_data.spent_amount = spent_amount or random.uniform(1, 20)\n transaction_data.source_peer = source_peer or 1\n transaction_data.acquired_currency = \\\n acquired_currency or fake.cryptocurrency_code()\n transaction_data.acquired_amount = \\\n acquired_amount or random.uniform(0.001, 10)\n transaction_data.target_peer = target_peer or 1\n transaction_data.fee_currency = fee_currency or transaction_data.spent_currency\n transaction_data.fee_amount = fee_amount or random.uniform(0.000001, 0.001)\n transaction_data.tags = tags or [\"tag1\", \"tag2\"]\n return transaction_data", "def test_17_transaction_create_sell_cash(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=100000,\n unit_price=1.17,\n user=user\n )\n\n self.assertTrue(isinstance(sell_cash_eur, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling EUR in cash\")\n print(\"Transaction sell_cash method is returning a valid EUR transaction: {}\".format(\n sell_cash_eur))\n\n \"\"\"Is transaction avoiding short sell cash objects?\"\"\"\n short_sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=500000,\n unit_price=1.10,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_cash_eur, Transaction),\n msg=\"Transaction is NOT avoiding short selling EUR in cash\")\n print(\"Transaction sell_cash method is avoiding a short sell EUR transaction: {}\".format(\n short_sell_cash_eur))" ]
[ "0.65796894", "0.630716", "0.62986237", "0.6173014", "0.60841966", "0.5914946", "0.5743247", "0.5731016", "0.5521235", "0.55083513", "0.5494718", "0.5486861", "0.54759413", "0.54701596", "0.54108584", "0.54077893", "0.53893286", "0.5363596", "0.5303798", "0.52935517", "0.52907497", "0.52741724", "0.52684134", "0.5263301", "0.5262662", "0.5258399", "0.52453554", "0.5237799", "0.52267975", "0.52017426", "0.51909053", "0.5190202", "0.5162038", "0.51514286", "0.51477456", "0.51417434", "0.51392275", "0.5133098", "0.5112253", "0.5106457", "0.50950295", "0.5084842", "0.50842494", "0.50791085", "0.5068837", "0.50679636", "0.50595355", "0.5057058", "0.50308675", "0.5020965", "0.5006124", "0.4999016", "0.49886727", "0.4960925", "0.49602532", "0.49538276", "0.49511656", "0.49499148", "0.4947102", "0.4942551", "0.49417853", "0.49340382", "0.49338928", "0.49271664", "0.4922939", "0.49130288", "0.49087456", "0.49073422", "0.49061137", "0.49049115", "0.49020517", "0.4901938", "0.4897471", "0.489649", "0.48878276", "0.48725763", "0.48650816", "0.4845085", "0.4836002", "0.4835376", "0.48350447", "0.4816832", "0.48153198", "0.48022708", "0.48018235", "0.47993118", "0.47869784", "0.47835314", "0.47774342", "0.4764926", "0.47615963", "0.47548538", "0.4752577", "0.4746444", "0.4744865", "0.47424752", "0.47384152", "0.47364354", "0.47296306", "0.472462" ]
0.70295274
0
Signs a transaction (in format of build_tx) with the given node, and returns the decoderawtransaction-type result again.
def sign (self, node, tx):
    signed = node.signrawtransactionwithwallet (tx["hex"])
    res = node.decoderawtransaction (signed["hex"])
    res.update (signed)
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.wallet.private_key_rsa)\n response = dict(signature=signature)\n return jsonify(response), 200", "def sign_transaction(self, transaction, prvkey):\n return self.web3.eth.account.sign_transaction(transaction, prvkey)", "def signrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to signrawtransaction.\")\n\n transaction = CMutableTransaction.deserialize(given_bytes)\n transaction_hex = b2x(transaction.serialize())\n return {\"hex\": transaction_hex}", "def sign_transaction(self, transaction):\n try:\n address = transaction.from_address\n private_key = self.addresses[address]['private_key']\n transaction.sign_transaction(private_key)\n except Exception as ex:\n print(\"Error signing transaction from address: \" + address + \" \" + str(ex))", "def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "def sign_tx(self, network, txn, inputs, change, use_ae_signatures=False):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'use_ae_signatures': use_ae_signatures,\n 'change': change}\n\n reply = self._jadeRpc('sign_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)", "def sign_raw_transaction(hexstring):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"signrawtransaction\", hexstring])\n signed_tx = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return signed_tx", "def _sign_ledger_tx(self, tx_message: TransactionMessage) -> Any:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx = tx_message.signing_payload.get(\"tx\")\n tx_signed = crypto_object.sign_transaction(tx)\n return tx_signed", "def sign_tx(self, tx):\n if self.privkey:\n log.info('signing tx', tx=tx, account=self)\n tx.sign(self.privkey)\n else:\n raise ValueError('Locked account cannot sign tx')", "def send_transaction(self, signd_txn):\n return self.web3.eth.send_raw_transaction(signd_txn.rawTransaction).hex()", "def sign_tx(self, tx: payloads.Transaction, password: str, magic: Optional[int] = None) -> None:\n if magic is None:\n magic = settings.network.magic\n\n self._validate_tx(tx)\n\n message = magic.to_bytes(4, byteorder=\"little\", signed=False) + tx.hash().to_array()\n signature = self.sign(message, password)\n\n invocation_script = vm.ScriptBuilder().emit_push(signature).to_array()\n # mypy can't infer that the is_watchonly check ensures public_key has a value\n verification_script = 
contracts.Contract.create_signature_redeemscript(self.public_key) # type: ignore\n tx.witnesses.insert(0, payloads.Witness(invocation_script, verification_script))", "def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)", "def sign_transaction(self, private_key):\n\n to_be_hashed = (str(self.timestamp) +\n str(self.sender_address) +\n str(self.recipient_address) +\n str(self.amount) +\n # str(self.transaction_inputs) +\n # str(self.transaction_outputs) +\n str(self.transaction_id))\n\n # Create a hash value of the whole message\n sha_hash = SHA256.new(to_be_hashed.encode())\n\n # Import private key\n key = RSA.importKey(private_key)\n\n # print(sha_hash)\n\n # Construct an instance of the crypto object\n cipher = PKCS1_v1_5.new(key)\n\n # Create and return the signature\n self.transaction_signature = cipher.sign(sha_hash)", "def submit_transaction():\n data = request.get_json()\n\n # Create candidate transaction object\n try:\n tx = Transaction.from_dict(data['transaction'])\n except (KeyError, TypeError):\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n statuses = []\n # Broadcast if needed and turn off broadcasting for other nodes\n if request.args.get('broadcast', type=int, default=0):\n for node_ in node.network:\n if not node_['id'] == node.node_id:\n response = requests.post(\n node_['ip'] + '/transactions/submit?broadcast=0',\n json=dict(\n transaction=data['transaction'],\n signature=data['signature']\n )\n )\n statuses.append(response.status_code)\n\n if not response.status_code == 200:\n response = dict(message='Transaction rejected by the network.')\n return jsonify(response), 202\n\n # Validate transaction as-is\n val_result = validate_transaction_document(tx)\n if isinstance(val_result, str):\n response = dict(message=val_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Verify signature\n # defined in backend/utils\n sign_result = verify_signature(tx, data['signature'])\n if isinstance(sign_result, str):\n response = dict(message=sign_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Add transaction to local blockchain\n node.blkchain.add_transaction(tx)\n myurl = node.network[node.node_id]['ip']\n url = myurl + '/blockchain/mine_block'\n mine_resp = requests.get(url=url)\n if mine_resp.status_code == 200:\n block_dict = mine_resp.json()\n add_resp = requests.post(url=myurl + '/blockchain/add_block?\\\n broadcast=1', json=block_dict)\n # run consensus \n requests.get(url=myurl+'/blockchain/consensus')\n\n response = dict(message='Transaction added.')\n\n return jsonify(response), 200", "def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)", "def decoderawtransaction_asm_sighashtype(self):\n\n self.log.info(\"- various mainnet txs\")\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n tx = 
'0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])\n\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc\n # verify that we have not altered scriptPubKey decoding.\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])\n assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])\n assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n txSave = tx_from_hex(tx)\n\n self.log.info(\"- tx not passing DER signature checks\")\n # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type\n tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])\n\n self.log.info(\"- tx passing DER signature checks\")\n # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n\n # some more full transaction tests of varying specific scriptSigs. 
used instead of\n # tests in decodescript_script_sig because the decodescript RPC is specifically\n # for working on scriptPubKeys (argh!).\n push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]\n signature = push_signature[2:]\n der_signature = signature[:-2]\n signature_sighash_decoded = der_signature + '[ALL]'\n signature_2 = der_signature + '82'\n push_signature_2 = '48' + signature_2\n signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'\n\n self.log.info(\"- P2PK scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n # make sure that the sighash decodes come out correctly for a more complex / lesser used case.\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- multisig scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- scriptSig that contains more than push operations\")\n # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.\n txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])", "def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest", "def send_raw_transaction(signed_tx):\n try:\n txid = subprocess.check_output([\"litecoin-cli\", \"sendrawtransaction\", signed_tx])\n except:\n sys.exit(1)\n return txid.strip()", "def sign_with(self, account: Account) -> CosignatureSignedTransaction:\n\n transaction_info = self.transaction.transaction_info\n if transaction_info is None:\n raise ValueError('Transaction info not present.')\n parent_hash = typing.cast(TransactionInfo, transaction_info).hash\n if parent_hash is None:\n raise ValueError('Transaction info to cosign has no hash.')\n\n signature = util.hexlify(account.sign_data(parent_hash))\n signer = account.public_key\n return CosignatureSignedTransaction(parent_hash, signature, signer)", "def signed_transaction(self) -> CustomSignedTransaction:\n enforce(\n self.is_set(\"signed_transaction\"),\n \"'signed_transaction' content is not set.\",\n )\n return cast(CustomSignedTransaction, self.get(\"signed_transaction\"))", "def sendrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = 
given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to sendrawtransaction.\")\n transaction = CMutableTransaction.deserialize(given_bytes)\n return b2lx(transaction.GetHash())", "def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):\n return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys))", "def submit_and_store_transaction(self, signed_transaction_data):\n return self._call_account_method(\n 'submitAndStoreTransaction', {\n 'signedTransactionData': signed_transaction_data\n }\n )", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def test_new_transaction_return_type(self):\n transaction = self.blockchain.new_transaction(self.sender, self.recipient, self.amount)\n self.assertIsInstance(transaction, int)", "def sign_liquid_tx(self, network, txn, inputs, commitments, change, use_ae_signatures=False,\n asset_info=None):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'trusted_commitments': commitments,\n 'use_ae_signatures': use_ae_signatures,\n 'change': change,\n 'asset_info': asset_info}\n\n reply = self._jadeRpc('sign_liquid_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)", "def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200", "def sign(self, payload):\n raise NotImplementedError", "def sign_trx(self, 
signture):\n self.trx_signature = signture", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))", "def sign(self, data: bytes, password: str) -> bytes:\n if self.is_watchonly:\n raise ValueError(\"Cannot sign transaction using a watch only account\")\n # mypy can't infer that the is_watchonly check ensures encrypted_key has a value\n private_key = self.private_key_from_nep2(self.encrypted_key.decode(\"utf-8\"), password) # type: ignore\n return cryptography.sign(data, private_key)", "def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}", "def get_tx_signature(tx, private_key, btc_address, hashcode=SIGHASH_ALL):\n tx_obj = deserialize(tx)\n index = None\n\n for tx_in in tx_obj['ins']:\n prev_tx_hash = tx_in['outpoint']['hash']\n prev_tx_info = get_tx_info(prev_tx_hash)\n if btc_address in prev_tx_info['to']:\n index = prev_tx_info['to'].index(btc_address)\n\n if index is not None:\n signing_tx = signature_form(tx, index, mk_pubkey_script(btc_address), hashcode)\n signature = ecdsa_tx_sign(signing_tx, private_key, hashcode)\n response = signature, index\n else:\n response = \"Error, no input tx to sign\", -1\n\n return response", "def hash_transaction(transaction: SignedRawTransaction) -> str:\n hashable_transaction = transaction.SerializeToString()\n return Verification.hash_bytes_256(hashable_transaction)", "def ecdsa_tx_sign(txhash, priv):\n rawsig = ecdsa_raw_sign(txhash, priv)\n return der_encode_sig(*rawsig)", "def transaction_action(self):\n # trigger scene signal\n self.scene().node_transaction.emit(self.metadata)", "def _sign_tx_hash(self, tx_message: TransactionMessage) -> str:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_deprecated_mode = tx_message.signing_payload.get(\"is_deprecated_mode\", False)\n tx_signature = crypto_object.sign_message(tx_hash, is_deprecated_mode)\n return tx_signature", "def send_unsigned_transaction(self, tx: Dict[str, Any], private_key: Optional[str] = None,\n public_key: 
Optional[str] = None, retry: bool = False,\n block_identifier: Optional[str] = 'pending') -> bytes:\n if private_key:\n address = self.private_key_to_address(private_key)\n elif public_key:\n address = public_key\n else:\n logger.error('No ethereum account provided. Need a public_key or private_key')\n raise ValueError('Ethereum account was not configured or unlocked in the node')\n\n if tx.get('nonce') is None:\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n\n number_errors = 5\n while number_errors >= 0:\n try:\n if private_key:\n signed_tx = self.w3.eth.account.sign_transaction(tx, private_key=private_key)\n logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])\n try:\n return self.send_raw_transaction(signed_tx.rawTransaction)\n except TransactionAlreadyImported as e:\n # Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's\n # processed\n tx_hash = signed_tx.hash\n logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))\n return tx_hash\n elif public_key:\n tx['from'] = address\n return self.send_transaction(tx)\n except ReplacementTransactionUnderpriced as e:\n if not retry or not number_errors:\n raise e\n current_nonce = tx['nonce']\n tx['nonce'] = max(current_nonce + 1, self.get_nonce_for_account(address,\n block_identifier=block_identifier))\n logger.error('Tx with nonce=%d was already sent for address=%s, retrying with nonce=%s',\n current_nonce, address, tx['nonce'])\n except InvalidNonce as e:\n if not retry or not number_errors:\n raise e\n logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',\n address, tx['nonce'])\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n number_errors -= 1", "def sign_transaction_essence(self, prepared_transaction_data):\n return self._call_account_method(\n 'signTransactionEssence', {\n 'preparedTransactionData': prepared_transaction_data\n }\n )", "def sign(module):\n results = {\n \"changed\": False,\n \"results\": [],\n \"changes\": []\n }\n if not module.params['passphrase'] and not module.params['key']:\n module.fail_json(rc=1, msg='Error: Both passphrase and key are '\n 'required when signing an rpm')\n else:\n if module.params['macros']:\n for macro, value in module.params['macros'].items():\n rpm.addMacro(macro, value)\n for package in module.params['rpms']:\n pyread, cwrite = os.pipe()\n cwrite = os.fdopen(cwrite, 'w')\n rpm.setLogFile(cwrite)\n result = rpm.addSign(\n '{rpm}'.format(rpm=package),\n module.params['passphrase'], module.params['key']\n )\n cwrite.close()\n pyread = os.fdopen(pyread)\n msg = pyread.readline()\n pyread.close()\n\n if not result:\n module.fail_json(rc=1, msg='Error: Failed to sign {rpm}, {msg}'.format(rpm=package, msg=msg))\n\n if not msg:\n results['changes'].append('{}'.format(package))\n results['results'].append('{} was signed'.format(package))\n if not results['changed']:\n results['changed'] = True\n else:\n results['results'].append('{} skipped, already signed'.format(package))\n module.exit_json(\n changed=results['changed'],\n results=results['results'],\n changes=dict(signed=results['changes'])\n )", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def new_transaction():\n\n data = request.get_json()\n\n if not data:\n return \"No transation data passed\", 400\n\n required = ['sender', 'recipient', 'amount']\n\n if not 
(list(data.keys()) == required):\n return 'Missing Value', 400\n \n block_index = blockchain.add_transaction(data['sender'], data['recipient'], data['amount'])\n response = {'message':f'Adding the transaction to block at index: {block_index}'}\n\n return jsonify(response), 201", "def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig", "def submit_transaction(self, sender_address, recipient_address, stock, quanitity, signature):\n print(\"self.transactions=\", len(self.transactions))\n\n transaction = OrderedDict({\n 'sender_address': sender_address,\n 'recipient_address': recipient_address,\n 'stock': stock,\n 'quantity': quanitity\n })\n\n verified = self.verify_signature(sender_address, signature, transaction)\n if verified:\n self.transactions.append(transaction)\n print('Added tranasaction successfully (len={})'.format(len(self.transactions)))\n self.mine()\n return len(self.chain) + 1\n else:\n raise Exception(\"Failed to add transaction to blockchain\")", "def test_sign(self):\n self.signer.Sign(b'notadb')\n self.assertTrue(True)", "def get_transaction(tx):\n global INVOKE_COUNTER\n INVOKE_COUNTER = INVOKE_COUNTER + 1\n if INVOKE_COUNTER % 3 == 0:\n return \"\"\n else:\n raise_connection_error()", "def rawTxInSignature(tx, idx, subScript, hashType, key):\n sigHash = calcSignatureHash(subScript, hashType, tx, idx, None)\n sig = signRFC6979(key, sigHash).serialize()\n return sig + ByteArray(hashType)", "def coinbase_transaction(self):\n return self.txns[0]", "def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}", "def signTxOutput(\n netParams, tx, idx, pkScript, hashType, keysource, previousScript, sigType\n):\n\n sigScript, scriptClass, addresses, nrequired = sign(\n netParams, tx, idx, pkScript, hashType, keysource, sigType\n )\n\n isStakeType = (\n scriptClass == StakeSubmissionTy\n or scriptClass == StakeSubChangeTy\n or scriptClass == StakeGenTy\n or scriptClass == StakeRevocationTy\n )\n if isStakeType:\n scriptClass = getStakeOutSubclass(pkScript)\n\n if scriptClass == ScriptHashTy:\n raise NotImplementedError(\"ScriptHashTy signing unimplemented\")\n # # TODO keep the sub addressed and pass down to merge.\n # realSigScript, _, _, _ = sign(\n # privKey, netParams, tx, idx, sigScript, hashType, sigType)\n\n # Append the p2sh script as the last push in the script.\n # script = ByteArray(b'')\n # script += realSigScript\n # script += addData(sigScript)\n\n # sigScript = script\n # # TODO keep a copy of the script for 
merging.\n\n # Merge scripts. with any previous data, if any.\n mergedScript = mergeScripts(\n netParams,\n tx,\n idx,\n pkScript,\n scriptClass,\n addresses,\n nrequired,\n sigScript,\n previousScript,\n )\n return mergedScript", "def algo_transaction(sender, private_key, receiver, amount):\n params = ALGODCLIENT.suggested_params()\n txn = PaymentTxn(sender, params, receiver, amount, None)\n signed_tx = txn.sign(private_key)\n ALGODCLIENT.send_transaction(signed_tx)\n return True", "def generate_transaction(recipient_id: int, amount: float, mute: bool = False) -> bool:\n logging.debug(\"Transaction requested: %f NBC to node %d\", amount, recipient_id)\n sender = wallet.get_public_key().dumpb()\n recipient = wallet.get_public_key(recipient_id).dumpb()\n r = util.get_db()\n inputs: List[TransactionInput] = []\n input_amount = 0.0\n with r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-tx:lock\"):\n for ib, ob in r.hgetall(\"blockchain:utxo-tx\").items():\n o = TransactionOutput.loadb(ob)\n if o.recipient == sender:\n inputs.append(TransactionInput.loadb(ib))\n input_amount += o.amount\n if input_amount >= amount:\n t = Transaction(recipient=recipient,\n amount=amount,\n inputs=inputs,\n input_amount=input_amount)\n # Add to transaction pool\n r.hset(\"blockchain:tx_pool\", t.id, t.dumpb())\n # \"Add to wallet if mine\"\n r.hdel(\"blockchain:utxo-tx\", *(i.dumpb() for i in t.inputs))\n r.hmset(\"blockchain:utxo-tx\", {TransactionInput(t.id, o.index).dumpb(): \\\n o.dumpb() for o in t.outputs})\n break\n else:\n # Not enough UTXOs\n logging.error(\"Cannot send %f NBC to node %d (not enough coins)\", amount, recipient_id)\n return False\n\n logging.debug(\"Generated transaction %s\", util.bintos(t.id))\n _check_for_new_block()\n if not mute:\n logging.debug(\"Broadcasting transaction %s\", util.bintos(t.id))\n chatter.broadcast_transaction(t, util.get_peer_ids())\n return True", "def create_transaction(inputs: list, outputs: dict) -> ((str, int), str):\n\ttry:\n\t\tc = Bitcoin(testnet=bitcoin_is_testnet)\n\t\touts = []\n\t\tfor outk, outv in outputs.items():\n\t\t\touts += [{'value': outv, 'address': outk}]\n\t\ttx = c.mktx(inputs, outs)\n\t\ttx_serialize = serialize(tx)\n\n\t\t# Signing each input to predict the transaction size\n\t\tpriv = sha256('a big long brainwallet password')\n\t\ttx_signed = tx.copy()\n\t\tfor i in range(len(inputs)):\n\t\t\ttx_signed = c.sign(tx_signed, i, priv)\n\n\t\t# The serialization uses one char per nibble so in order the get the number of bytes it's necessary to\n\t\t# divide the size of the string serialization by 2\n\t\treturn (str(tx_serialize), len(str(serialize(tx_signed))) // 2), None\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to create the transaction: {e}\")\n\t\treturn (None, None), \"There was a problem trying to create the transaction\"", "def call_backend_sign(self, account: str, message: bytes) -> str:\n provider = self._providers[0]\n if isinstance(provider, EthereumTesterProvider):\n address = to_canonical_address(account)\n sig_key = provider.ethereum_tester.backend._key_lookup[address]\n signed_message = sig_key.sign_msg(message)\n return signed_message\n else:\n return self.w3.eth.sign(account, data=message) # Technically deprecated...", "def push_tx(tx, network='testnet', fee=False):\n\n if network in ['testnet', 'main']:\n if network is 'testnet':\n if fee:\n url = 'http://tbtc.blockr.io/api/v1/tx/push'\n else:\n url = 
'https://api.blockcypher.com/v1/btc/test3/txs/push'\n elif network is 'main':\n if fee:\n url = 'http://btc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/main/txs/push'\n\n if fee:\n data = {'hex': tx}\n else:\n data = {'tx': tx}\n\n response = post(url, data=json.dumps(data))\n else:\n response = 'Bad network'\n\n r_code = response.status_code\n r_reason = response.reason\n\n if r_code is 200:\n # blockr server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['data'])\n elif r_code is 201:\n # blockcyper server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['tx']['hash'])\n else:\n tx_hash = None\n\n return r_code, r_reason, tx_hash", "def _get_transaction(self, hash_bytes: bytes) -> BaseTransaction:\n raise NotImplementedError", "def sign(self, encoded):\n signature = self._hmac.copy()\n signature.update(encoded)\n return signature.hexdigest().encode('utf-8')", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def sign(self, data: bytes) -> bytes:\n return self._signing_key.sign(data).signature", "def buildAsk (self, node, name, value, price):\n\n nameData = node.name_show (name)\n namePrevOut = node.gettxout (nameData[\"txid\"], nameData[\"vout\"])\n nameValue = namePrevOut[\"value\"]\n addr = node.getnewaddress ()\n\n tx = CTransaction ()\n nameOut = COutPoint (int (nameData[\"txid\"], 16), nameData[\"vout\"])\n tx.vin.append (CTxIn (nameOut))\n tx.vout.append (self.buildNameUpdate (name, value, addr, nameValue + price))\n\n txHex = tx.serialize ().hex ()\n\n signed = node.signrawtransactionwithwallet (txHex, [],\n \"SINGLE|ANYONECANPAY\")\n assert signed[\"complete\"]\n return signed[\"hex\"]", "def send_tx(self, tx):\n if sys.version_info >= (3, 0):\n tx = tx.encode('ascii')\n tx_b64 = base64.b64encode(tx)\n self.__rpc_client.call(\"Babble.SubmitTx\", [tx_b64], expect_reply=True)", "def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )", "def sign(self):\r\n self._reset()\r\n if hasattr(self, \"_privateKey\"):\r\n if \"fee\" not in self:\r\n setFees(self)\r\n if self.type == 4:\r\n missings = \\\r\n self.asset[\"multiSignature\"][\"min\"] - \\\r\n len(self.get(\"signature\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n self[\"signature\"] = dposlib.core.crypto.getSignature(\r\n self, self._privateKey\r\n )\r\n else:\r\n raise Exception(\"orphan transaction can not sign itsef\")", "def _generate_transaction(\n 
payment: Payment,\n kind: str,\n amount: Decimal,\n *,\n id='',\n is_success=True,\n **data) -> Transaction:\n transaction = create_transaction(\n payment=payment,\n kind=kind,\n amount=amount,\n currency=data.pop('currency', payment.currency),\n gateway_response=data,\n token=id,\n is_success=is_success)\n return transaction", "def verify(blocknumber, trx, use_api):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n b = Blockchain(morphene_instance=stm)\n i = 0\n if not blocknumber:\n blocknumber = b.get_current_block_num()\n try:\n int(blocknumber)\n block = Block(blocknumber, morphene_instance=stm)\n if trx is not None:\n i = int(trx)\n trxs = [block.json_transactions[int(trx)]]\n else:\n trxs = block.json_transactions\n except Exception:\n trxs = [b.get_transaction(blocknumber)]\n blocknumber = trxs[0][\"block_num\"]\n wallet = Wallet(morphene_instance=stm)\n t = PrettyTable([\"trx\", \"Signer key\", \"Account\"])\n t.align = \"l\"\n if not use_api:\n from morphenepythonbase.signedtransactions import Signed_Transaction\n for trx in trxs:\n if not use_api:\n # trx is now identical to the output of get_transaction\n # This is just for testing porpuse\n if True:\n signed_tx = Signed_Transaction(trx.copy())\n else:\n tx = b.get_transaction(trx[\"transaction_id\"])\n signed_tx = Signed_Transaction(tx)\n public_keys = []\n for key in signed_tx.verify(chain=mph.chain_params, recover_parameter=True):\n public_keys.append(format(Base58(key, prefix=mph.prefix), mph.prefix))\n else:\n tx = TransactionBuilder(tx=trx, morphene_instance=stm)\n public_keys = tx.get_potential_signatures()\n accounts = []\n empty_public_keys = []\n for key in public_keys:\n account = wallet.getAccountFromPublicKey(key)\n if account is None:\n empty_public_keys.append(key)\n else:\n accounts.append(account)\n new_public_keys = []\n for key in public_keys:\n if key not in empty_public_keys or use_api:\n new_public_keys.append(key)\n if isinstance(new_public_keys, list) and len(new_public_keys) == 1:\n new_public_keys = new_public_keys[0]\n else:\n new_public_keys = json.dumps(new_public_keys, indent=4)\n if isinstance(accounts, list) and len(accounts) == 1:\n accounts = accounts[0]\n else:\n accounts = json.dumps(accounts, indent=4)\n t.add_row([\"%d\" % i, new_public_keys, accounts])\n i += 1\n print(t)", "def transaction_type(self) -> str:\n return self.chunks[2].decode(\"ascii\")", "def get_transaction(self, txid):\n\n return self._blocks._txns.get(txid)", "def call_contract(w3, account, func):\n tx = func.buildTransaction({\n 'nonce': w3.eth.getTransactionCount(account.address),\n 'gas': func.estimateGas()\n })\n signed_tx = w3.eth.account.signTransaction(tx, account.privateKey)\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n return tx_hash", "def send_tx(args):\n kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()", "def test_modify_transaction_after_signing(mocker):\n transaction_original = Transaction(\n chain=0,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n )\n\n transaction = 
transaction_original.sign(PRIVATE_KEY_1)\n transaction.value = 10_000_000\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_INVALID_SIGNATURE\n ):\n transaction.validate(raise_exception=True)", "def compute_transaction_id(self):\n self.tx_id = self.get_sign_data()", "def add_transaction(self, transaction, signature, client_public_key):\r\n # Check If transaction is already in the transaciton_pool\r\n if transaction not in self.transaction_pool:\r\n # Verify With All Other Nodes\r\n if self.verify_transaction(transaction, signature, client_public_key):\r\n # Encrypt the transaction\r\n client_public_key = load_pem_public_key(client_public_key, default_backend())\r\n encrypted_transaction = client_public_key.encrypt(\r\n json.dumps(transaction).encode(),\r\n padding.OAEP(\r\n mgf = padding.MGF1(algorithm=hashes.SHA256()),\r\n algorithm = hashes.SHA256(),\r\n label = None\r\n )\r\n )\r\n\r\n self.transaction_pool.append(str(encrypted_transaction))\r\n\r\n else: return False, self.transaction_pool # Return False if Verification fails\r\n\r\n # Return True if transaction was already in transaction_pool or if verification was successful and new transaction was added\r\n return True, self.transaction_pool", "def CreateTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def mk_simple_transaction(self, from_addr, to_addr, send_value):\n transaction = dict(\n nonce=self.web3.eth.get_transaction_count(from_addr),\n gasPrice=self.web3.eth.gasPrice,\n # there must be an automated way to automatically set the gas price\n # based off of the gas strategy\n gas=100000,\n to=to_addr,\n value=self.web3.toWei(send_value, 'wei')\n )\n return transaction", "def sign(self, message):\n return Signature(self._sk.sign(message))", "def add_tx(self, txid, tx):\n outputs = tx.outputs()\n so = outputs and outputs[0][1]\n # Note: ScriptOutput here is the subclass defined in this file, not\n # address.ScriptOutput\n if not isinstance(so, ScriptOutput):\n return\n transaction_type = so.message.transaction_type\n try:\n if transaction_type == \"GENESIS\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"MINT\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"SEND\":\n self._add_send_tx(so, outputs, txid, tx)\n elif transaction_type == \"COMMIT\":\n return # ignore COMMIT, they don't produce any tokens\n else:\n raise InvalidOutputMessage(\"Bad transaction type\")\n except (AssertionError, ValueError, KeyError, TypeError, IndexError) as e:\n self.print_error(f\"ERROR: tx {txid}; exc =\", repr(e))", "def create_raw_transaction(amount, network_fee, from_address, to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and 
network fees are subtracted\n # from input_total. Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()", "def get_tx(txid):\n return requests.get(BASE+f'/api/tx/{txid}').json()", "def add_transaction():\n index = blockchain.add_transaction(request.form['sender'], request.form['receiver'], request.form['amount'])\n response = {'message': \"Transaction will be added to Block #{0}\".format(index)}\n return jsonify(response), 200", "def get_payee_transaction(payee: str) -> Any:\n entry = g.ledger.attributes.payee_transaction(payee)\n return serialise(entry) if entry else None", "def sign(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.sign(msg)", "def sign(self, object):\n pass", "def local_push(tx, rpc_user=None, rpc_password=None):\n\n rpc_connection = AuthServiceProxy(\"http://\"+rpc_user+\":\"+rpc_password+\"@127.0.0.1:18332\")\n\n try:\n tx_hash = rpc_connection.sendrawtransaction(tx)\n code = 200\n print \"Transaction broadcast \" + tx_hash\n except JSONRPCException as e:\n print e.message\n tx_hash = None\n code = 500\n\n return tx_hash, code", "def _santander_generate_digital_sign(self,type, acquirer,time_stamp,merchant_id,reference,amount,cur,secret, result=False,\n message=False,\n pasref=False,\n authcode=False,\n\t\t\t\t\t\t\t\t\t\t\t\t order_id=False):\n assert acquirer.provider == 'santander'\n\n '''\n def get_value(key):\n if values.get(key):\n return values[key]\n return ''\n\n if inout == 'out':\n keys = ['Ds_Amount',\n 'Ds_Order',\n 'Ds_MerchantCode',\n 'Ds_Currency',\n 'Ds_Response']\n else:\n keys = ['Ds_Merchant_Amount',\n 'Ds_Merchant_Order',\n 'Ds_Merchant_MerchantCode',\n 'Ds_Merchant_Currency',\n 'Ds_Merchant_TransactionType',\n 'Ds_Merchant_MerchantURL']\n sign = ''.join('%s' % (get_value(k)) for k in keys)\n # Add the pre-shared secret key at the end of the signature\n sign = sign + acquirer.santander_secret_key\n if isinstance(sign, str):\n sign = urlparse.parse_qsl(sign)\n shasign = sha1(sign).hexdigest().upper()\n '''\n if type == 'in':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(reference) + '.' + str(amount) + '.' + str(cur))\n clave2 = sha1(str(clave1.hexdigest()) + '.' + str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())\n if type == 'out':\n clave1 = sha1(str(time_stamp) + '.' + str(merchant_id) + '.' + str(order_id) + '.' + str(result) + '.' + str(message) + '.' + str(pasref) + '.' + str(authcode))\n clave2 = sha1(str(clave1.hexdigest()) + '.' 
+ str(secret))\n print str(clave2.hexdigest())\n return str(clave2.hexdigest())", "def test_sign_tx_fetchai(self):\n tx_hash = Web3.keccak(text=\"some_bytes\")\n\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n ledger_id=self.ledger_id,\n info=self.info,\n signing_payload={\"tx_hash\": tx_hash},\n )\n\n tx_signature = self.decision_maker._sign_tx(tx_message)\n assert tx_signature is not None", "def _transact(self, payment_method_token, amount, processor_token,\n transaction_type, endpoint, options):\n purchase_data = self._construct_options(payment_method_token, transaction_type,\n amount, options)\n # Send payload and return transaction.\n req = Request(endpoint % processor_token, purchase_data, method='post')\n req.add_header(\"Content-Type\", \"application/xml\")\n return Transaction(fetch_url(req))", "def create_god_transaction(to_pk):\n\n god_pk, god_sk = signature.generate_keys()\n tx = Transaction(god_pk, to_pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n return tx", "def create_tx(coin, account, recipient, amount):\n if coin ==ETH:\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": recipient, \"value\": amount})\n return{\n \"to\": recipient,\n \"from\": account.address,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address)\n }\n if coin == BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(recipient, amount, BTC)])", "def _run_transaction(operation_name, txn):\n if not txn:\n return None\n\n response = None\n try:\n response = requests.put(PoliciesOutput.CONSUL_TRANSACTION_URL, json=txn)\n except requests.exceptions.RequestException as ex:\n ctx.logger.error(\n \"RequestException - failed to {0} at {1}: {2} on txn={3}\"\n .format(operation_name, PoliciesOutput.CONSUL_TRANSACTION_URL,\n str(ex), json.dumps(txn)))\n return None\n\n if response.status_code != requests.codes.ok:\n ctx.logger.error(\n \"failed {0} for {1} {2}: text={3} txn={4}\"\n .format(response.status_code, operation_name,\n PoliciesOutput.CONSUL_TRANSACTION_URL, response.text, json.dumps(txn)))\n return None\n ctx.logger.info(\n \"response {0} for {1} {2}: text={3} txn={4}\"\n .format(response.status_code, operation_name,\n PoliciesOutput.CONSUL_TRANSACTION_URL, response.text, json.dumps(txn)))\n return True", "def sign(file, outfile):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if file and file != \"-\":\n if not os.path.isfile(file):\n raise Exception(\"File %s does not exist!\" % file)\n with open(file) as fp:\n tx = fp.read()\n if tx.find('\\0') > 0:\n with open(file, encoding='utf-16') as fp:\n tx = fp.read()\n else:\n tx = click.get_text_stream('stdin')\n tx = ast.literal_eval(tx)\n tx = mph.sign(tx, reconstruct_tx=False)\n tx = json.dumps(tx, indent=4)\n if outfile and outfile != \"-\":\n with open(outfile, 'w') as fp:\n fp.write(tx)\n else:\n print(tx)", "def transaction(self, uuid):\r\n return tx.Transaction(self, uuid)", "def sign(data):\n return _make.sign(data)", "def database_transaction_to_rlp_transaction(transaction):\n\n nonce = 
transaction['nonce']\n value = parse_int(transaction['value'])\n gas = parse_int(transaction['gas'])\n gas_price = parse_int(transaction['gas_price'])\n\n tx = create_transaction(nonce=nonce, gasprice=gas_price, startgas=gas,\n to=transaction['to_address'], value=value,\n data=data_decoder(transaction['data']),\n v=parse_int(transaction['v']),\n r=parse_int(transaction['r']),\n s=parse_int(transaction['s']))\n\n return tx", "def transact_with(self, peer, transaction_type=None):\n if hex(id(peer)) == hex(id(self.node)):\n return\n \n if not max(peer.trust, 0):\n return None\n\n # Locate the routing table responsible for the peer we're dealing with\n router = filter(lambda x: x.node == peer, self.routers)\n if not any(router): return\n router = router[0]\n \n # Routers can be subclassed to turn their .malicious attr into a property\n # with statistical variance. E.g. to return True every 100th transaction.\n if transaction_type == None:\n transaction_type = not router.malicious\n \n peer.transact(positively=transaction_type, router=self)\n \n #log(\"[%s] %s <-- %s\" % \\\n # (\"+\" if not maliciousness else \"-\", self.node, peer))\n\n # Reinforce the network by making ourselves aware of this peers' peers\n for node in router.peers:\n if node == self.node or node in self.peers:\n continue\n self.peers.append(node.copy(router=self))\n\n # and make the peer routing table aware of our peers.\n for node in self.peers:\n if node == router.node or node in router.peers:\n continue\n router.peers.append(node.copy(router=router))\n\n # NoneType indicates an unreachable peer, True indicates a positive\n # transaction and False means the remote peer can be said to have\n # provided a malicious resource.\n return transaction_type", "def parseHexTx (self, txHex):\n\n data = bytes.fromhex (txHex)\n\n tx = CTransaction ()\n tx.deserialize (io.BytesIO (data))\n\n return tx", "def transaction_from_result(cls, result: JSON, **kwargs: Any) -> Transaction:\n res = cls.normalize_transaction_result(result)\n res.update(kwargs)\n\n return Transaction(\n hash=to_bytes(hexstr=res['hash']),\n sender=to_checksum_address(res['sender']),\n success=res['success'],\n timestamp=int(res['timestamp']),\n to=to_checksum_address(res['to']),\n value=int(res['value']),\n )", "def submit_tx_callback(submit_request: dict):\n log.info('submit_tx_callback received', submit_request=submit_request)\n submit_request = SubmitTransactionRequest(submit_request)\n\n try:\n tx_id = root_wallet.submit_transaction(submit_request.transaction)\n except PERSISTENT_ERRORS as e:\n raise PersistentError(e)\n except Exception as e:\n enqueue_payment_failed_callback(submit_request, str(e))\n raise # crash the job\n payment = Payment.from_payment_request(submit_request, submit_request.sender_address, tx_id)\n enqueue_payment_callback(submit_request.callback, payment, 'receive')", "def get_payu_transaction_id():\n hash_object = sha256(str(int(time.time() * 1000)).encode('utf-8'))\n txnid = hash_object.hexdigest().lower()[0:32]\n return txnid", "def insert_tx_signature(tx, index, signature, public_key):\n tx_obj = deserialize(tx)\n tx_obj[\"ins\"][index][\"script\"] = serialize_script([signature, public_key])\n\n return serialize(tx_obj)" ]
[ "0.6919308", "0.6778266", "0.6380237", "0.627283", "0.61029476", "0.6083491", "0.5987487", "0.58167666", "0.57365465", "0.5712428", "0.56981504", "0.56660604", "0.56095326", "0.55846405", "0.55624753", "0.5552044", "0.54918426", "0.5468147", "0.54531074", "0.54221904", "0.5362187", "0.534654", "0.53127396", "0.5284009", "0.52756524", "0.527408", "0.5249939", "0.5228976", "0.5205368", "0.5204117", "0.5184792", "0.5184099", "0.51577383", "0.5024115", "0.50207984", "0.501832", "0.5013838", "0.50050825", "0.49934402", "0.49842024", "0.49813572", "0.49469128", "0.4915934", "0.49127626", "0.49088678", "0.4904845", "0.49041983", "0.4895515", "0.48945612", "0.48927733", "0.4892498", "0.48902103", "0.48868656", "0.48464224", "0.48424318", "0.48382175", "0.48346844", "0.4827431", "0.48251456", "0.48234284", "0.4820536", "0.48191902", "0.48113778", "0.4792901", "0.4789554", "0.47783285", "0.47665325", "0.4764376", "0.47636002", "0.47605386", "0.4755534", "0.4755117", "0.47501242", "0.4744019", "0.4739814", "0.47337583", "0.4722816", "0.4718421", "0.47162744", "0.4708272", "0.46968496", "0.46907973", "0.4689589", "0.4681352", "0.46762648", "0.46747962", "0.4674683", "0.46741635", "0.46678412", "0.46651658", "0.46552607", "0.46544042", "0.4652649", "0.46491984", "0.4648751", "0.46380457", "0.4632886", "0.4631134", "0.46155992", "0.461439" ]
0.77226585
0
Do not return anything, modify root inplace instead.
def recoverTree(self, root: TreeNode) -> None:
    arr1=[]
    self.toList(root,arr1)
    print (arr1)
    num1=None
    num2=arr1[-1]
    l=len(arr1)
    i=0
    arr2=[]+arr1
    arr2.sort()
    for i in range(l):
        if not arr1[i]==arr2[i]:
            if num1:
                num2=arr1[i]
                break
            num1=arr1[i]
    self.replace(root,num1,num2)
    print(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def update_root(self, action: Action) -> \"MonteCarloSearchTree\":\n if action in self._root.children:\n new_root = self._root.children[action]\n else:\n new_root = self._root.add_child(action)\n self._root.remove_child(new_root)\n self._root = new_root\n return self", "def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.", "def recoverTree(self, root: TreeNode) -> None:\n self.tmp, self.left, self.right = None, None, None\n self.helper(root)\n self.left.val, self.right.val = self.right.val, self.left.val", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def edit_root(self) -> Generator[Root, None, None]:\n with self.edit(Root.type) as root:\n if not isinstance(root, Root):\n raise RuntimeError(\"Unexpected root type\")\n yield root", "def set_relative_root(self, root):\r\n self.root = root", "def set_relative_root(self, root):\r\n self.root = root", "def root_orig(self):\n if hasattr(self, \"orig\"):\n return self.orig.root_orig\n return 
self", "def set_root(self, x, root):\n\n while self.P[x] < x:\n\n j = self.P[x]\n self.P[x] = root\n x = j\n\n self.P[x] = root", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def clone_as_root(self) :\n clone = deepcopy(self)\n clone.parent = None\n clone.path_length = 0\n clone.previous_action = None\n return clone", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def set_relative_root(self, root):\n self.root = root", "def update_with_move(self, point, last_move):\n if point == -1:\n # reset the tree\n self._root = TreeNode(None, 1.0)\n else:\n self._root = self._root._children[point][last_move]\n self._root._parent = None", "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)", "def root(self, node):\n\n if self.set[node] == node:\n return node\n\n self.set[node] = self.root(self.set[node])\n return self.set[node]", "def _on_root_finder_update(self, change):\n if hasattr(self, \"_root_finder\"):\n del self._root_finder", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n self.pre = None\n self.m1 = None\n self.m2 = None\n self.helper(root)\n self.m1.val,self.m2.val = self.m2.val, self.m1.val", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def recoverTree(self, root):\n it = self.isValidBST(root)\n a, b = next(it)\n c = next(it, None)\n if c:\n _, c = c\n a.val, c.val = c.val, a.val\n else:\n a.val, b.val = b.val, a.val\n return root", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def SyncRoot(self) -> object:", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def _root(self, ind):\n while (ind != self._id[ind]):\n #make every other node in path to point to its 
grandparent\n self._id[ind] = self._id[self._id[ind]]\n ind = self._id[ind]\n return ind", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __init__(self):\n self.root = self.get_new_node();", "def test_removing_root(item):\n item.root = None\n assert not item.has_root", "def clean():\n new_tree = None", "def _rotate_left(self, og_root):\r\n new_root = og_root.right\r\n og_root.right = new_root.left\r\n if new_root.left:\r\n new_root.left.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root is self.root: # if our original root of the rotation is the tree root, replace tree root with new root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.left:\r\n og_root.parent.left = new_root\r\n else:\r\n og_root.parent.right = new_root\r\n new_root.left = og_root\r\n og_root.parent = new_root", "def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt", "def flatten(self, root: TreeNode) -> None:\n if not root: return\n self.flatten(root.right)\n self.flatten(root.left)\n root.right = self.last\n root.left = None\n self.last = root", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n left = root.left\n right = root.right\n root.left = None\n self.flatten(left)\n self.flatten(right)\n root.right = left\n cur = root\n while cur.right:\n cur = cur.right\n cur.right = right", "def replace_by_etree(self, root_el, el_idx=0):\n el = self.get_element_by_name(root_el.tag, el_idx)\n el[:] = list(root_el)\n el.attrib = root_el.attrib", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return None\n\n self.flatten(root.right)\n self.flatten(root.left)\n\n root.right = self.prev\n root.left = None\n self.prev = root", "def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = successor.parent\n\n if successor.parent != node:\n if parent.left == successor:\n parent.left = successor.left\n else:\n parent.right = successor.right\n if node.left != successor:\n successor.left = node.left\n else:\n successor.left = None\n if node.right != successor:\n successor.right = node.right \n else:\n successor.right = None\n\n else:\n ancestor = node.left\n ancestor.parent = None\n self.root = ancestor\n del self.nodes[node.key]", "def _rotate_right(self, og_root):\r\n new_root = og_root.left\r\n og_root.left = new_root.right\r\n if new_root.right:\r\n new_root.right.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root.value == self.root.value: # og_root is tree root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.right:\r\n og_root.parent.right = new_root\r\n else:\r\n og_root.parent.left = new_root\r\n new_root.right = og_root\r\n og_root.parent = new_root", "def patch(lines):\n if not get_root():\n set_root(os.getcwd())", "def set_root(self, root):\n 
self.root = root\n if self.root is not None:\n correct_type(root, Tag)", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def set_root(self, root):\n self.root = root\n self.sites = [root]", "def flatten(self, root: TreeNode) -> None:\n if root is None :\n return\n if self.node is not None :\n self.node.left = None\n self.node.right = root\n self.node = root\n right = root.right\n self.flatten(root.left)\n self.flatten(right)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n stack = []\n\n vals = []\n node = root\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n vals.append(node.val)\n node = node.right\n\n vals.sort()\n node = root\n i = 0\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n node.val = vals[i]\n i += 1\n node = node.right", "def reverse(self):\n self.root.reverse()", "def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):\n self.operator.update_subtree(old_subroot, new_subroot)", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def flatten(self, root: TreeNode) -> None:\n # User must pass a node\n if root:\n root_flatten = TreeNode(root.val)\n leaf = inOrderTreeWalk(root, root_flatten)\n root.left = None\n root.right = root_flatten.right.right", "def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError", "def recoverTree(self, root):\n # 线性空间复杂度\n # 存储树节点的值\n treeVal = []\n # 存储树的节点\n treePointer = []\n # 中序遍历\n self.inorder(root, treeVal, treePointer)\n treeVal.sort()\n for i in range(len(treeVal)):\n treePointer[i].val = treeVal[i]", "def flatten(self, root: TreeNode) -> None:\n self.previous = TreeNode()\n self.traverse(root)\n return root", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()", "def recoverTree(self, root: TreeNode) -> None:\n self.firstNode = None\n self.secondNode = None\n self.preNode = TreeNode(float(\"-inf\"))\n\n def in_order(root):\n if not root:\n return\n 
in_order(root.left)\n if self.firstNode == None and self.preNode.val >= root.val:\n self.firstNode = self.preNode\n if self.firstNode and self.preNode.val >= root.val:\n self.secondNode = root\n self.preNode = root\n in_order(root.right)\n\n in_order(root)\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def _refresh_tree_ref(self):\n self._tree_ref = RedBlackNodeRef(\n address=self._storage.get_root_address())", "def remove_first(self) -> bool:\n #tree isempty\n if self.root is None:\n return False\n\n #root== leaf\n if self.is_leaf(self.root):\n self.root = None\n return True\n\n #root has!= right tree\n if self.root.right is None:\n self.root = self.root.left\n return True\n\n #right tree\n #right tree\n replace_node = self.root.right\n replace_parent = self.root\n left_bool = False\n while replace_node.left is not None:\n replace_parent = replace_node\n replace_node = replace_node.left\n left_bool = True\n\n # remove left\n if left_bool:\n replace_parent.left = replace_node.right\n else:\n replace_parent.right = replace_node.right\n\n # insert left into root\n replace_node.left = self.root.left\n replace_node.right = self.root.right\n self.root = replace_node\n return True", "def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break", "def clear(self):\n self.root = None", "def apply(self, tree):\n raise NotImplementedError()", "def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black", "def write_root(self, root: Position) -> None:\n # Write only the body of the root.\n self.write_body(root)\n # Write all nodes of the tree, except ignored nodes.\n self.level_offset = self.compute_level_offset(root)\n self.root_level = root.level()\n p = root.threadNext() # Returns a copy.\n after = root.nodeAfterTree()\n while p and p != after:\n h = p.h.rstrip()\n if g.match_word(h, 0, '@ignore-tree'):\n 
p.moveToNodeAfterTree()\n continue\n if g.match_word(h, 0, '@ignore-node'):\n p.moveToThreadNext()\n continue\n if not g.match_word(h, 0, '@no-head'):\n self.write_headline(p)\n self.write_body(p)\n p.moveToThreadNext()", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def _uproot(self):\n left, right = self.left, self.right\n if left is not None:\n left.parent = None\n if right is not None:\n right.parent = None\n return left, right", "def _restore_global_position(x, root_pos, root_idx=None):\n x = x + root_pos\n if root_idx is not None:\n x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)\n return x", "def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def replace_subtree(self, tree, update_tree=True):\n if self.parent is None: # Changing the whole tree\n self.__dict__ = tree.__dict__\n else:\n if self is 
self.parent.left_subtree:\n self.parent.left_subtree = tree\n else:\n self.parent.right_subtree = tree\n if update_tree:\n self.update_tree()\n return self", "def _restore_root_target_weight(target_weight, root_weight, root_idx=None):\n if root_idx is not None:\n root_weight = np.full(target_weight.shape[0], root_weight, dtype=target_weight.dtype)\n target_weight = np.insert(target_weight, root_idx, root_weight[:, None], axis=1)\n return target_weight", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def set_root(self, xpath):\n if xpath[:2] is not '//':\n # Add the // to the front of the string if it isn't there\n self.root = self.tree.xpath('//{}'.format(xpath))\n self.base = self.root[0].base\n return self.root\n self.root = self.tree.xpath(xpath)\n self.base = self.root[0].base\n return self.root", "def _root():\n return 0", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def root(self):\n return self._make_position(self._root)", "def invert_binary_tree(root):\n if root is None:\n return None\n left = invert_binary_tree(root.left)\n right = invert_binary_tree(root.right)\n root.left = right\n root.right = left\n return root", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n 
left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.right)", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def reset(self):\r\n self._root_dir = None", "def __root(T: \"Graph\"):\n T_copy = T.copy()\n\n # Leaves are removed from the copy untill 1 or 2 vertices remain\n while len(T_copy.vertices) > 2:\n vertices_to_remove = []\n for v in T_copy.vertices:\n if v.degree == 1:\n vertices_to_remove.append(v)\n for v in vertices_to_remove:\n T_copy.del_vertex(v)\n\n root_labels = []\n for v in T_copy.vertices:\n root_labels.append(v.label)\n\n # From the original tree, the roots are returned\n T_root = []\n for v in T.vertices:\n if v.label in root_labels:\n T_root.append(v)\n\n return T_root", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n if root.left and root.left.val > root.val:\n root.left.val, root.val = root.val, root.left.val\n return\n if root.right and root.right.val < root.val:\n root.right.val, root.val = root.val, root.right.val\n return\n self.recoverTree(root.left)\n self.recoverTree(root.right)", "def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val" ]
[ "0.71675247", "0.7090078", "0.7052984", "0.69619083", "0.67892647", "0.6614594", "0.6551307", "0.65119416", "0.65119416", "0.65119416", "0.65119416", "0.64319086", "0.6413781", "0.6397017", "0.6397017", "0.6370123", "0.63613737", "0.63289756", "0.6325578", "0.630594", "0.6276573", "0.6276068", "0.6253161", "0.62525576", "0.62385803", "0.6237354", "0.6232268", "0.6217579", "0.6213837", "0.61863106", "0.61863106", "0.61863106", "0.6183783", "0.61445457", "0.6143329", "0.61304396", "0.6126261", "0.60904014", "0.60833347", "0.60833347", "0.6072852", "0.6069458", "0.60677487", "0.60657173", "0.60646737", "0.60569376", "0.6038713", "0.6026558", "0.60191596", "0.6004276", "0.59969795", "0.597867", "0.59723973", "0.596083", "0.59515494", "0.5944736", "0.5935968", "0.5934551", "0.5917017", "0.5916604", "0.5905968", "0.5900963", "0.5893328", "0.58823925", "0.5877697", "0.5851188", "0.5844727", "0.5843256", "0.5837958", "0.5836801", "0.5825156", "0.5819012", "0.5802323", "0.5797879", "0.5797414", "0.57964545", "0.5795283", "0.5789455", "0.5788347", "0.5778249", "0.577619", "0.5772213", "0.57689595", "0.5764992", "0.57637554", "0.57631475", "0.57561177", "0.5753912", "0.5748594", "0.57437444", "0.5736856", "0.57364017", "0.5734364", "0.57332695", "0.5728285", "0.57270014", "0.5725962", "0.5716089", "0.5711195", "0.57061964", "0.5691237" ]
0.0
-1
This is a True test to see if the column is selected
def testGetColumnSolution(self):
    actionlist = [1,2,3,4,5]
    for action in actionlist:
        if action == 1:
            val = getColumnSelection(action)
            self.assertEqual(val,"bookID")
        if action == 2:
            val = getColumnSelection(action)
            self.assertEqual(val,"bookAuthor")
        if action == 3:
            val = getColumnSelection(action)
            self.assertEqual(val,"ISBN")
        if action == 4:
            val = getColumnSelection(action)
            self.assertEqual(val,"numPurchased")
        if action == 5:
            val = getColumnSelection(action)
            self.assertEqual(val,"numCheckedOut")
        if action == 6:
            val = getColumnSelection(action)
            self.assertEqual(val,"bookTitle")
        if action == 7:
            val = getColumnSelection(action)
            self.assertEqual(val,"bookPrice")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_select_type(self) -> bool:\n row_type = self.get_type()\n return row_type.startswith('select')", "def is_select(self) -> bool:\n return self.statement.is_select", "def is_select_one(self) -> bool:\n select_one_starts = (\n 'select_one ',\n 'select_one_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_one_starts)", "def test_select_columns(self):\n self.insert()\n data = self.tbl.select()\n assert (u'id',) + tuple(data.columns) == self.tbl.columns", "def _column_selected(self):\n #get selections from ui\n selection_index = self._ckl_columns.GetSelection()\n table_selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n #get ids\n variable_id = self._columns[selection_index][0]\n table_id = self._tables[table_selection_index][0]\n\n #get selected items - format strings only exist for selected items\n selected_items = [self._columns[index][0] for index in self._ckl_columns.GetCheckedItems()]\n\n #save previous format string (if it exists)\n if self._column_selected_previous != -1 and self._columns[self._column_selected_previous][0] in selected_items:\n format_pattern = self._entry_formatstring.GetValue()\n self._datafile.query(sciplot.database.Query(\"UPDATE TableColumn SET FormatPattern = (?) WHERE VariableID = (?) AND TableID = (?);\", [format_pattern, self._columns[self._column_selected_previous][0], table_id], 0))\n\n #load new format string if applicable\n if variable_id in selected_items:\n value = self._datafile.query(sciplot.database.Query(\"SELECT FormatPattern FROM TableColumn WHERE VariableID = (?) AND TableID = (?);\", [variable_id, table_id], 1))\n self._entry_formatstring.SetValue(value[0][0][0])\n else:\n self._entry_formatstring.SetValue(\"\")\n\n self._column_selected_previous = self._ckl_columns.GetSelection()", "def requires_selection(self) -> bool:\n return True", "def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]", "def has_column(self, column):\n if column == '*':\n return True\n for c in self.columns:\n if column == c.data.name:\n return True\n return False", "def _column_exists(self, tbname, colname):\n self._check_file(tbname)\n tb = tbtool()\n tb.open(tbname)\n cols = tb.colnames()\n tb.close()\n return (colname in cols)", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def filter_column(col, row):\n return col == column", "def is_selected(self) -> bool:\r\n return self.selected", "def selectable(cls):\n return True", "def _validate_select_where(self):", "def column_selection_change():\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n source = d.get_model_by_name(sind)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n sel_cols = d.get_model_by_name(COLUMN_MULTISELECT).value\n columns = [ TableColumn(field=c, title=c) for c in sel_cols ]\n data_table = DataTable(source=source, columns=columns, width=500, height=500)\n table_widget = widgetbox(data_table, name=FIGURE_MODEL)\n d.add_root(table_widget)", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for 
item in chosen_df.columns]):\n raise ValueError('Columns do not match')", "def becomes_single_column(self):\n return self.becomes_column() and not self.is_gps()", "def is_selected(self):\n return self.container['is_selected']", "def check_cols_methane(name):\n return True if name in ['SampleDay', 'SampleHour', 'Decimal Year',\n 'Peak Area 1', 'Peak Area 2', 'Run median', 'Daily Median'] else False", "def testBadGetColumnSolution(self):\n actionlist = [\"ISBN\",9,8,10,\"5\",\"\",\"1\"]\n for action in actionlist:\n val = getColumnSelection(action)\n self.assertFalse(val)", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True", "def is_selected(self) -> bool:\n return self.proto.is_selected", "def __contains__(self, column):\n if isinstance(column, orb.Column):\n return self.__model == column.schema().model() and self.__column == column.name()\n else:\n return column == self.__column", "def _column_selection_change(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n selected_columns_indexes = [self._columns[i][0] for i in list(self._ckl_columns.GetCheckedItems())]\n database_columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n\n to_add = []\n to_remove = []\n\n for i in selected_columns_indexes:\n if i not in database_columns_indexes:\n to_add.append(i)\n \n for i in database_columns_indexes:\n if i not in selected_columns_indexes:\n to_remove.append(i)\n \n queries = []\n for variable_id in to_add:\n queries.append(sciplot.database.Query(\"INSERT INTO TableColumn (TableID, VariableID, FormatPattern) VALUES ((?), (?), (?));\", [table_id, variable_id, \"*.*\"], 0)) #add new column to table with a generic format string\n \n for variable_id in to_remove:\n queries.append(sciplot.database.Query(\"DELETE FROM TableColumn WHERE VariableID = (?);\", [variable_id], 0)) #remove unselected column from the database\n \n self._datafile.query(queries)\n\n self.refresh_table() #update table to reflect the changed columns", "def _table_selected(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n\n #update table column selection\n columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n new_checked_items = []\n column_ids = [tup[0] for tup in self._columns]\n\n for variable_id in columns_indexes:\n new_checked_items.append(column_ids.index(variable_id))\n\n self._ckl_columns.SetCheckedItems(new_checked_items)\n\n #update displayed table data\n self.refresh_table()", "def test_select_column(self):\n self.assertEqual(\"(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"(SELECT COUNT(*) FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT COUNT(*) FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"EXISTS(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_CALL.parseString(\"EXISTS(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"MAX(*)\",\n grammar._NESTED_CALL.parseString(\"MAX(*)\")[0])\n self.assertEqual(\"count\",\n grammar._SELECT_COLUMN.parseString(\"`count`\").name[0])\n\n self.assertEqual([\"count\", \"max\", \"id\"],\n [x[-1] for x in\n 
grammar._SELECT_COLUMN_LIST.parseString(\n \"(SELECT COUNT(*) FROM A) AS `count`, MAX(*) AS `max`, id\").columns])", "def IsColumnMode(self):\n return self.VertEdit.Enabled", "def is_target_buy_policies_grid_column_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='%s']\" % (self.target_buy_policies_grid_div_id, column_name))\n return self.is_element_present(column_locator)", "def is_reference_rates_grid_accept_column_checkbox(self):\n is_checkbox = False\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='%s']\" % (self.vendor_price_list_detail_reference_rates_grid_div_id, self.accept_column_name))\n column_element = self.wait().until(EC.presence_of_element_located(column_locator), 'column locator not found before specified time out')\n column_index = int(column_element.get_attribute(\"data-index\")) + 1\n column_type_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::div[@class='k-grid-content']/descendant::tr/td[%s]/input\" % (self.vendor_price_list_detail_reference_rates_grid_div_id, str(column_index)))\n column_type_element = self.wait().until(EC.presence_of_element_located(column_type_locator), 'column type locator not found before specified time out')\n column_type = column_type_element.get_attribute('type')\n if column_type == \"checkbox\":\n is_checkbox = True\n return is_checkbox", "def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n [column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def select (a_data,a_column) :\n return a_data[a_column]", "def autoselect(self):\n # type: () -> bool\n return self._autoselect", "def is_selected(self, selector):\n el = self.locate_element(selector)\n return el.is_selected()", "def TestInput( data, options ) :\n columns = data.columns\n return all( x in columns for x in options)", "def is_selected(self):\n return self._selected", "def is_selected(self):\n return self._selected", "def IsColumnShown(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsShown()", "def is_choose_col(self,l,new_col):\n\t\tfor col in l:\n\t\t\tcor=np.corrcoef(col,new_col)\n\t\t\tif cor[0,1]>0.8:\n\t\t\t\treturn False\n\t\treturn True", "def __contains__(self, item):\r\n if isinstance(item, six.string_types):\r\n return item in self.table._columns\r\n else:\r\n return item in self", "def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:\n\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(\n c\n ) and not self.target.c.contains_column(c):\n return False\n return True", "def IsColumnEditable(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsEditable()", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def is_expected_grid_column_present(self, expected_column_name):\n grid_column_locator = (By.XPATH, 
\"//th[@data-title='%s']\" %(expected_column_name))\n return self.is_element_present(grid_column_locator)", "def is_specific_column_present(self, grid_div_id, column_name):\n specific_column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-field='%s']\" % (grid_div_id, column_name))\n return self.is_element_present(specific_column_locator)", "def column_selection(type1, cat):\n col_selection = []\n for col in cat.colnames:\n if col == \"_RAJ2000\":\n continue\n if col == \"_DEJ2000\":\n continue\n desc = cat[col].info.description\n f = any([(ban in desc) for ban in BANNED_KEYWORDS])\n if f is False:\n col_selection.append(col)\n return col_selection", "def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def is_initially_selected(self, value):\n return value in self._get_selected_values_set()", "def is_selected(self):\n return self._element_call(lambda: self.el.is_selected)", "def has_column(self, column_name):\n return column_name in self._columns", "def has_column(self, column_name):\n return column_name in self._columns", "def visible(self):\r\n return self.column.visible", "def is_specific_column_on_vendor_profile_grid_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, 'divCustomerDialedDigit')]/descendant::a[text()='%s']\" % column_name)\n return self.is_element_present(column_locator)", "def ask_for_field(self, row, col):\n field = self.map.fields[row][col]\n # return the field kind, team, and if there is an entity or not\n return field.passable, field.team, field.entity is not None", "def is_select(status):\n if not status:\n return False\n return status.split(None, 1)[0].lower() == 'select'", "def is_column_load(self) -> bool:\n opts = self._orm_compile_options()\n return opts is not None and opts._for_refresh_state", "def hasSelectedText(self):\n return self.textCursor().hasSelection()", "def IsColumnShown(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n\r\n return self._columns[column].IsShown()", "def column_selected(self, event):\n\n selected_column = self.dropdown_menu.GetStringSelection()\n\n self.draw_hist(selected_column, self.df[selected_column])", "def selected(self):\n\n return self.element().is_selected() if self.exists() else False", "def select(self):\n pass", "def select(self):\n pass", "def select(self):\r\n pass", "def value(self):\n return self.element.is_selected()", "def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')", "def is_specific_column_present_in_workflow_tab(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, 'divOutboundWorkFlowGrid_')]/descendant::div[@class='k-grid-header']/descendant::th[@data-title='%s']\" % column_name)\n return self.is_element_present(column_locator)", "def isClicked(self, row, col):\n return self.clicked[row, col] == 1", "def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index", "def test_select_field():", "def isSelected(*args):", "def isSelected(*args):", "def column_exists(self, column_name):\n return column_name in self.columns", "def selected(self):\n\n return self.infodock.is_instruction_selected(self.addr)", 
"def is_country_column_present_in_re_analysis_page(self):\n return self.is_specific_column_present(self.re_analysis_grid_div_id, self.column_name_country)", "def IsSelected(self):\r\n\r\n return self._hasHilight != 0", "def select(self):\n return", "def check_column(self, table_name: str, column_name: str) -> bool:\n try:\n insp = reflection.Inspector.from_engine(self.engine)\n for col in insp.get_columns(table_name):\n if column_name in col[\"name\"]:\n return True\n return False\n except Exception as err:\n logger.error(\"check_column [error] -> %s\" % err)\n return False", "def capture(self, proposed_row, proposed_column):\n return True", "def __contains__(self, item):\r\n if isinstance(item, six.string_types):\r\n return item in self.iternames()\r\n else:\r\n # let's assume we were given a column\r\n return item in self.iterall()", "def isselected(values, feature, parent):\r\n layername=values[0]\r\n fid = feature.id()\r\n layers = QgsMapLayerRegistry.instance().mapLayers()\r\n try:\r\n layer = layers[layername]\r\n except KeyError:\r\n try:\r\n layer = [l for l in layers.iteritems() if l[1].name() == layername][0][1]\r\n except IndexError:\r\n parent.setEvalErrorString( u'No layer with id or name {} found'.format( layername ) )\r\n return False\r\n\r\n return fid in layer.selectedFeaturesIds()", "def check_for_column(self, column_name):\n if column_name not in self.data.columns:\n raise RuntimeError(\"Source {} has no '{}' column\".format(\n self.name, column_name))", "def contains_col(self, col_name):\n fmt_name = ColNameFormatter.fmt(col_name)\n col_in_solar = fmt_name in self.__solar_cols\n col_in_wind = fmt_name in self.__wind_cols\n return col_in_solar or col_in_wind", "def is_select_multiple(self) -> bool:\n select_multiple_starts = (\n 'select_multiple ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_multiple_starts)", "def becomes_column(self):\n row_type = self.get_type()\n # Note: \"begin repeat\" does become a column\n non_columns = ('begin group', 'end group', 'end repeat')\n return row_type not in non_columns", "def row1_invariant(self, target_col):\n # replace with your code\n if self.lower_row_invariant(1, target_col):\n return True\n return False", "def has_changes(self):\n return self.has_state_change(\n \"select_col.value\", \"condition.value\", \"input_val.value\"\n )", "def is_country_column_present_in_compare_price_list_pop_up(self):\n return self.is_specific_column_present(self.compare_price_list_rate_grid_div_id, self.column_name_country)", "def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")", "def filter_row(col, rw):\n return rw == row", "def cellSelected(self):\n\n x = self.tableWidget.currentRow()\n y = self.tableWidget.currentColumn()\n if y != self.CAT_COLUMN:\n return\n catText = str(self.tableWidget.item(x, y).text())\n #print(x,y, catText)\n self.tableWidget.item(x, y).setSelected(False)\n for row, code in enumerate(self.codes):\n if code['category'] == catText:\n self.tableWidget.item(row, self.CODE_COLUMN).setSelected(True)", "def joy_select(event: EventType, widget: WidgetType) -> bool:\n return event.button == JOY_BUTTON_SELECT", "def get_selected_columns(self):\n self._export_mode = 'columns'\n self._counter_update_data += 1", "def test_single(self):\n df = self.df.head(1).copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def is_select_external(self) -> bool:\n external_starts = (\n 
'select_one_external ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in external_starts)", "def check_column(self, column_name, table, verbose=True): \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n \n CHECK_COLUMN_COMMAND = \"SHOW COLUMNS FROM {0} LIKE '{1}'\".format(table, column_name)\n \n self.cursor.execute(CHECK_COLUMN_COMMAND)\n \n exists=False\n for row in self.cursor:\n exists = True\n break\n \n if verbose and exists: print(\"Column with label '{0}' found in table '{1}'\".format(column_name, table))\n elif verbose: print(\"Column with label '{0}' not found in table '{1}'\".format(column_name, table)) \n \n return exists", "def _select(self,X,y=None):\n return X.loc[:,self.columns], y", "def checking_columns(dataframe, column, function=lambda x: x, handle=lambda x: x, *args, **kwargs):\n # subsets column from dataframe trying to catch exceptions\n try:\n dataframe.loc[:, column]\n return function(*args, **kwargs)\n except KeyError as e:\n print(\"No {} column found in {}\".format(column, dataframe.columns))\n return handle(e)", "def handleTableSelectionChange(self):\n self.selectEntireRow()\n self.showSelectedDataset()", "def are_there_available_columns_to_play(self):\n available_columns = self.get_available_columns()\n return self._state.n_neutral_markers != 3 and len(available_columns) > 0" ]
[ "0.6952466", "0.6665726", "0.6658204", "0.6457513", "0.64384896", "0.6400926", "0.63496137", "0.6185076", "0.61790794", "0.6170231", "0.6149248", "0.6016119", "0.5973125", "0.59719217", "0.59619623", "0.59614056", "0.593753", "0.59325904", "0.59170127", "0.59004337", "0.5900361", "0.58984524", "0.5883453", "0.58720464", "0.585617", "0.58555984", "0.5854736", "0.58401346", "0.5821751", "0.5820942", "0.5815331", "0.5805168", "0.5788854", "0.57717294", "0.5763572", "0.5761865", "0.5756187", "0.57406604", "0.57406604", "0.5740251", "0.5737544", "0.5733075", "0.572253", "0.57220584", "0.5718566", "0.5711199", "0.5697978", "0.56873953", "0.5683307", "0.5672208", "0.5670423", "0.5652635", "0.5652635", "0.563725", "0.5627998", "0.5617231", "0.561228", "0.560565", "0.5599171", "0.55939007", "0.55845165", "0.55782664", "0.55768496", "0.55768496", "0.55679524", "0.55651283", "0.5544578", "0.5544325", "0.554013", "0.55212724", "0.5517617", "0.55122375", "0.55122375", "0.5511529", "0.5510315", "0.5505001", "0.54988605", "0.5495251", "0.5493098", "0.5491964", "0.54880464", "0.5479332", "0.5465585", "0.54655087", "0.5461259", "0.5455054", "0.54498696", "0.54374206", "0.5436343", "0.5430547", "0.5428643", "0.5417974", "0.5416197", "0.54159194", "0.54156363", "0.5405066", "0.54038924", "0.5403646", "0.54035306", "0.53854245", "0.5372456" ]
0.0
-1
This is a False test to see if the column is selected
def testBadGetColumnSolution(self): actionlist = ["ISBN",9,8,10,"5","","1"] for action in actionlist: val = getColumnSelection(action) self.assertFalse(val)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_select_type(self) -> bool:\n row_type = self.get_type()\n return row_type.startswith('select')", "def is_select(self) -> bool:\n return self.statement.is_select", "def requires_selection(self) -> bool:\n return True", "def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]", "def is_select_one(self) -> bool:\n select_one_starts = (\n 'select_one ',\n 'select_one_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_one_starts)", "def _column_selected(self):\n #get selections from ui\n selection_index = self._ckl_columns.GetSelection()\n table_selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n #get ids\n variable_id = self._columns[selection_index][0]\n table_id = self._tables[table_selection_index][0]\n\n #get selected items - format strings only exist for selected items\n selected_items = [self._columns[index][0] for index in self._ckl_columns.GetCheckedItems()]\n\n #save previous format string (if it exists)\n if self._column_selected_previous != -1 and self._columns[self._column_selected_previous][0] in selected_items:\n format_pattern = self._entry_formatstring.GetValue()\n self._datafile.query(sciplot.database.Query(\"UPDATE TableColumn SET FormatPattern = (?) WHERE VariableID = (?) AND TableID = (?);\", [format_pattern, self._columns[self._column_selected_previous][0], table_id], 0))\n\n #load new format string if applicable\n if variable_id in selected_items:\n value = self._datafile.query(sciplot.database.Query(\"SELECT FormatPattern FROM TableColumn WHERE VariableID = (?) AND TableID = (?);\", [variable_id, table_id], 1))\n self._entry_formatstring.SetValue(value[0][0][0])\n else:\n self._entry_formatstring.SetValue(\"\")\n\n self._column_selected_previous = self._ckl_columns.GetSelection()", "def test_select_columns(self):\n self.insert()\n data = self.tbl.select()\n assert (u'id',) + tuple(data.columns) == self.tbl.columns", "def _validate_select_where(self):", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def has_column(self, column):\n if column == '*':\n return True\n for c in self.columns:\n if column == c.data.name:\n return True\n return False", "def becomes_single_column(self):\n return self.becomes_column() and not self.is_gps()", "def filter_column(col, row):\n return col == column", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def selectable(cls):\n return True", "def _column_exists(self, tbname, colname):\n self._check_file(tbname)\n tb = tbtool()\n tb.open(tbname)\n cols = tb.colnames()\n tb.close()\n return (colname in cols)", "def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True", "def _column_selection_change(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n selected_columns_indexes = [self._columns[i][0] for i in list(self._ckl_columns.GetCheckedItems())]\n database_columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n\n to_add = []\n to_remove = []\n\n for i in 
selected_columns_indexes:\n if i not in database_columns_indexes:\n to_add.append(i)\n \n for i in database_columns_indexes:\n if i not in selected_columns_indexes:\n to_remove.append(i)\n \n queries = []\n for variable_id in to_add:\n queries.append(sciplot.database.Query(\"INSERT INTO TableColumn (TableID, VariableID, FormatPattern) VALUES ((?), (?), (?));\", [table_id, variable_id, \"*.*\"], 0)) #add new column to table with a generic format string\n \n for variable_id in to_remove:\n queries.append(sciplot.database.Query(\"DELETE FROM TableColumn WHERE VariableID = (?);\", [variable_id], 0)) #remove unselected column from the database\n \n self._datafile.query(queries)\n\n self.refresh_table() #update table to reflect the changed columns", "def column_selection_change():\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n source = d.get_model_by_name(sind)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n sel_cols = d.get_model_by_name(COLUMN_MULTISELECT).value\n columns = [ TableColumn(field=c, title=c) for c in sel_cols ]\n data_table = DataTable(source=source, columns=columns, width=500, height=500)\n table_widget = widgetbox(data_table, name=FIGURE_MODEL)\n d.add_root(table_widget)", "def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for item in chosen_df.columns]):\n raise ValueError('Columns do not match')", "def IsColumnMode(self):\n return self.VertEdit.Enabled", "def select (a_data,a_column) :\n return a_data[a_column]", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def check_cols_methane(name):\n return True if name in ['SampleDay', 'SampleHour', 'Decimal Year',\n 'Peak Area 1', 'Peak Area 2', 'Run median', 'Daily Median'] else False", "def is_selected(self) -> bool:\r\n return self.selected", "def column_selection(type1, cat):\n col_selection = []\n for col in cat.colnames:\n if col == \"_RAJ2000\":\n continue\n if col == \"_DEJ2000\":\n continue\n desc = cat[col].info.description\n f = any([(ban in desc) for ban in BANNED_KEYWORDS])\n if f is False:\n col_selection.append(col)\n return col_selection", "def test_select_column(self):\n self.assertEqual(\"(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"(SELECT COUNT(*) FROM A WHERE id > 0)\",\n grammar._NESTED_SELECT.parseString(\"(SELECT COUNT(*) FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"EXISTS(SELECT * FROM A WHERE id > 0)\",\n grammar._NESTED_CALL.parseString(\"EXISTS(SELECT * FROM A WHERE id > 0)\")[0])\n self.assertEqual(\"MAX(*)\",\n grammar._NESTED_CALL.parseString(\"MAX(*)\")[0])\n self.assertEqual(\"count\",\n grammar._SELECT_COLUMN.parseString(\"`count`\").name[0])\n\n self.assertEqual([\"count\", \"max\", \"id\"],\n [x[-1] for x in\n grammar._SELECT_COLUMN_LIST.parseString(\n \"(SELECT COUNT(*) FROM A) AS `count`, MAX(*) AS `max`, id\").columns])", "def autoselect(self):\n # type: () -> bool\n return self._autoselect", "def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n 
[column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def becomes_column(self):\n row_type = self.get_type()\n # Note: \"begin repeat\" does become a column\n non_columns = ('begin group', 'end group', 'end repeat')\n return row_type not in non_columns", "def select(self):\r\n pass", "def select(self):\n pass", "def select(self):\n pass", "def check_for_column(self, column_name):\n if column_name not in self.data.columns:\n raise RuntimeError(\"Source {} has no '{}' column\".format(\n self.name, column_name))", "def is_initially_selected(self, value):\n return value in self._get_selected_values_set()", "def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:\n\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(\n c\n ) and not self.target.c.contains_column(c):\n return False\n return True", "def __contains__(self, column):\n if isinstance(column, orb.Column):\n return self.__model == column.schema().model() and self.__column == column.name()\n else:\n return column == self.__column", "def is_selected(self) -> bool:\n return self.proto.is_selected", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def select(self):\n return", "def column_selected(self, event):\n\n selected_column = self.dropdown_menu.GetStringSelection()\n\n self.draw_hist(selected_column, self.df[selected_column])", "def is_column_load(self) -> bool:\n opts = self._orm_compile_options()\n return opts is not None and opts._for_refresh_state", "def _table_selected(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n\n #update table column selection\n columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n new_checked_items = []\n column_ids = [tup[0] for tup in self._columns]\n\n for variable_id in columns_indexes:\n new_checked_items.append(column_ids.index(variable_id))\n\n self._ckl_columns.SetCheckedItems(new_checked_items)\n\n #update displayed table data\n self.refresh_table()", "def is_target_buy_policies_grid_column_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='%s']\" % (self.target_buy_policies_grid_div_id, column_name))\n return self.is_element_present(column_locator)", "def row1_invariant(self, target_col):\n # replace with your code\n if self.lower_row_invariant(1, target_col):\n return True\n return False", "def execute_select_dataframe_columns(dataframe, select_dataframe_columns):\n\n logging.debug(\n '>>>>>>>>> Using select dataframe columns strategy <<<<<<<<<<<<')\n\n dataframe_columns = list(dataframe.columns)\n diff_selected_columns = list(\n set(select_dataframe_columns) 
- set(dataframe_columns))\n\n if len(diff_selected_columns) == 0:\n filtered_dataframe_columns = dataframe if len(\n select_dataframe_columns) == 0 else dataframe[select_dataframe_columns]\n else:\n raise ValueError('The provided columns [%s] does not exists.' % ', '.join(\n diff_selected_columns))\n\n return filtered_dataframe_columns", "def is_reference_rates_grid_accept_column_checkbox(self):\n is_checkbox = False\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='%s']\" % (self.vendor_price_list_detail_reference_rates_grid_div_id, self.accept_column_name))\n column_element = self.wait().until(EC.presence_of_element_located(column_locator), 'column locator not found before specified time out')\n column_index = int(column_element.get_attribute(\"data-index\")) + 1\n column_type_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::div[@class='k-grid-content']/descendant::tr/td[%s]/input\" % (self.vendor_price_list_detail_reference_rates_grid_div_id, str(column_index)))\n column_type_element = self.wait().until(EC.presence_of_element_located(column_type_locator), 'column type locator not found before specified time out')\n column_type = column_type_element.get_attribute('type')\n if column_type == \"checkbox\":\n is_checkbox = True\n return is_checkbox", "def IsColumnEditable(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsEditable()", "def is_selected(self):\n return self.container['is_selected']", "def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )", "def checking_columns(dataframe, column, function=lambda x: x, handle=lambda x: x, *args, **kwargs):\n # subsets column from dataframe trying to catch exceptions\n try:\n dataframe.loc[:, column]\n return function(*args, **kwargs)\n except KeyError as e:\n print(\"No {} column found in {}\".format(column, dataframe.columns))\n return handle(e)", "def is_select(status):\n if not status:\n return False\n return status.split(None, 1)[0].lower() == 'select'", "def bad_column_positions(self, x):\n return x.is_null()", "def _select(self,X,y=None):\n return X.loc[:,self.columns], y", "def hasSelectedText(self):\n return self.textCursor().hasSelection()", "def is_choose_col(self,l,new_col):\n\t\tfor col in l:\n\t\t\tcor=np.corrcoef(col,new_col)\n\t\t\tif cor[0,1]>0.8:\n\t\t\t\treturn False\n\t\treturn True", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def is_selected(self):\n return self._selected", "def is_selected(self):\n return self._selected", "def test_empty_cols_allowed(self):\n self.test_table.allow_empty_columns = True\n self.test_table.change_header(Path=1, SectionType=3, Value=4)\n 
self.assertEqual(self.test_table._header, [\"Path\", None, \"SectionType\",\n \"Value\"])", "def filter_row(col, rw):\n return rw == row", "def iscolumn(token):\n\n # Columns are not operators, logic separators, literals or sort order tokens\n return (\n token\n and not Token.isoperator(token)\n and not Token.islogicseparator(token)\n and not Token.isliteral(token)\n and not Token.issortorder(token)\n )", "def is_expected_grid_column_present(self, expected_column_name):\n grid_column_locator = (By.XPATH, \"//th[@data-title='%s']\" %(expected_column_name))\n return self.is_element_present(grid_column_locator)", "def __contains__(self, item):\r\n if isinstance(item, six.string_types):\r\n return item in self.table._columns\r\n else:\r\n return item in self", "def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index", "def is_specific_column_present(self, grid_div_id, column_name):\n specific_column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-field='%s']\" % (grid_div_id, column_name))\n return self.is_element_present(specific_column_locator)", "def validateModelCol(self):\n \n ret = False\n \n dc = self.__args['datacolumn'].upper() \n if \"MODEL\" in dc or dc == 'ALL':\n ret = True\n\n return ret", "def are_there_available_columns_to_play(self):\n available_columns = self.get_available_columns()\n return self._state.n_neutral_markers != 3 and len(available_columns) > 0", "def has_group_cols(self):\n return len(self.group_cols) != 0", "def cellSelected(self):\n\n x = self.tableWidget.currentRow()\n y = self.tableWidget.currentColumn()\n if y != self.CAT_COLUMN:\n return\n catText = str(self.tableWidget.item(x, y).text())\n #print(x,y, catText)\n self.tableWidget.item(x, y).setSelected(False)\n for row, code in enumerate(self.codes):\n if code['category'] == catText:\n self.tableWidget.item(row, self.CODE_COLUMN).setSelected(True)", "def handleTableSelectionChange(self):\n self.selectEntireRow()\n self.showSelectedDataset()", "def has_column(self, column_name):\n return column_name in self._columns", "def has_column(self, column_name):\n return column_name in self._columns", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def get_selected_columns(self):\n self._export_mode = 'columns'\n self._counter_update_data += 1", "def TestInput( data, options ) :\n columns = data.columns\n return all( x in columns for x in options)", "def IsSelected(self):\r\n\r\n return self._hasHilight != 0", "def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False", "def test_implicit_col(self):\n\n # If implicit columns didn't work, a ``NameError`` would be raised.\n self.assertRaises(TypeError, self.table.where, 'c_int32')\n # If overriding didn't work, no exception would be raised.\n self.assertRaises(TypeError, self.table.where,\n 'c_bool', {'c_bool': self.table.cols.c_int32})\n # External variables do not override implicit columns.\n\n def where_with_locals():\n c_int32 = self.table.cols.c_bool # this wouldn't cause an error\n self.assertIsNotNone(c_int32)\n self.table.where('c_int32')\n self.assertRaises(TypeError, where_with_locals)", "def capture(self, proposed_row, proposed_column):\n return True", "def _dataframe_column_check(df: DataFrame, compulsory_columns: Sequence) -> None:\n if not set(compulsory_columns).issubset(df.columns):\n diff = set(compulsory_columns).difference(df.columns)\n msg = (\n \"The following compulsory column(s) are missing from the \"\n f\"DataFrame: 
{diff}\"\n )\n raise ValueError(msg)", "def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")", "def ask_for_field(self, row, col):\n field = self.map.fields[row][col]\n # return the field kind, team, and if there is an entity or not\n return field.passable, field.team, field.entity is not None", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) > 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def check_columns():\n global ongoing_game\n column_1 = board[0] == board[3] == board[6] != \"*\"\n column_2 = board[1] == board[4] == board[7] != \"*\"\n column_3 = board[2] == board[5] == board[8] != \"*\"\n if column_1 or column_2 or column_3:\n ongoing_game = False\n if column_1:\n return board[0]\n elif column_2:\n return board[1]\n elif column_3:\n return board[2]\n else:\n return None", "def visible(self):\r\n return self.column.visible", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def test_select_field():", "def is_country_column_present_in_re_analysis_page(self):\n return self.is_specific_column_present(self.re_analysis_grid_div_id, self.column_name_country)", "def contains_col(self, col_name):\n fmt_name = ColNameFormatter.fmt(col_name)\n col_in_solar = fmt_name in self.__solar_cols\n col_in_wind = fmt_name in self.__wind_cols\n return col_in_solar or col_in_wind", "def IsColumnShown(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsShown()", "def find_selected(self):\r\n return None", "def allows_move(self, column):\n return ( (column >= 0)\n and (column < self.width)\n and (self.data[0][column] == \" \"))", "def get_first_selection(table, column_name):\n def replace(entry):\n if pd.isnull(entry):\n return None\n else:\n return re.sub(r',.*', '', entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)", "def is_specific_column_on_vendor_profile_grid_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, 'divCustomerDialedDigit')]/descendant::a[text()='%s']\" % column_name)\n return self.is_element_present(column_locator)", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def IsColumnShown(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n\r\n return self._columns[column].IsShown()", "def allowsMove(self, col):\n try:\n int(col)\n except:\n return False\n col = int(col)\n if col in list(range(self.width)):\n if self.board[0][col] == '':\n return True\n else:\n return False\n else:\n return False" ]
[ "0.68835926", "0.6650075", "0.64459693", "0.64009845", "0.63434935", "0.6285487", "0.6278165", "0.6227736", "0.6068364", "0.606204", "0.603624", "0.59898543", "0.59765804", "0.591728", "0.5915179", "0.59128124", "0.5842638", "0.58308476", "0.5822645", "0.58154434", "0.579638", "0.5793185", "0.5745053", "0.5737983", "0.5717814", "0.56792444", "0.5677647", "0.56678784", "0.5664763", "0.5659755", "0.56333", "0.56213397", "0.5621045", "0.5613185", "0.5613185", "0.5606922", "0.56034714", "0.5591075", "0.55846584", "0.5581053", "0.5577221", "0.55414414", "0.5529179", "0.5525963", "0.55199444", "0.5518179", "0.55167365", "0.5488876", "0.54803795", "0.54720354", "0.54704124", "0.5469619", "0.54685336", "0.5447216", "0.5443348", "0.54326904", "0.5430405", "0.5428934", "0.54167217", "0.54132426", "0.54132426", "0.54131377", "0.5411186", "0.54047173", "0.54030424", "0.54026914", "0.5397852", "0.5370572", "0.53670996", "0.5365927", "0.53648776", "0.536277", "0.5357487", "0.53558886", "0.53558886", "0.5349639", "0.53492296", "0.5342312", "0.53351367", "0.53346574", "0.53343", "0.5331583", "0.5331243", "0.5327053", "0.5324374", "0.53241223", "0.532084", "0.53182596", "0.53070885", "0.53058577", "0.5302632", "0.5302281", "0.530069", "0.53006035", "0.5297137", "0.52943915", "0.5280935", "0.5280889", "0.5280303", "0.5269948" ]
0.6274609
7
First concat state `indexes`, `preds` and `target` since they were stored as lists. After that, compute list of groups that will help in keeping together predictions about the same query. Finally, for each group compute the `_metric` if the number of positive targets is at least 1, otherwise behave as specified by `self.empty_target_action`.
def compute(self) -> Tensor: if self.samples: return self.average_precisions.float() / self.total else: # pred_image_indices = torch.cat(self.pred_image_indices, dim=0) pred_probs = torch.cat(self.pred_probs, dim=0) pred_labels = torch.cat(self.pred_labels, dim=0) pred_bboxes = torch.cat(self.pred_bboxes, dim=0) # target_image_indices = torch.cat(self.target_image_indices, dim=0) target_labels = torch.cat(self.target_labels, dim=0) target_bboxes = torch.cat(self.target_bboxes, dim=0) # pred_index = torch.nonzero((pred_labels == 1)) # pred_probs = pred_probs[pred_index] # pred_bboxes = pred_bboxes[pred_index] # target_index = torch.nonzero((target_labels == 1)) # target_bboxes = target_bboxes[target_index] # _, index_sorted = torch.sort(pred_probs) # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy() # target_bboxes = target_bboxes.cpu().detach().numpy() pred_probs = pred_probs.cpu().detach().numpy() pred_labels = pred_labels.cpu().detach().numpy() pred_bboxes = pred_bboxes.cpu().detach().numpy() target_labels = target_labels.cpu().detach().numpy() target_bboxes = target_bboxes.cpu().detach().numpy() pred_probs = pred_probs[pred_labels == 1] pred_bboxes = pred_bboxes[pred_labels == 1] target_bboxes = target_bboxes[target_labels == 1] preds_sorted_idx = np.argsort(pred_probs)[::-1] pred_bboxes = pred_bboxes[preds_sorted_idx] x, y = calculate_precision_recall(target_bboxes, pred_bboxes) if len(x) >= 2: return auc(x, y) else: return 0 # return mean_average_precision( # pred_image_indices, # pred_probs, # pred_labels, # pred_bboxes, # target_image_indices, # target_labels, # target_bboxes, # self.iou_threshold, # self.ap_calculation, # )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_summaries(self):\n max_outputs = 3\n summaries = []\n\n # images\n # ------------------------------------------------\n summary_input_shape = image_utils.get_image_summary_shape(self._input_shape_visualisation)\n\n # input images\n input_summary_reshape = tf.reshape(self._input_values, summary_input_shape, name='input_summary_reshape')\n input_summary_op = tf.summary.image('input_images', input_summary_reshape, max_outputs=max_outputs)\n summaries.append(input_summary_op)\n\n # degraded, target and completed images, and histograms where relevant\n target = self._dual.get_op('target')\n degraded = self._dual.get_op('degraded')\n decoding_op = self.get_decoding_op()\n\n output_hist = tf.summary.histogram(\"output\", decoding_op)\n summaries.append(output_hist)\n\n input_hist = tf.summary.histogram(\"input\", self._input_values)\n summaries.append(input_hist)\n\n # network output when presented with blank\n blank_output_first = self._dual.get_op('blank_output_first')\n blank_first = tf.summary.image('blank_first', tf.reshape(blank_output_first, summary_input_shape))\n summaries.append(blank_first)\n\n blank_output_last = self._dual.get_op('blank_output_last')\n blank_last = tf.summary.image('blank_last', tf.reshape(blank_output_last, summary_input_shape))\n summaries.append(blank_last)\n \n with tf.name_scope('optimize'):\n completed_summary_reshape = tf.reshape(decoding_op, summary_input_shape, 'completed_summary_reshape')\n summaries.append(tf.summary.image('b_completed', completed_summary_reshape))\n\n if self._hparams.bt_degrade:\n degraded_summary_reshape = tf.reshape(degraded, summary_input_shape, 'degraded_summary_reshape')\n summaries.append(tf.summary.image('a_degraded', degraded_summary_reshape))\n\n target_summary_reshape = tf.reshape(target, summary_input_shape, 'target_summary_reshape')\n summaries.append(tf.summary.image('c_target', target_summary_reshape))\n\n # display slow weights as images and distributions\n with tf.name_scope('slow-weights'):\n w = self._dual.get_op('w')\n add_square_as_square(summaries, w, 'w')\n\n w_hist = tf.summary.histogram(\"w\", w)\n summaries.append(w_hist)\n\n alpha = self._dual.get_op('alpha')\n add_square_as_square(summaries, alpha, 'alpha')\n\n alpha_hist = tf.summary.histogram(\"alpha\", alpha)\n summaries.append(alpha_hist)\n\n if self._hparams.bias:\n bias = self._dual.get_op('bias')\n bias_image_shape, _ = image_utils.square_image_shape_from_1d(self._hparams.filters)\n bias_image = tf.reshape(bias, bias_image_shape, name='bias_summary_reshape')\n summaries.append(tf.summary.image('bias', bias_image))\n\n bias_hist = tf.summary.histogram(\"bias\", bias)\n summaries.append(bias_hist)\n\n # eta\n eta_op = self._dual.get_op('eta')\n eta_scalar = tf.reduce_sum(eta_op)\n eta_summary = tf.summary.scalar('eta', eta_scalar)\n summaries.append(eta_summary)\n\n # x_shift\n x_shift_op = self._dual.get_op('x_shift')\n xs_scalar = tf.reduce_sum(x_shift_op)\n xs_summary = tf.summary.scalar('x_shift', xs_scalar)\n summaries.append(xs_summary)\n\n # display fast weights (eta and hebbian), as image, scalars and histogram\n with tf.name_scope('fast-weights'):\n\n # as images\n hebb = self._dual.get_op('hebb')\n add_square_as_square(summaries, hebb, 'hebb')\n\n # as scalars\n hebb_summary = tf_build_stats_summaries_short(hebb, 'hebb')\n summaries.append(hebb_summary)\n\n # as histograms\n hebb_hist = tf.summary.histogram(\"hebb\", hebb)\n summaries.append(hebb_hist)\n\n hebb_per_neuron = tf.reduce_sum(tf.abs(hebb), 0)\n hebb_per_neuron = 
tf.summary.histogram('hebb_pn', hebb_per_neuron)\n summaries.append(hebb_per_neuron)\n\n # outer products\n outer_first = self._dual.get_op('outer_first')\n outer_last = self._dual.get_op('outer_last')\n add_square_as_square(summaries, outer_first, 'outer_first')\n add_square_as_square(summaries, outer_last, 'outer_last')\n\n # optimization related quantities\n with tf.name_scope('optimize'):\n # loss\n loss_op = self.get_loss_op()\n loss_summary = tf.summary.scalar('loss', loss_op)\n summaries.append(loss_summary)\n\n # losses as an image\n losses = self._dual.get_op(\"losses\")\n shape = losses.get_shape().as_list()\n volume = np.prod(shape[1:])\n losses_image_shape, _ = image_utils.square_image_shape_from_1d(volume)\n losses_image = tf.reshape(losses, losses_image_shape)\n summaries.append(tf.summary.image('losses', losses_image))\n\n input_stats_summary = tf_build_stats_summaries_short(self._input_values, 'input-stats')\n summaries.append(input_stats_summary)\n\n return summaries", "def aggregate(self, metric, target, lambd=1, theta=500):\n\n\t\t# Check that target and metric are valid and store them in memory\n\t\tassert metric in self.metrics, 'Metric is invalid: %r' % metric\n\t\t#assert target in self.targets, 'Target is invalid: %r' % target\n\t\t\n\t\t\"\"\"\n\t\tTODO: Accomodate multi-target aggregations\n\t\t\n\t\tassert isinstance(target, list)\n\t\tassert target.all() in self.targets, 'One or more targets are invalid: %r' % target\n\t\t\n\t\tif len(target) > 1:\n\t\t\tself.agg_target = \"_\".join(target)\n\t\t\tdata_array = self.data_array\n\t\t\t#.assign_coords(target=self.agg_target).expand_dims('target')\n\t\telse:\n\t\t\tself.agg_target = target\n\t\t\tdata_array = self.data_array\n\t\t\"\"\"\n\t\t\n\t\t\n\t\tself.agg_target = target\n\t\tself.agg_metric = metric\n\t\t\n\n\t\tself.rankings, self.thresholds = rank_by_feature(self.data_array, self.agg_target, self.agg_metric)\n\n\t\tr = self.rankings\n\t\tk = self.thresholds\n\n\t\tw = [np.zeros(len(self.thresholds))]\n\t\t# Compute the number of within-threshold ranking functions associated with each item\n\t\tbelow_thresh = r <= k\n\t\tnx_vec = below_thresh.sum(axis=1)\n\n\t\t# Iterating through the items (x), use \"good\" ranking functions and their rankings \n\t\tfor nx, r_i in zip(nx_vec, r*below_thresh):\n\t\t\tif nx >= theta:\n\t\t\t\tmu = sum(r_i) / nx\n\t\t\t\t# Compute the update for each feature (i) based on whether or not r_ix \n\t\t\t\t# is above threshold\n\t\t\t\tupdate = np.array([k_i + 1 if r_ix==0 else r_ix for r_ix, k_i in zip(r_i, k)])\n\t\t\t\tdelta = (update - mu)**2\n\t\t\t\t\n\t\t\t\tw.append(w[-1] + lambd * delta)\n\t\t# Normalize each w_t to sum to 1\n\t\tw = np.array([w_t / np.linalg.norm(w_t, ord=1) for w_t in w[1:]])\n\t\t# Keep the final set of weights\n\t\ttry:\n\t\t\tself.weights = w[-1]\n\t\texcept:\n\t\t\t#return nx\n\t\t\traise Exception('no features above threshold')\n\t\t\n\t\treturn {mp: wt for mp, wt in zip(self.metapaths, self.weights)}", "def _average_duplicates(outputs, target, batch_first=True):\r\n batch_size = target.size(0)\r\n reduce_dim = 1 if batch_first else 0\r\n if batch_first:\r\n outputs = outputs.view(batch_size, -1, *outputs.shape[1:])\r\n else:\r\n outputs = outputs.view(-1, batch_size, *outputs.shape[1:])\r\n outputs = outputs.mean(dim=reduce_dim)\r\n return outputs", "def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n batch_size = len(target_labels)\n self.batch_size_per_image = self._batch_size_per_image * batch_size\n\n target_labels_batch = 
torch.cat(target_labels, dim=0)\n\n positive = torch.where(target_labels_batch >= 1)[0]\n negative = torch.where(target_labels_batch == 0)[0]\n\n num_pos = self.get_num_pos(positive)\n pos_idx = self.select_positives(\n positive, num_pos, target_labels_batch, fg_probs)\n\n num_neg = self.get_num_neg(negative, num_pos)\n neg_idx = self.select_negatives(\n negative, num_neg, target_labels_batch, fg_probs)\n\n # Comb Head with sampling concatenates masks after sampling so do not split them here\n # anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]\n # return pos_idx.split(anchors_per_image, 0), neg_idx.split(anchors_per_image, 0)\n return [pos_idx], [neg_idx]", "def Allreduce4Group2(net, blobs, reduced_affix, gpu_indices):\n a, b, c, d = blobs\n gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices\n # a_reduced <- a+b, c_reduced <- c + d\n a_reduced = net.Add(\n [a, b],\n str(a) + reduced_affix,\n device_option=OnGPU(gpu_a)\n )\n c_reduced = net.Add(\n [c, d],\n str(c) + reduced_affix,\n device_option=OnGPU(gpu_c)\n )\n # copy from c_reduce(gpu_c) to c_reduce_copy(gpu_a)\n c_reduced_copy = c_reduced.Copy(\n [],\n str(c_reduced) + '_copy',\n device_option=OnGPU(gpu_a)\n )\n # a_reduced <- a_reduced + c_reduced_copy\n a_reduced = a_reduced.Add(c_reduced_copy, a_reduced, device_option=OnGPU(gpu_a))\n # broadcast a_reduced to c_reduced\n c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))\n # broadcast to b and d\n b_reduced = a_reduced.Copy(\n [],\n str(b) + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n d_reduced = c_reduced.Copy(\n [],\n str(d) + reduced_affix,\n device_option=OnGPU(gpu_d)\n )\n return a_reduced, b_reduced, c_reduced, d_reduced", "def _per_batch_set_op(set_op, x):\n x = tuple(math_ops.cast(i, dtypes.int64) for i in x)\n pred_begin, pred_end, pred_label, gold_begin, gold_end, gold_label = x\n # Combine spans together so they can be compared as one atomic unit. For\n # example:\n # If pred_begin = [0, 3, 5], pred_end = [2, 4, 6]\n # gold_begin = [0, 5], gold_end = [2, 7]\n # Then we combine the spans into:\n # pred = [[0, 2], [3, 4], [5, 6]]\n # gold = [[0, 2], [5, 7]]\n #\n # In the sets operation, we want [0, 2] to be treated as one atomic comparison\n # unit (both begin=0 and end=2 offsets must match). Conversely, partial\n # matches (like [5, 6] and [5, 7]) are not a match.\n #\n # This is done by constructing a SparseTensor (containing span begin, end,\n # label points) for predictions and labels.\n pred_begin = array_ops.expand_dims(pred_begin, 1)\n pred_end = array_ops.expand_dims(pred_end, 1)\n gold_begin = array_ops.expand_dims(gold_begin, 1)\n gold_end = array_ops.expand_dims(gold_end, 1)\n # Because the last dimension is ignored in comparisons for tf.sets operations,\n # we add an unused last dimension.\n unused_last_pred_dim = array_ops.zeros_like(pred_begin)\n unused_last_gold_dim = array_ops.zeros_like(gold_begin)\n pred_indices = array_ops.concat([pred_begin, pred_end, unused_last_pred_dim],\n 1)\n gold_indices = array_ops.concat([gold_begin, gold_end, unused_last_gold_dim],\n 1)\n\n # set_ops require the bounding shape to match. 
Find the bounding shape\n # with the max number\n max_shape = math_ops.reduce_max(\n array_ops.concat([pred_indices, gold_indices], 0), 0)\n max_shape = max_shape + array_ops.ones_like(max_shape)\n\n pred = sparse_tensor.SparseTensor(pred_indices, pred_label, max_shape)\n pred = sparse_ops.sparse_reorder(pred)\n gold = sparse_tensor.SparseTensor(gold_indices, gold_label, max_shape)\n gold = sparse_ops.sparse_reorder(gold)\n results = set_op(pred, gold).indices\n num_results = control_flow_ops.cond(\n array_ops.size(results) > 0,\n true_fn=lambda: array_ops.shape(results)[0],\n false_fn=lambda: constant_op.constant(0))\n return num_results", "def populate(self, batches='all', verbose=True):\n\n dk = self.name\n meta = self._meta\n data = self._data\n stack = qp.Stack(name='aggregations', add_data={dk: (data, meta)})\n batches = stack._check_batches(dk, batches)\n for name in batches:\n batch = meta['sets']['batches'][name]\n xys = batch['x_y_map']\n fs = batch['x_filter_map']\n fy = batch['y_filter_map']\n my = batch['yks']\n total_len = len(xys) + len(batch['y_on_y'])\n for idx, xy in enumerate(xys, start=1):\n x, y = xy\n if x == '@':\n if fs[y[0]] is None:\n fi = 'no_filter'\n else:\n fi = {fs[y[0]]: {fs[y[0]]: 0}}\n stack.add_link(dk, fi, x='@', y=y)\n else:\n if fs[x] is None:\n fi = 'no_filter'\n else:\n fi = {fs[x]: {fs[x]: 0}}\n stack.add_link(dk, fi, x=x, y=y)\n if verbose:\n done = float(idx) / float(total_len) *100\n print('\\r', end=' ')\n time.sleep(0.01)\n print('Batch [{}]: {} %'.format(name, round(done, 1)), end=' ')\n sys.stdout.flush()\n for idx, y_on_y in enumerate(batch['y_on_y'], len(xys)+1):\n if fy[y_on_y] is None:\n fi = 'no_filter'\n else:\n fi = {fy[y_on_y]: {fy[y_on_y]: 1}}\n stack.add_link(dk, fi, x=my[1:], y=my)\n if verbose:\n done = float(idx) / float(total_len) *100\n print('\\r', end=' ')\n time.sleep(0.01)\n print('Batch [{}]: {} %'.format(name, round(done, 1)), end=' ')\n sys.stdout.flush()\n if verbose:\n print('\\n')\n return stack", "def calculate_single_cycle_gan_metrics(source, target):\n metric_results = MetricsContainer(len(source))\n for i, x in enumerate(source):\n scores = torch.mean(torch.abs(x.view(1, -1) - target), dim=1)\n _, indices = torch.sort(scores, descending=False)\n indices = indices.cpu().numpy()\n rank = np.nonzero(indices == i)[0][0] + 1\n metric_results.update(rank)\n return metric_results.get_results()", "def smartdatasplit(target, *xs, **kw):\n random = kw[\"random\"] if \"random\" in kw else False\n keepmincount = kw[\"keepmincount\"] if \"keepmincount\" in kw else 1\n holdmincount = kw[\"holdmincount\"] if \"holdmincount\" in kw else 1\n xs = (target,) + xs\n assert([x.shape[0] for x in xs].count(xs[0].shape[0]) == len(xs))\n batsize = xs[0].shape[0]\n globcounts = {}\n # gather class usage stats\n for i in range(batsize):\n k = target[i]\n if k not in globcounts:\n globcounts[k] = 0\n globcounts[k] += 1\n # create new datas\n keepsize = 0\n holdsize = 0\n holdcounts = {}\n keepcounts = {}\n for k in globcounts:\n if globcounts[k] >= keepmincount + holdmincount:\n holdsize += holdmincount\n holdcounts[k] = holdmincount\n keepsize += globcounts[k] - holdmincount\n keepcounts[k] = globcounts[k] - holdmincount\n keepxs = [np.zeros((keepsize,) + x.shape[1:], dtype=x.dtype) for x in xs]\n holdxs = [np.zeros((holdsize,) + x.shape[1:], dtype=x.dtype) for x in xs]\n # populate datas\n idxs = np.arange(0, batsize)\n if random:\n np.random.shuffle(idxs)\n kidx = 0\n hidx = 0\n for i in range(batsize):\n idx = idxs[i]\n tidx = 
target[idx]\n if tidx in holdcounts:\n if holdcounts[tidx] > 0:\n holdcounts[tidx] -= 1\n for x, y in zip(holdxs, xs):\n x[kidx, ...] = y[idx, ...]\n kidx += 1\n elif keepcounts[tidx] > 0:\n keepcounts[tidx] -= 1\n for x, y in zip(keepxs, xs):\n x[hidx, ...] = y[idx, ...]\n hidx += 1\n else:\n print \"sum ting wong\"\n return tuple(keepxs), tuple(holdxs)", "def _finalize_targets(target_values, binarize_target, num_classes):\n\n target_values[target_values == target_val_utils.DEAD_STORM_INTEGER] = 0\n\n if binarize_target:\n target_values = (target_values == num_classes - 1).astype(int)\n num_classes_to_predict = 2\n else:\n num_classes_to_predict = num_classes + 0\n\n if num_classes_to_predict == 2:\n print('Fraction of {0:d} examples in positive class: {1:.3f}'.format(\n len(target_values), numpy.mean(target_values)\n ))\n return target_values\n\n target_matrix = keras.utils.to_categorical(\n target_values, num_classes_to_predict)\n\n class_fractions = numpy.mean(target_matrix, axis=0)\n print('Fraction of {0:d} examples in each class: {1:s}\\n'.format(\n len(target_values), str(class_fractions)\n ))\n\n return target_matrix", "def _binary_groups_stat_scores(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n num_groups: int,\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:\n if validate_args:\n _binary_stat_scores_arg_validation(threshold, \"global\", ignore_index)\n _binary_stat_scores_tensor_validation(preds, target, \"global\", ignore_index)\n _groups_validation(groups, num_groups)\n\n preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)\n groups = _groups_format(groups)\n\n indexes, indices = torch.sort(groups.squeeze(1))\n preds = preds[indices]\n target = target[indices]\n\n split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()\n\n group_preds = list(torch.split(preds, split_sizes, dim=0))\n group_target = list(torch.split(target, split_sizes, dim=0))\n\n return [_binary_stat_scores_update(group_p, group_t) for group_p, group_t in zip(group_preds, group_target)]", "def _groups_reduce(\n group_stats: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]\n) -> Dict[str, torch.Tensor]:\n return {f\"group_{group}\": torch.stack(stats) / torch.stack(stats).sum() for group, stats in enumerate(group_stats)}", "def _build_train_op(self):\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n\n replay_chosen_qs = []\n for i in range(len(self.gammas)):\n replay_chosen_q = tf.reduce_sum(\n self._replay_net_outputs.q_values[i] * replay_action_one_hot,\n reduction_indices=1,\n name='replay_chosen_q_{}'.format(i))\n replay_chosen_qs.append(replay_chosen_q)\n\n targets = self._build_target_q_op()\n loss = 0.\n\n for i, (target,\n replay_chosen_q) in enumerate(zip(targets, replay_chosen_qs)):\n gamma_loss = tf.losses.huber_loss(\n tf.stop_gradient(target),\n replay_chosen_q,\n reduction=tf.losses.Reduction.NONE)\n\n loss += gamma_loss\n if self.summary_writer is not None:\n tf.summary.scalar('Losses/GammaLoss_{}'.format(i),\n tf.reduce_mean(gamma_loss))\n\n # Divide by the number of gammas to preserve scale.\n loss = loss / self.number_of_gammas\n\n if self.summary_writer is not None:\n with tf.variable_scope('Losses'):\n tf.summary.scalar('HuberLoss', tf.reduce_mean(loss))\n\n def clip_if_not_none(grad, clip_norm=5.):\n \"\"\"Clip the gradient 
only if not None.\"\"\"\n if grad is None:\n return grad\n return tf.clip_by_norm(grad, clip_norm)\n\n if self.gradient_clipping_norm is not None:\n # Clip gradients to test stability.\n grads_and_vars = self.optimizer.compute_gradients(tf.reduce_mean(loss))\n clipped_gradients = [\n (clip_if_not_none(grad, clip_norm=self.gradient_clipping_norm), var)\n for grad, var in grads_and_vars\n ]\n\n return self.optimizer.apply_gradients(clipped_gradients)\n else:\n return self.optimizer.minimize(tf.reduce_mean(loss))", "def estimate_metrics(\n self,\n all_labels,\n all_preds\n ):\n n_predictions = len(all_preds)\n\n for metric in self.metrics:\n # report everything but loss\n if metric.__name__ is not \"loss\":\n if isinstance(all_preds[0], list):\n result = np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])\n else:\n result = metric(all_labels, all_preds)\n \n if metric.__name__ in self.multi_batch_metrics:\n self.multi_batch_metrics[metric.__name__].append(result)\n self.multi_batch_metrics[\"len_\" + metric.__name__].append(\n n_predictions)\n else:\n self.multi_batch_metrics[metric.__name__] = [result]\n self.multi_batch_metrics[\"len_\" + metric.__name__] = [n_predictions]", "def batch_statistics(outputs, targets, iou_threshold):\n batch_metrics = []\n for sample_i in range(len(outputs)):\n\n if outputs[sample_i] is None:\n continue\n\n if outputs[sample_i]['boxes'] is None:\n continue\n\n output = outputs[sample_i]\n pred_boxes = output['boxes']\n pred_scores = output['scores'] if len(output['scores'].shape) == 1 else output['scores'][:,0]\n pred_labels = output['labels'] if len(output['labels'].shape) == 1 else output['labels'][:,0]\n\n true_positives = np.zeros(pred_boxes.shape[0])\n\n annotations = targets[sample_i]\n target_labels = annotations['labels']\n target_boxes = annotations['boxes']\n\n if len(target_boxes):\n detected_boxes = []\n\n for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):\n\n # If targets are found break\n if len(detected_boxes) == len(pred_boxes):\n break\n\n # Ignore if label is not one of the target labels\n if pred_label not in target_labels:\n continue\n\n iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0)\n if iou >= iou_threshold and box_index not in detected_boxes:\n true_positives[pred_i] = 1\n detected_boxes += [box_index]\n batch_metrics.append([true_positives, pred_scores.cpu(), pred_labels.cpu()])\n return batch_metrics", "def getResult(targets, i=None):", "def _build_summary_op(self, results=None, features=None, labels=None):\n summary_op = []\n for summary in self.summaries:\n if summary == summarizer.SummaryOptions.ACTIVATIONS:\n activations = get_tracked(tf.GraphKeys.ACTIVATIONS)\n summary_op += summarizer.add_activations_summary(activations)\n elif summary == summarizer.SummaryOptions.VARIABLES:\n variables = tf.trainable_variables()\n summary_op += summarizer.add_trainable_vars_summary(variables)\n elif summary == summarizer.SummaryOptions.GRADIENTS and self._clip_gradients > 0.0:\n summary_op += summarizer.add_gradients_summary(self._grads_and_vars)\n elif summary == summarizer.SummaryOptions.LOSS:\n summary_op += summarizer.add_loss_summaries(self._total_loss, self._loss)\n elif summary == summarizer.SummaryOptions.LEARNING_RATE:\n summary_op += summarizer.add_learning_rate_summaries()\n elif summary == summarizer.SummaryOptions.IMAGE_INPUT:\n summary_op += summarizer.add_image_summary(features, op_name='inputs')\n elif summary == 
summarizer.SummaryOptions.IMAGE_RESULT:\n summary_op += summarizer.add_image_summary(results, op_name='results')\n\n # no need to tf.summary.merge(summary_op), for now we merge all at hook level\n return summary_op", "def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]\n fg_probs = fg_probs.split(anchors_per_image, 0)\n\n pos_idx = []\n neg_idx = []\n for img_labels, img_fg_probs in zip(target_labels, fg_probs):\n positive = torch.where(img_labels >= 1)[0]\n negative = torch.where(img_labels == 0)[0]\n\n num_pos = self.get_num_pos(positive)\n pos_idx_per_image_mask = self.select_positives(\n positive, num_pos, img_labels, img_fg_probs)\n pos_idx.append(pos_idx_per_image_mask)\n\n num_neg = self.get_num_neg(negative, num_pos)\n neg_idx_per_image_mask = self.select_negatives(\n negative, num_neg, img_labels, img_fg_probs)\n neg_idx.append(neg_idx_per_image_mask)\n\n return pos_idx, neg_idx", "def update(self, preds: Tensor, target: Tensor, groups: Tensor) -> None:\n if self.task == \"demographic_parity\":\n if target is not None:\n rank_zero_warn(\"The task demographic_parity does not require a target.\", UserWarning)\n target = torch.zeros(preds.shape)\n\n group_stats = _binary_groups_stat_scores(\n preds, target, groups, self.num_groups, self.threshold, self.ignore_index, self.validate_args\n )\n\n self._update_states(group_stats)", "def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]\n fg_probs = fg_probs.split(anchors_per_image, 0)\n\n pos_idx = []\n neg_idx = []\n for img_labels, img_fg_probs in zip(target_labels, fg_probs):\n negative = torch.where(img_labels == 0)[0]\n\n # positive anchor sampling\n pos_idx_per_image_mask = (img_labels >= 1).to(dtype=torch.uint8)\n pos_idx.append(pos_idx_per_image_mask)\n\n num_neg = int(self.negative_ratio * pos_idx_per_image_mask.sum())\n # protect against not enough negative examples and sample at least one neg if possible\n num_neg = min(negative.numel(), max(num_neg, 1))\n neg_idx_per_image_mask = self.select_negatives(\n negative, num_neg, img_labels, img_fg_probs)\n neg_idx.append(neg_idx_per_image_mask)\n\n return pos_idx, neg_idx", "def _squad_update(preds: Dict[str, str], target: List[Dict[str, List[Dict[str, List[Dict[str, Any]]]]]]) ->Tuple[Tensor, Tensor, Tensor]:\n f1 = tensor(0.0)\n exact_match = tensor(0.0)\n total = tensor(0)\n for article in target:\n for paragraph in article['paragraphs']:\n for qa in paragraph['qas']:\n total += 1\n if qa['id'] not in preds:\n rank_zero_warn(f\"Unanswered question {qa['id']} will receive score 0.\")\n continue\n ground_truths = list(map(lambda x: x['text'], qa['answers']))\n pred = preds[qa['id']]\n exact_match += _metric_max_over_ground_truths(_compute_exact_match_score, pred, ground_truths)\n f1 += _metric_max_over_ground_truths(_compute_f1_score, pred, ground_truths)\n return f1, exact_match, total", "def metrics_group():", "def compute_batch_metrics(y_true, y_pred, num_labels = 4): \n \n # Declarating list to store results\n acc = []\n pre = []\n rec = []\n det = []\n rmse = []\n \n for batch in np.arange(y_true.shape[0]):\n \n # Declarating list to store individual results\n batch_acc = []\n batch_pre = []\n batch_rec = []\n batch_det = []\n batch_rmse = []\n \n for label in np.arange(num_labels):\n \n # Computing and storing metrics for each class\n 
batch_acc.append(accuracy_score(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_pre.append(precision_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_rec.append(recall_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_det.append(detection_rate(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_rmse.append(sqrt(mse(y_true[batch, label, :], y_pred[batch, label, :])))\n \n # Storing mean results of the instance\n acc.append(np.mean(batch_acc))\n pre.append(np.mean(batch_pre))\n rec.append(np.mean(batch_rec))\n det.append(np.mean(batch_det))\n rmse.append(np.mean(batch_rmse))\n \n # Returning mean of all results\n return np.mean(acc), np.mean(pre), np.mean(rec), np.mean(det), np.mean(rmse)", "def __init__(\n self,\n queries: Tensor,\n query_labels: Sequence[int],\n targets: Tensor,\n target_labels: Sequence[int],\n known_classes: IntTensor,\n distance: str = 'cosine',\n metrics: Sequence[Union[str, ClassificationMetric]] = [\n 'binary_accuracy', 'f1score'\n ], # noqa\n tb_logdir: str = None,\n k: int = 1,\n matcher: Union[str, ClassificationMatch] = 'match_nearest',\n distance_thresholds: Optional[FloatTensor] = None):\n super().__init__()\n self.targets = targets\n self.target_labels = target_labels\n self.distance = distance\n self.evaluator = MemoryEvaluator()\n # typing requires this weird formulation of creating a new list\n self.metrics: List[ClassificationMetric] = ([\n make_classification_metric(m) for m in metrics\n ])\n self.k = k\n self.matcher = matcher\n if distance_thresholds is not None:\n self.distance_thresholds = distance_thresholds\n else:\n self.distance_thresholds = tf.constant([math.inf])\n\n if tb_logdir:\n tb_logdir = str(Path(tb_logdir) / 'index/')\n self.tb_writer = tf.summary.create_file_writer(tb_logdir)\n print('TensorBoard logging enable in %s' % tb_logdir)\n else:\n self.tb_writer = None\n\n query_labels = tf.convert_to_tensor(query_labels)\n query_labels = tf.cast(query_labels, dtype='int32')\n\n # Create separate validation sets for the known and unknown classes\n known_classes = tf.cast(known_classes, dtype='int32')\n known_classes = tf.reshape(known_classes, (-1))\n\n # Use broadcasting to do a y X known_classes equality check. 
By adding\n # a dim to the start of known_classes and a dim to the end of y, this\n # essentially checks `for ck in known_classes: for cy in y: ck == cy`.\n # We then reduce_any to find all rows in y that match at least one\n # class in known_classes.\n # See https://numpy.org/doc/stable/user/basics.broadcasting.html\n broadcast_classes = tf.expand_dims(known_classes, axis=0)\n broadcast_labels = tf.expand_dims(query_labels, axis=-1)\n known_mask = tf.math.reduce_any(broadcast_classes == broadcast_labels,\n axis=1)\n known_idxs = tf.squeeze(tf.where(known_mask))\n unknown_idxs = tf.squeeze(tf.where(~known_mask))\n\n with tf.device(\"/cpu:0\"):\n self.queries_known = tf.gather(queries, indices=known_idxs)\n self.query_labels_known = (tf.gather(query_labels,\n indices=known_idxs))\n # Expand to 2D if we only have a single example\n if tf.rank(self.queries_known) == 1:\n self.queries_known = (tf.expand_dims(self.queries_known,\n axis=0))\n self.query_labels_known = (tf.expand_dims(\n self.query_labels_known, axis=0))\n\n self.queries_unknown = tf.gather(queries, indices=unknown_idxs)\n self.query_labels_unknown = (tf.gather(query_labels,\n indices=unknown_idxs))\n # Expand to 2D if we only have a single example\n if tf.rank(self.queries_unknown) == 1:\n self.queries_unknown = (tf.expand_dims(self.queries_unknown,\n axis=0))\n self.query_labels_unknown = (tf.expand_dims(\n self.query_labels_unknown, axis=0))", "def _index_group_with_subgroup(self, **kwargs):\n\n log.setLevel(self.log_level)\n # get a list of all the uri to index\n uri_list = kwargs.get('uri_list', self.get_uri_list())\n if not uri_list:\n log.info(\"0 items to index\")\n return\n # results = results[:100]\n # Start processing through uri\n batch_file = os.path.join(CFG.dirs.logs, \"batch_list.txt\")\n # with open(batch_file, \"w\") as fo:\n # fo.write(\"{\")\n log.info(\"'%s' items to index\", len(uri_list))\n self.time_start = datetime.datetime.now()\n batch_size = kwargs.get(\"batch_size\", 12000)\n if len(uri_list) > batch_size:\n batch_end = batch_size\n else:\n batch_end = len(uri_list)\n batch_start = 0\n batch_num = 1\n self.batch_data = {}\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n self.batch_uris = {}\n self.batch_uris[batch_num] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n end = False\n last = False\n final_list = []\n expand_index = kwargs.get(\"expand_index\", True)\n while not end:\n log.debug(\"batch %s: %s-%s\", batch_num, batch_start, batch_end)\n sub_batch = []\n j = 0\n for i in range(batch_start, batch_end):\n # for i, subj in enumerate(uri_list[batch_start:batch_end]):\n qry_size = kwargs.get(\"qry_size\", 1000)\n if j < qry_size:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n if j == qry_size -1 or i == batch_end - 1:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n # with open(batch_file, \"a\") as fo:\n # fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):\n # [item[0].sparql\n # for item in sub_batch]})[1:-1]+\",\\n\")\n if not kwargs.get(\"no_threading\", False):\n th = threading.Thread(name=batch_start + i + 1,\n target=self._index_sub,\n args=(sub_batch,\n i+1,\n batch_num,))\n th.start()\n else:\n self._index_sub(sub_batch, i+1, batch_num)\n j = 0\n final_list += sub_batch\n sub_batch = []\n else:\n j += 1\n log.debug(datetime.datetime.now() - self.time_start)\n if not kwargs.get(\"no_threading\", False):\n main_thread = threading.main_thread()\n 
for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n action_list = []\n for key, items in self.batch_data[batch_num].items():\n if key == 'main':\n es_worker = self.es_worker\n else:\n es_worker = self.other_indexers[key]\n action_list += es_worker.make_action_list(items)\n result = self.es_worker.bulk_save(action_list)\n final_list += self.batch_uris[batch_num]\n self._update_triplestore(result, action_list)\n del action_list\n del self.batch_uris[batch_num]\n del self.batch_data[batch_num]\n try:\n del pyrdf.memorized\n pyrdf.memorized = {}\n except AttributeError:\n pass\n while gc.collect() > 0:\n pass\n # pdb.set_trace()\n batch_end += batch_size\n batch_start += batch_size\n if last:\n end = True\n if len(uri_list) <= batch_size:\n batch_end = len(uri_list)\n last = True\n batch_num += 1\n self.batch_uris[batch_num] = []\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n log.debug(datetime.datetime.now() - self.time_start)\n # with open(batch_file, 'rb+') as fo:\n # fo.seek(-2, os.SEEK_END)\n # fo.truncate()\n # # fo.close()\n # fo.write(\"}\".encode())", "def test_accumulation(preds, targets, exact_match, f1):\n squad_metric = SQuAD()\n for pred, target in zip(preds, targets):\n squad_metric.update(preds=[pred], target=[target])\n metrics_score = squad_metric.compute()\n\n _assert_tensor(metrics_score[\"exact_match\"])\n _assert_tensor(metrics_score[\"f1\"])\n _assert_allclose(metrics_score[\"exact_match\"], torch.mean(torch.tensor(exact_match)))\n _assert_allclose(metrics_score[\"f1\"], torch.mean(torch.tensor(f1)))", "def equal_opportunity(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n return _compute_binary_equal_opportunity(**transformed_group_stats)", "def get_ddqn_targets(qsa_target, q_targets, mask, estimator, next_states):\n with torch.no_grad():\n next_q_values = estimator(next_states)\n argmax_actions = next_q_values.max(1, keepdim=True)[1]\n qsa_target[mask] = q_targets.gather(1, argmax_actions)\n return qsa_target", "def make_discrete(n_rows_population=500, \n n_rows_peripheral=125000, \n random_state=None,\n aggregation=aggregations.Count):\n random = np.random.RandomState(random_state)\n\n population_table = pd.DataFrame()\n population_table[\"column_01\"] = random.randint(0, 10, n_rows_population).astype(np.str)\n population_table[\"join_key\"] = np.arange(n_rows_population)\n population_table[\"time_stamp_population\"] = random.rand(n_rows_population)\n\n peripheral_table = pd.DataFrame()\n peripheral_table[\"column_01\"] = random.randint(-11, 11, n_rows_peripheral)\n peripheral_table[\"join_key\"] = random.randint(0, n_rows_population, n_rows_peripheral) \n peripheral_table[\"time_stamp_peripheral\"] = random.rand(n_rows_peripheral)\n\n # Compute targets\n temp = peripheral_table.merge(\n population_table[[\"join_key\", \"time_stamp_population\"]],\n how=\"left\",\n on=\"join_key\"\n )\n\n # Apply some conditions\n temp = temp[\n (temp[\"time_stamp_peripheral\"] <= temp[\"time_stamp_population\"]) &\n (temp[\"column_01\"] > 0.0)\n ]\n\n # Define 
the aggregation\n temp = _aggregate(temp, aggregation, \"column_01\", \"join_key\")\n\n temp = temp.rename(index=str, columns={\"column_01\": \"targets\"})\n\n population_table = population_table.merge(\n temp,\n how=\"left\",\n on=\"join_key\"\n )\n\n del temp\n\n population_table = population_table.rename(\n index=str, columns={\"time_stamp_population\": \"time_stamp\"})\n\n peripheral_table = peripheral_table.rename(\n index=str, columns={\"time_stamp_peripheral\": \"time_stamp\"})\n\n # Replace NaN targets with 0.0 - target values may never be NaN!.\n population_table.targets = np.where(\n np.isnan(population_table['targets']), \n 0, \n population_table['targets'])\n\n return population_table, peripheral_table", "def compute(self, recs, truth, *, include_missing=False):\n _log.info('analyzing %d recommendations (%d truth rows)', len(recs), len(truth))\n\n rec_key, truth_key = _df_keys(recs.columns, truth.columns, self.group_cols)\n\n t_ident, t_data = self._number_truth(truth, truth_key)\n r_ident, r_data = self._number_recs(recs, truth_key, rec_key, t_ident)\n\n timer = Stopwatch()\n\n _log.info('collecting metric results')\n bulk_res = []\n ind_metrics = []\n for mf, mn, margs in self.metrics:\n if hasattr(mf, 'bulk_score') and 'rank' in r_data.columns:\n _log.debug('bulk-scoring %s', mn)\n mbs = mf.bulk_score(r_data, t_data, **margs).to_frame(name=mn)\n assert mbs.index.name == 'LKRecID'\n bulk_res.append(mbs)\n else:\n ind_metrics.append((mf, mn, margs))\n if bulk_res:\n bulk_res = ft.reduce(lambda df1, df2: df1.join(df2, how='outer'), bulk_res)\n else:\n bulk_res = None\n\n def worker(rdf):\n rk, tk = rdf.name\n tdf = t_data.loc[tk]\n res = pd.Series(dict((mn, mf(rdf, tdf, **margs)) for (mf, mn, margs) in ind_metrics))\n return res\n\n if ind_metrics:\n _log.debug('applying individual metrics')\n groups = r_data.groupby(['LKRecID', 'LKTruthID'], sort=False)\n if hasattr(groups, 'progress_apply'):\n ind_res = groups.progress_apply(worker)\n else:\n ind_res = groups.apply(worker)\n ind_res = ind_res.reset_index('LKTruthID', drop=True)\n\n if bulk_res is not None:\n res = bulk_res.join(ind_res)\n else:\n res = ind_res\n else:\n res = bulk_res\n\n _log.debug('transforming results')\n res = r_ident.join(res, on='LKRecID').drop(columns=['LKRecID', 'LKTruthID'])\n\n _log.info('measured %d lists in %s', len(res), timer)\n\n if include_missing:\n _log.info('filling in missing user info (%d initial rows)', len(res))\n ug_cols = [c for c in rec_key if c not in truth_key]\n tcount = truth.groupby(truth_key)['item'].count().to_frame('ntruth')\n _log.debug('truth counts:\\n%s', tcount)\n if ug_cols:\n _log.debug('regrouping by %s to fill', ug_cols)\n _log.debug('pre-group series:\\n%s', res)\n\n rdict = {}\n\n for key, df in res.groupby(ug_cols):\n df2 = df.drop(columns=ug_cols).join(tcount, how='outer', on=truth_key)\n rdict[key] = df2\n\n res = pd.concat(rdict, names=ug_cols)\n _log.debug('joined result:\\n%s', res)\n res = res.reset_index(ug_cols)\n res.reset_index(inplace=True, drop=True)\n _log.debug('final joined result:\\n%s', res)\n\n else:\n _log.debug('no ungroup cols, directly merging to fill')\n res = res.join(tcount, how='outer', on=truth_key)\n _log.debug('final columns: %s', res.columns)\n _log.debug('index levels: %s', res.index.names)\n _log.debug('expanded to %d rows', len(res))\n res['ntruth'] = res['ntruth'].fillna(0)\n res['nrecs'] = res['nrecs'].fillna(0)\n\n return res.set_index(rec_key)", "def per_target_transform(y_pred, y_true):\r\n # - `y_pred` must be in the 
following shape (batch_size, num_categories, ...), float32 possibility\r\n # - `y_true` must be in the following shape (batch_size, ...) or (batch_size, num_categories, ...), int64.\r\n assert y_pred.ndim - y_true.ndim in [0, 1]\r\n assert y_pred.ndim > 2, 'only image can be transformed to per_target metric'\r\n\r\n device = y_pred.device\r\n num_classes = y_pred.shape[1]\r\n assert num_classes == 2, 'now only support binary classes'\r\n\r\n # reduce num_categories axis\r\n if y_pred.ndim == y_true.ndim:\r\n y_true = torch.argmax(y_true, 1)\r\n y_pred = torch.argmax(y_pred, 1)\r\n\r\n def _is_match(center_1, area_1, center_2, area_2):\r\n ndim = len(center_1)\r\n if sum([(center_1[i] - center_2[i]) ** 2 for i in range(ndim)]) ** 0.5 < (\r\n 0.62 * (area_1 ** (1 / ndim) + area_2 ** (1 / ndim))): # for 3d case using 0.62 factor\r\n return True\r\n return False\r\n\r\n per_target_preds = []\r\n per_target_trues = []\r\n # split batch\r\n for y_p, y_t in zip(y_pred, y_true):\r\n assert y_p.shape == y_t.shape\r\n # pred Morph Close\r\n y_p = torch.unsqueeze(torch.unsqueeze(y_p, 0), 0).type(torch.float32)\r\n kernel_size = 7\r\n padding = 3\r\n # Dilated\r\n y_p = torch.nn.MaxPool3d(kernel_size, stride=1, padding=padding)(y_p)\r\n # Eroded\r\n y_p = 1.0 - torch.nn.MaxPool3d(kernel_size, stride=1, padding=padding)(1.0 - y_p)\r\n y_p = torch.squeeze(torch.squeeze(y_p, 0), 0).type(torch.int64)\r\n\r\n y_p = y_p.detach().cpu().numpy()\r\n y_t = y_t.detach().cpu().numpy()\r\n region_area_threshold = 10\r\n y_p_label = measure.label(y_p)\r\n y_p_props = measure.regionprops(y_p_label)\r\n y_p_props = [item for item in y_p_props if item.area > region_area_threshold] # reduce small noise\r\n y_t_label = measure.label(y_t)\r\n y_t_props = measure.regionprops(y_t_label)\r\n y_t_props = [item for item in y_t_props if item.area > region_area_threshold] # reduce small noise\r\n\r\n t_matches = []\r\n target_pred = []\r\n target_true = []\r\n for i in range(len(y_p_props)):\r\n i_match = False\r\n for j in range(len(y_t_props)):\r\n if _is_match(y_p_props[i].centroid, y_p_props[i].area, y_t_props[j].centroid, y_t_props[j].area):\r\n i_match = True\r\n t_matches.append(j)\r\n if not i_match: # false positive\r\n target_pred.append(1)\r\n target_true.append(0)\r\n t_matches = set(t_matches)\r\n for _ in range(len(t_matches)): # true positive\r\n target_pred.append(1)\r\n target_true.append(1)\r\n for _ in range(len(y_t_props) - len(t_matches)): # false negative\r\n target_pred.append(0)\r\n target_true.append(1)\r\n\r\n per_target_preds.append(target_pred)\r\n per_target_trues.append(target_true)\r\n max_len = max([len(item) for item in per_target_preds])\r\n if max_len == 0:\r\n max_len = 1 # add one true negative if no targets\r\n for i in range(len(per_target_preds)):\r\n for _ in range(max_len - len(per_target_preds[i])): # pseudo true negative to unify batch len\r\n per_target_preds[i].append(0)\r\n per_target_trues[i].append(0)\r\n per_target_preds = torch.tensor(per_target_preds, dtype=torch.int64, device=device)\r\n per_target_trues = torch.tensor(per_target_trues, dtype=torch.int64, device=device)\r\n per_target_preds = one_hot(per_target_preds, 2, axis=1)\r\n per_target_trues = one_hot(per_target_trues, 2, axis=1)\r\n return per_target_preds, per_target_trues", "def calculate_rank_metrics(model, evaluation_data, train_data, opt_data, filtered=False):\n model.eval()\n with torch.no_grad():\n all_hashed = None\n if filtered:\n all_triples = np.concatenate((train_data, opt_data, evaluation_data), 
axis=0)\n # hash triples to compare them efficiently (not possible with torch tensors)\n all_hashed = np.apply_along_axis(hash_triples, 1, all_triples)\n\n entity_number = model.e_num\n metric_results = MetricsContainer(len(evaluation_data))\n all_entities = np.arange(entity_number).reshape(-1, 1)\n if model.device.type == \"cuda\":\n evaluation_data = tqdm(evaluation_data) # show progress bar for large datasets\n for x in evaluation_data:\n # corrupt evaluation triples by replacing both head and tail with all other entities\n corrupted_subjects_tail = np.repeat([x[1:]], entity_number, axis=0)\n corrupted_subjects = np.concatenate([all_entities, corrupted_subjects_tail], axis=1)\n corrupted_objects_head = np.repeat([x[:2]], entity_number, axis=0)\n corrupted_objects = np.concatenate([corrupted_objects_head, all_entities], axis=1)\n corrupted_triples = np.concatenate([corrupted_subjects, corrupted_objects], axis=0)\n\n if all_hashed is not None:\n data_hashed = np.apply_along_axis(hash_triples, 1, corrupted_triples)\n valid_indices = np.in1d(data_hashed, all_hashed, invert=True)\n data = corrupted_triples[valid_indices]\n # Add the test triple x which was removed by the filter\n data = np.append(data, [x], axis=0)\n index = len(data)-1\n else:\n # Remove duplicate occurrence of test triple x\n mask_index = np.nonzero(all_entities == x[2])[0][0]\n mask = np.ones(len(corrupted_triples), dtype=bool)\n mask[entity_number + mask_index] = False\n data = corrupted_triples[mask]\n index = np.nonzero(all_entities == x[0])[0][0]\n\n # score the evaluation triple and the corrupted triples and calculate the rank of the evaluation triple\n data = torch.tensor(data, dtype=torch.long, device=model.device)\n scores = model.score_triples(data).detach().flatten()\n _, indices = torch.sort(scores, descending=False)\n indices = indices.cpu().numpy()\n rank = np.nonzero(indices == index)[0][0] + 1\n metric_results.update(rank)\n return metric_results.get_results()", "def update(self, preds: Tensor, target: Tensor, groups: Tensor) -> None:\n group_stats = _binary_groups_stat_scores(\n preds, target, groups, self.num_groups, self.threshold, self.ignore_index, self.validate_args\n )\n\n self._update_states(group_stats)", "def get_targets(self,\n anchor_list: List[List[Tensor]],\n valid_flag_list: List[List[Tensor]],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None,\n unmap_outputs: bool = True) -> tuple:\n num_imgs = len(batch_img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if batch_gt_instances_ignore is None:\n batch_gt_instances_ignore = [None] * num_imgs\n (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, pos_inds_list, neg_inds_list,\n sampling_results_list) = multi_apply(\n self._get_targets_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore,\n unmap_outputs=unmap_outputs)\n # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n # When using 
sampling method, avg_factor is usually the sum of\n # positive and negative priors. When using `PseudoSampler`,\n # `avg_factor` is usually equal to the number of positive priors.\n avg_factor = sum(\n [results.avg_factor for results in sampling_results_list])\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n return (anchors_list, labels_list, label_weights_list,\n bbox_targets_list, bbox_weights_list, avg_factor)", "def _loss(self, nbrs, nbrs_y, query, query_y):\n\n def get_idx(q_nbrs, q_mem):\n \"\"\"Gets the index of sample in memory for computing loss.\n\n We first look to see if the query label can be found in the\n retrieved neighbours, and if not, look to memory for a key with\n the same value.\n\n We keep track of a boolean mask, which indicates whether or not we\n were able to find a sample with a label that matches the query.\n \"\"\"\n\n # Whether a matching sample can be found in neighbours or memory\n any_match_nbrs = T.any(q_nbrs, axis=1)\n any_match_mem = T.any(q_mem, axis=1)\n any_match = T.or_(any_match_nbrs, any_match_mem)\n\n # Look in neighbours then memory for corresponding sample.\n # If from neighbours, we need to retrieve the full mem idx.\n rows = T.arange(nbrs.shape[0])\n idx = T.switch(any_match_nbrs,\n nbrs[rows, tensor_choose_k(q_nbrs, self.rng, k=1)],\n tensor_choose_k(q_mem, self.rng, k=1, random=True))\n\n return (idx, any_match)\n\n # Make the labels broadcastable for indexing\n query_y_2d = T.reshape(query_y, (-1, 1))\n\n query_in_nbrs = T.eq(query_y_2d, nbrs_y) #(n_queries, self.k_nbrs)\n query_in_mem = T.eq(query_y_2d, T.reshape(self.V, (1, -1)))\n\n positive = get_idx(query_in_nbrs, query_in_mem)\n pos_loss = T.sum(query*self.K[positive[0]], axis=1)*positive[1]\n\n negative = get_idx(T.invert(query_in_nbrs), T.invert(query_in_mem))\n neg_loss = T.sum(query*self.K[negative[0]], axis=1)*negative[1]\n\n # Only return the positive components\n return T.maximum(0, neg_loss - pos_loss + self.alpha)", "def _build_train_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n\n target_quantile_values = tf.stop_gradient(\n self._build_target_quantile_values_op())\n # Reshape to self.num_tau_prime_samples x batch_size x 1 since this is\n # the manner in which the target_quantile_values are tiled.\n target_quantile_values = tf.reshape(target_quantile_values,\n [self.num_tau_prime_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_prime_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of target_quantile_values:\n # batch_size x num_tau_prime_samples x 1.\n target_quantile_values = tf.transpose(target_quantile_values, [1, 0, 2])\n\n # Shape of indices: (num_tau_samples x batch_size) x 1.\n # Expand dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n indices = tf.range(self.num_tau_samples * batch_size)[:, None]\n\n # Expand the dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n reshaped_actions = self._replay.actions[:, None]\n reshaped_actions = 
tf.tile(reshaped_actions, [self.num_tau_samples, 1])\n # Shape of reshaped_actions: (num_tau_samples x batch_size) x 2.\n reshaped_actions = tf.concat([indices, reshaped_actions], axis=1)\n\n chosen_action_quantile_values = tf.gather_nd(\n self._replay_net_quantile_values, reshaped_actions)\n # Reshape to self.num_tau_samples x batch_size x 1 since this is the manner\n # in which the quantile values are tiled.\n chosen_action_quantile_values = tf.reshape(chosen_action_quantile_values,\n [self.num_tau_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of chosen_action_quantile_values:\n # batch_size x num_tau_samples x 1.\n chosen_action_quantile_values = tf.transpose(\n chosen_action_quantile_values, [1, 0, 2])\n\n # Shape of bellman_erors and huber_loss:\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n bellman_errors = target_quantile_values[\n :, :, None, :] - chosen_action_quantile_values[:, None, :, :]\n # The huber loss (see Section 2.3 of the paper) is defined via two cases:\n # case_one: |bellman_errors| <= kappa\n # case_two: |bellman_errors| > kappa\n huber_loss_case_one = (\n tf.cast(tf.abs(bellman_errors) <= self.kappa, tf.float32) *\n 0.5 * bellman_errors ** 2)\n huber_loss_case_two = (\n tf.cast(tf.abs(bellman_errors) > self.kappa, tf.float32) *\n self.kappa * (tf.abs(bellman_errors) - 0.5 * self.kappa))\n huber_loss = huber_loss_case_one + huber_loss_case_two\n\n # Reshape replay_quantiles to batch_size x num_tau_samples x 1\n replay_quantiles = tf.reshape(\n self._replay_net_quantiles, [self.num_tau_samples, batch_size, 1])\n replay_quantiles = tf.transpose(replay_quantiles, [1, 0, 2])\n\n # Tile by num_tau_prime_samples along a new dimension. 
Shape is now\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n # These quantiles will be used for computation of the quantile huber loss\n # below (see section 2.3 of the paper).\n replay_quantiles = tf.cast(\n tf.tile(replay_quantiles[:, None, :, :],\n [1, self.num_tau_prime_samples, 1, 1]), tf.float32)\n # Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.\n quantile_huber_loss = (tf.abs(replay_quantiles - tf.stop_gradient(\n tf.cast(bellman_errors < 0, tf.float32))) * huber_loss) / self.kappa\n # Sum over current quantile value (num_tau_samples) dimension,\n # average over target quantile value (num_tau_prime_samples) dimension.\n # Shape: batch_size x num_tau_prime_samples x 1.\n loss = tf.reduce_sum(quantile_huber_loss, axis=2)\n # Shape: batch_size x 1.\n loss = tf.reduce_mean(loss, axis=1)\n\n update_priorities_op = tf.no_op()\n with tf.control_dependencies([update_priorities_op]):\n if self.summary_writer is not None:\n with tf.variable_scope('Losses'):\n tf.summary.scalar('QuantileLoss', tf.reduce_mean(loss))\n return self.optimizer.minimize(tf.reduce_mean(loss)), tf.reduce_mean(loss)", "def precomp_target_queries(self, triplet_queries):\n self.eval()\n\n triplet_queries_idx = np.zeros((len(triplet_queries),3), dtype=np.int)\n queries_sro = Variable(torch.zeros(len(triplet_queries),3)).long()\n\n for count,triplet_query in enumerate(triplet_queries):\n\n subjectname, predicate, objectname = triplet_query.split('-')\n sub_cat = self.classes.word2idx[subjectname]\n obj_cat = self.classes.word2idx[objectname]\n rel_cat = self.predicates.word2idx[predicate]\n\n triplet_queries_idx[count,0] = sub_cat\n triplet_queries_idx[count,1] = rel_cat\n triplet_queries_idx[count,2] = obj_cat\n\n queries_sro[count,0] = self.idx_to_vocab['s'][sub_cat]\n queries_sro[count,2] = self.idx_to_vocab['o'][obj_cat]\n queries_sro[count,1] = self.idx_to_vocab['r'][rel_cat]\n\n if torch.cuda.is_available():\n queries_sro = queries_sro.cuda() \n\n\n return queries_sro, triplet_queries_idx", "def split_regression(self, dataset, targets):\n index = np.argsort(targets[:, 0])\n dataset, targets = dataset[index], targets[index]\n indices = [([], []) for _ in range(self.folds)]\n for i in range(0, len(targets), self.folds):\n if i + self.folds < len(targets):\n for j in range(self.folds):\n for k in range(self.folds):\n if k == j and k + i < len(targets):\n indices[j][1].append(i+k)\n elif k != j:\n indices[j][0].append(i+k)\n return indices, dataset, targets", "def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[Any, Any, Any, Any]:\n tn, fp, fn, tp, support = super().update(outputs=outputs, targets=targets)\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def update(self):\n if len(self.memory) < self.BATCH_SIZE * 20:\n return\n\n # == EXPERIENCE REPLAY ==\n transitions = self.memory.sample(self.BATCH_SIZE)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043\n # for detailed explanation). 
This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n (non_final_mask, non_final_state_nxt, state, action, reward, g_x,\n l_x) = self.unpack_batch(batch)\n\n # == get Q(s,a) ==\n # `gather` reguires that idx is Long and input and index should have the\n # same shape with only difference at the dimension we want to extract.\n # value out[i][j][k] = input[i][j][ index[i][j][k] ], which has the\n # same dim as index\n # -> state_action_values = Q [ i ][ action[i] ]\n # view(-1): from mtx to vector\n state_action_values = (\n self.Q_network(state).gather(dim=1, index=action).view(-1)\n )\n\n # == get a' ==\n # u', d' = argmin_u' argmax_d' Q_policy(s', u', d')\n # a' = tuple2Int(u', d')\n with torch.no_grad():\n num_non_final = non_final_state_nxt.shape[0]\n state_nxt_action_values = self.Q_network(non_final_state_nxt)\n Q_mtx = state_nxt_action_values.detach().reshape(\n num_non_final, self.numActionList[0], self.numActionList[1]\n )\n # minmax values and indices\n pursuerValues, colIndices = Q_mtx.max(dim=-1)\n _, rowIdx = pursuerValues.min(dim=-1)\n colIdx = colIndices[np.arange(num_non_final), rowIdx]\n action_nxt = [\n actionIndexTuple2Int((r, c), self.numActionList)\n for r, c in zip(rowIdx, colIdx)\n ]\n action_nxt = (torch.LongTensor(action_nxt).to(self.device).view(-1, 1))\n\n # == get expected value ==\n state_value_nxt = torch.zeros(self.BATCH_SIZE).to(self.device)\n\n with torch.no_grad(): # V(s') = Q_tar(s', a'), a' is from Q_policy\n if self.double_network:\n Q_expect = self.target_network(non_final_state_nxt)\n else:\n Q_expect = self.Q_network(non_final_state_nxt)\n state_value_nxt[non_final_mask] = \\\n Q_expect.gather(dim=1, index=action_nxt).view(-1)\n\n # == Discounted Reach-Avoid Bellman Equation (DRABE) ==\n if self.mode == \"RA\":\n expected_state_action_values = (\n torch.zeros(self.BATCH_SIZE).float().to(self.device)\n )\n # Q(s, u) = V( f(s,u) )\n non_terminal = torch.max(\n g_x[non_final_mask],\n torch.min(l_x[non_final_mask], state_value_nxt[non_final_mask]),\n )\n terminal = torch.max(l_x, g_x)\n\n # normal state\n expected_state_action_values[non_final_mask] = (\n non_terminal * self.GAMMA + terminal[non_final_mask] *\n (1 - self.GAMMA)\n )\n # terminal state\n final_mask = torch.logical_not(non_final_mask)\n if self.terminalType == \"g\":\n expected_state_action_values[final_mask] = g_x[final_mask]\n elif self.terminalType == \"max\":\n expected_state_action_values[final_mask] = terminal[final_mask]\n else:\n raise ValueError(\"invalid terminalType\")\n\n # == regression: Q(s, a) <- V(s) ==\n self.Q_network.train()\n loss = smooth_l1_loss(\n input=state_action_values,\n target=expected_state_action_values.detach(),\n )\n\n # == backpropagation ==\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.Q_network.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n self.update_target_network()\n\n return loss.item()", "def update(self, labels, preds):\n #labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred_label in zip(labels, preds):\n if len(pred_label.shape) > 2:\n pred_label = mx.nd.reshape(pred_label, shape=[-1, pred_label.shape[-1]])\n label = mx.nd.reshape(pred_label, shape=[-1])\n\n # Using argpartition here instead of argsort is safe because\n # we do not care about the order of top k elements. 
It is\n # much faster, which is important since that computation is\n # single-threaded due to Python GIL.\n pred_label = np.argpartition(pred_label.asnumpy().astype('float32'), -self.top_k)\n label = label.asnumpy().astype('int32')\n check_label_shapes(label, pred_label)\n num_dims = len(pred_label.shape)\n mask = (label != self.ignore_label).astype(np.int32)\n num_samples = mask.sum()\n\n num_classes = pred_label.shape[1]\n top_k = min(num_classes, self.top_k)\n for j in range(top_k):\n num_correct = ((pred_label[:, num_classes - 1 - j].flat == label.flat) * mask).sum()\n self.sum_metric += num_correct\n self.global_sum_metric += num_correct\n\n self.num_inst += num_samples\n self.global_num_inst += num_samples", "def target_mean(train,test,train_index=None,holdout_index=None,col=[],\n target='click',num_folds=5,seed=23):\n feature_name='new_features'\n if holdout_index is None:\n train_cv = train.copy()\n holdout = None\n else:\n if train_index is None:\n warnings.warn('train index is None. Now need to calculate. If you parse the value, it will be more efficient ')\n train_index = list(set(train.index) - set(holdout_index))\n train_cv = train.loc[train_index].copy()\n holdout = train.loc[holdout_index].copy()\n holdout_list = []\n sf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n train_return = train[col].copy()\n test_return = test[col].copy()\n train_return[feature_name] = np.nan\n test_return[feature_name] = np.nan\n test_list = []\n \n val_index_monitor = []\n for t_index,v_index in sf.split(train_cv,train_cv[target]):\n history = train_cv.iloc[t_index].copy()\n mapping = history.groupby(col)[target].mean().reset_index().rename({target:feature_name},axis=1)\n val = train_cv.iloc[v_index].copy()\n val_index_monitor.extend(list(val.index))\n train_return.loc[val.index,feature_name] = val[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values\n if holdout is not None:\n holdout_list.append(holdout[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values)\n test_list.append(test[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values)\n if holdout is not None:\n train_return.loc[holdout.index,feature_name] = np.mean(np.array(holdout_list),axis=0)\n test_return[feature_name] = np.mean(np.array(test_list),axis=0)\n val_index_monitor.extend(list(holdout.index))\n return train_return[feature_name].values,test_return[feature_name].values", "def compute_metrics(\n self,\n preds: Dict[str, torch.Tensor],\n targets: Dict[str, torch.Tensor],\n phase: str,\n ) -> Dict[str, torch.Tensor]:\n if phase == \"train\":\n metrics_dict = self.train_metrics\n elif phase == \"val\":\n metrics_dict = self.val_metrics\n elif phase == \"test\":\n metrics_dict = self.test_metrics\n\n ret = {}\n for metric_name, metric in metrics_dict.items():\n if metric is not None:\n branch = metric_name.split(\"_\")[0]\n ret[metric_name] = metric(preds[branch], targets[branch])\n\n return ret", "def reduce_by_labels(values, labels, weights=None, target_labels=None,\n red_op='mean', axis=0, dtype=None):\n\n if axis == 1 and values.ndim == 1:\n axis = 0\n\n if target_labels is None:\n uq_tl = np.unique(labels)\n idx_back = None\n else:\n uq_tl, idx_back = np.unique(target_labels, return_inverse=True)\n\n if weights is not None:\n weights = np.atleast_2d(weights)\n\n v2d = np.atleast_2d(values)\n if axis == 1:\n v2d = v2d.T\n\n if isinstance(red_op, str):\n fred = _get_redop(red_op, 
weights=weights, axis=1)\n else:\n fred = red_op\n\n if dtype is None:\n dtype = np.float64\n if red_op in {'min', 'max', 'sum', 'mode'}:\n dtype = values.dtype\n\n mapped = np.empty((v2d.shape[0], uq_tl.size), dtype=dtype)\n for i, lab in enumerate(uq_tl):\n mask = labels == lab\n wm = None if weights is None else weights[:, mask]\n\n if isinstance(red_op, str):\n mapped[:, i] = fred(v2d[:, mask], wm)\n\n else:\n for idx in range(v2d.shape[0]):\n mapped[idx, i] = fred(v2d[idx, mask], wm)\n\n if idx_back is not None:\n mapped = mapped[:, idx_back]\n\n if axis == 1:\n mapped = mapped.T\n\n if values.ndim == 1:\n return mapped[0]\n return mapped", "def get_data_stats(sharded_list, center_at_mut=True):\n data = []\n all_elements = []\n labels = []\n\n for i, sharded in enumerate(sharded_list):\n for shard_num, shard_df in sharded.iter_shards():\n labels_df = sharded.read_shard(shard_num, key='labels')\n\n for ensemble_name, ensemble_df in shard_df.groupby(['ensemble']):\n all_elements.extend(ensemble_df.element.values)\n label_info = labels_df[labels_df.ensemble == ensemble_name].squeeze()\n\n for subunit_name in ['original', 'mutated']:\n struct_df = ensemble_df[ensemble_df.subunit == subunit_name]\n pos = struct_df[['x', 'y', 'z']].astype(np.float32)\n mutation_center = __get_mutation_center(\n struct_df, label_info, center_at_mut)\n\n max_dist = util.get_max_distance_from_center(pos, mutation_center)\n num_atoms = struct_df.shape[0]\n data.append((ensemble_name, subunit_name, max_dist, num_atoms))\n\n labels.append((i, shard_num, label_info.label))\n\n all_elements_df = pd.DataFrame(all_elements, columns=['element'])\n unique_elements = all_elements_df.element.unique()\n print('Unique elements ({:}): {:}'.format(len(unique_elements), unique_elements))\n print('\\nElement counts:')\n print(all_elements_df.element.value_counts())\n print('\\n')\n\n all_labels_df = pd.DataFrame(labels, columns=['sharded', 'shard_num', 'label'])\n print('\\nLabel by dataset:')\n print(all_labels_df.groupby(['sharded', 'shard_num']).label.value_counts())\n print('\\n')\n print(all_labels_df.label.value_counts())\n\n df = pd.DataFrame(data, columns=['ensemble', 'subunit', 'max_dist', 'num_atoms'])\n df = df.sort_values(by=['max_dist', 'num_atoms'],\n ascending=[False, False]).reset_index(drop=True)\n print(df.describe())\n\n print(df[df.max_dist < 50].shape[0]*100.0/df.shape[0])\n return df", "def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos", "def main(targets):\n # Parse through the datasets and select only relevant columns\n cpu_df = data_exploration.parse_cpu_data(\"data/raw/hw_metric_histo.csv000\")\n sys_df = data_exploration.parse_sys_data(\"data/raw/system_sysinfo_unique_normalized.csv000\")\n\n # Create a new reference to the optimized DataFrame\n optimized_df = data_exploration.optimize_dataframe(cpu_df)\n\n # grab the specific column \"HW::CORE:C0:PERCENT\" as a feature\n cpu = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:C0:PERCENT:\")\n\n # grab the specific column \"HW::CORE:TEMPERATURE:CENTIGRADE\" as a feature\n temp = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:TEMPERATURE:CENTIGRADE:\")\n\n # grab the GUIDs from each dataset and put them into lists\n sys_guid = 
data_exploration.get_guid(sys_df, 'guid')\n hw_guid = data_exploration.get_guid(cpu_df, 'guid')\n\n # checking for the GUID overlap in both datasets\n syshw_overlap = [guid for guid in sys_guid if guid in hw_guid]\n\n # objective is to create a dataframe of only matching GUIDs\n hwcpu_match = data_exploration.get_cpu_guid(cpu, syshw_overlap)\n\n # only grabbing the relevant columns to be matched on\n hwtemp_match = data_exploration.get_temp_guid(temp, syshw_overlap)\n\n # instantiating our dataframes to be joined\n hwtemp = pd.DataFrame(hwtemp_match.groupby('guid')['temp_mean'].mean())\n hwcpu = pd.DataFrame(hwcpu_match.groupby('guid')['utilization_mean'].mean())\n\n # joining our matched dataframes together, only using relevant columns\n combined = sys_df.join(hwcpu, on=['guid'], how='left')\n combined = combined.join(hwtemp, on=['guid'], how='left')\n combined = combined.drop(columns=['guid', 'model_normalized', \"processornumber\"])\n\n # create copy of our joined dataframe to be used for modelling\n feature_columns = combined.copy()\n\n # selecting only relevant columns to use for features\n feature_columns = feature_columns[['os','cpu_family', 'cpuvendor',\n 'graphicscardclass', 'persona']]\n\n # creating a completely one-hot encoded dataframe only containing relevant columns\n dummy = pd.get_dummies(feature_columns)\n\n # converting our categorical variables to be predicted on into numerical values\n cleanup_nums = {'persona': {'Web User': 0, 'Casual User': 1, 'Gamer':2, 'Casual Gamer': 3,\n 'Office/Productivity':4, 'Content Creator/IT': 5,\n 'Communication': 6, 'Win Store App User': 7, 'Entertainment': 8,\n 'File & Network Sharer':9, 'Unknown': 10}}\n\n # replacing the values in the column 'persona' to be numerical\n encode_persona = combined['persona'].to_frame().replace(cleanup_nums)\n\n # putting our old means back into the dummy dataframe\n dummy['util_mean'] = combined['utilization_mean']\n dummy['temp_mean'] = combined['temp_mean']\n # dummy = dummy.drop(columns=['persona'])\n dummy['persona'] = encode_persona['persona']\n\n dummy = dummy.dropna()\n nona_test = dummy.copy()\n\n # we want to predict on Y\n Y = nona_test['persona']\n X = nona_test.drop(columns=['persona'])\n\n # creating our test/train split\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n # all the models we are going to use\n names = [\"Nearest_Neighbors\", \"Linear_SVM\", \"Polynomial_SVM\", \"RBF_SVM\", \"Gradient_Boosting\"]\n\n # all of our predictors scaled to the degree of our datasets\n classifiers = [KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(kernel=\"poly\", degree=3, C=0.025),\n SVC(kernel=\"rbf\", C=1, gamma=2),\n GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)]\n\n scores = []\n # we write in our accuracy scores to [scores]\n for name, clf in zip(names, classifiers):\n clf.fit(X_train, Y_train)\n score = clf.score(X_test, Y_test)\n scores.append(score)\n\n show = data_exploration.get_model_scores(names, scores)\n model_scores = data_exploration.plot_graphical_model_scores(show)", "def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n 
reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # 
Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def form_cand_queries_batch(self, batch_input, gram, additional_neg_batch=0):\n N = batch_input['pair_objects'].size(0)\n\n # For each gram s,r,o get unique list of positive labels in the batch\n if gram in ['s','r','o']:\n\n labels = batch_input['labels_' + gram]\n\n cat_batch = []\n idx = []\n for j in range(N):\n\n cats = (labels[j,:]==1).nonzero().data[:,0].tolist()\n\n count = 0\n cat = cats[0]\n while count < len(cats) and cats[count]>-1:\n cat = cats[count]\n if cat not in cat_batch:\n idx.append(tuple([j,len(cat_batch)]))\n cat_batch.append(cat)\n else:\n idx.append(tuple([j,cat_batch.index(cat)]))\n count += 1\n\n # Add negatives at random (later can refine and add hard negatives)\n if additional_neg_batch>0:\n neg_cat_sampled = np.random.randint(0, len(self.vocab[gram]), size=additional_neg_batch) # can be duplicate, it is ok\n\n # Append the ones that are not positive for any example in the batch\n for neg_cat in neg_cat_sampled:\n if neg_cat not in cat_batch:\n cat_batch.append(neg_cat) \n \n\n labels_query = np.zeros((N,len(cat_batch)))\n for j in range(len(idx)):\n labels_query[idx[j][0], idx[j][1]] = 1\n\n cat_batch = Variable(torch.from_numpy(np.array(cat_batch).astype(int)))\n if self.use_gpu:\n cat_batch = cat_batch.cuda()\n\n query = self.idx_to_vocab[gram].index_select(0, cat_batch)\n\n labels_query = Variable(torch.from_numpy(labels_query)).type(query.data.type())\n query = query.unsqueeze(0) # (M,1) -> (1,M,1)\n\n\n return query, labels_query\n\n\n # The triplets for sro are all the positives\n if gram=='sro':\n\n triplet_cat_batch = np.empty((0,3), dtype=int)\n idx_triplet = []\n for j in range(N):\n\n sub_cats = (batch_input['labels_s'][j,:]==1).nonzero().data[:,0].tolist()\n obj_cats = (batch_input['labels_o'][j,:]==1).nonzero().data[:,0].tolist()\n rel_cats = (batch_input['labels_r'][j,:]==1).nonzero().data[:,0].tolist()\n\n\n # Do not add the triplets containing __background__ -> not in vocab\n for sub_cat in sub_cats:\n for obj_cat in obj_cats:\n\n if sub_cat==0 or obj_cat==0:\n continue\n\n count = 0\n while count < len(rel_cats) and rel_cats[count]>-1:\n rel_cat = rel_cats[count]\n triplet_cat = np.array([sub_cat, rel_cat, obj_cat])\n idx_triplet_cat_batch = np.where(np.logical_and(triplet_cat_batch[:,0]==triplet_cat[0], \\\n np.logical_and(\n triplet_cat_batch[:,1]==triplet_cat[1], \\\n triplet_cat_batch[:,2]==triplet_cat[2])))[0]\n if len(idx_triplet_cat_batch)==0:\n idx_triplet.append(tuple([j,triplet_cat_batch.shape[0]]))\n triplet_cat_batch = np.vstack((triplet_cat_batch, 
triplet_cat))\n else:\n idx_triplet.append(tuple([j,idx_triplet_cat_batch[0]]))\n\n count += 1\n\n\n # Add negatives at random\n if additional_neg_batch>0:\n\n neg_cat_sampled_sub = np.random.randint(0, len(self.vocab['s']), size=additional_neg_batch)\n neg_cat_sampled_obj = np.random.randint(0, len(self.vocab['o']), size=additional_neg_batch)\n neg_cat_sampled_rel = np.random.randint(0, len(self.vocab['r']), size=additional_neg_batch)\n neg_cat_sampled = np.vstack((neg_cat_sampled_sub, neg_cat_sampled_rel, neg_cat_sampled_obj)).T\n\n\n # Append the ones that are not positive for any example in the batch\n for j in range(len(neg_cat_sampled)):\n\n idx_batch = np.where(np.logical_and(triplet_cat_batch[:,0]==neg_cat_sampled[j,0], \\\n np.logical_and(\n triplet_cat_batch[:,1]==neg_cat_sampled[j,1], \\\n triplet_cat_batch[:,2]==neg_cat_sampled[j,2])))[0]\n\n if len(idx_batch)==0:\n triplet_cat_batch = np.vstack((triplet_cat_batch, neg_cat_sampled[j,:]))\n\n\n labels_query_sro = np.zeros((N,triplet_cat_batch.shape[0]))\n for j in range(len(idx_triplet)):\n labels_query_sro[idx_triplet[j][0], idx_triplet[j][1]] = 1\n\n triplet_cat_batch = Variable(torch.from_numpy(triplet_cat_batch))\n if self.use_gpu:\n triplet_cat_batch = triplet_cat_batch.cuda()\n query_sro = torch.cat([ self.idx_to_vocab['s'].index_select(0,triplet_cat_batch[:,0]),\\\n self.idx_to_vocab['r'].index_select(0,triplet_cat_batch[:,1]),\\\n self.idx_to_vocab['o'].index_select(0,triplet_cat_batch[:,2])], 1)\n\n labels_query_sro = Variable(torch.from_numpy(labels_query_sro)).type(query_sro.data.type()) \n query_sro = query_sro.unsqueeze(0) # (M,3) -> (1,M,3)\n\n\n return query_sro, labels_query_sro", "def _get_targets_single(self, mask_preds: Tensor,\n gt_instances: InstanceData,\n positive_info: InstanceData):\n gt_bboxes = gt_instances.bboxes\n device = gt_bboxes.device\n gt_masks = gt_instances.masks.to_tensor(\n dtype=torch.bool, device=device).float()\n\n # process with mask targets\n pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds')\n scores = positive_info.get('scores')\n centernesses = positive_info.get('centernesses')\n num_pos = pos_assigned_gt_inds.size(0)\n\n if gt_masks.size(0) == 0 or num_pos == 0:\n return mask_preds, None, 0\n # Since we're producing (near) full image masks,\n # it'd take too much vram to backprop on every single mask.\n # Thus we select only a subset.\n if (self.max_masks_to_train != -1) and \\\n (num_pos > self.max_masks_to_train):\n perm = torch.randperm(num_pos)\n select = perm[:self.max_masks_to_train]\n mask_preds = mask_preds[select]\n pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n num_pos = self.max_masks_to_train\n elif self.topk_masks_per_img != -1:\n unique_gt_inds = pos_assigned_gt_inds.unique()\n num_inst_per_gt = max(\n int(self.topk_masks_per_img / len(unique_gt_inds)), 1)\n\n keep_mask_preds = []\n keep_pos_assigned_gt_inds = []\n for gt_ind in unique_gt_inds:\n per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind)\n mask_preds_per_inst = mask_preds[per_inst_pos_inds]\n gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds]\n if sum(per_inst_pos_inds) > num_inst_per_gt:\n per_inst_scores = scores[per_inst_pos_inds].sigmoid().max(\n dim=1)[0]\n per_inst_centerness = centernesses[\n per_inst_pos_inds].sigmoid().reshape(-1, )\n select = (per_inst_scores * per_inst_centerness).topk(\n k=num_inst_per_gt, dim=0)[1]\n mask_preds_per_inst = mask_preds_per_inst[select]\n gt_inds_per_inst = gt_inds_per_inst[select]\n keep_mask_preds.append(mask_preds_per_inst)\n 
keep_pos_assigned_gt_inds.append(gt_inds_per_inst)\n mask_preds = torch.cat(keep_mask_preds)\n pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds)\n num_pos = pos_assigned_gt_inds.size(0)\n\n # Follow the origin implement\n start = int(self.mask_out_stride // 2)\n gt_masks = gt_masks[:, start::self.mask_out_stride,\n start::self.mask_out_stride]\n gt_masks = gt_masks.gt(0.5).float()\n pos_mask_targets = gt_masks[pos_assigned_gt_inds]\n\n return (mask_preds, pos_mask_targets, num_pos)", "def build_targets(pred_boxes, pred_conf, pred_cls, target, anchors, num_anchors, num_classes, grid_size, ignore_thres, img_dim):\n nB = target.size(0)\n nA = num_anchors\n nC = num_classes\n nG = grid_size\n mask = torch.zeros(nB, nA, nG, nG)\n conf_mask = torch.ones(nB, nA, nG, nG)\n tx = torch.zeros(nB, nA, nG, nG)\n ty = torch.zeros(nB, nA, nG, nG)\n tw = torch.zeros(nB, nA, nG, nG)\n th = torch.zeros(nB, nA, nG, nG)\n tconf = torch.ByteTensor(nB, nA, nG, nG).fill_(0)\n tcls = torch.ByteTensor(nB, nA, nG, nG, nC).fill_(0)\n\n nGT = 0\n nCorrect = 0\n for b in range(nB):\n for t in range(target.shape[1]):\n if target[b, t].sum() == 0:\n # pad\n continue\n nGT += 1\n # Convert to position relative to box\n gx = target[b, t, 1] * nG\n gy = target[b, t, 2] * nG\n gw = target[b, t, 3] * nG\n gh = target[b, t, 4] * nG\n # Get grid box indices\n gi = int(gx)\n gj = int(gy)\n # Get shape of gt box\n gt_box = torch.FloatTensor(\n np.array([0, 0, gw, gh])).unsqueeze(0)\n # Get shape of anchor box\n anchor_shapes = torch.FloatTensor(np.concatenate(\n (np.zeros((len(anchors), 2)), np.array(anchors)), 1))\n\n # Calculate iou between gt and anchor shapes\n # 1 on 3\n anch_ious = bbox_iou(gt_box, anchor_shapes)\n # Where the overlap is larger than threshold set mask to zero (ignore)\n conf_mask[b, anch_ious > ignore_thres, gj, gi] = 0\n # Find the best matching anchor box\n\n best_n = np.argmax(anch_ious)\n # Get ground truth box\n gt_box = torch.FloatTensor(\n np.array([gx, gy, gw, gh])).unsqueeze(0)\n # Get the best prediction\n pred_box = pred_boxes[b, best_n, gj, gi].unsqueeze(0)\n # Masks\n mask[b, best_n, gj, gi] = 1\n conf_mask[b, best_n, gj, gi] = 1\n # Coordinates\n tx[b, best_n, gj, gi] = gx - gi\n ty[b, best_n, gj, gi] = gy - gj\n # Width and height\n tw[b, best_n, gj, gi] = math.log(\n gw / anchors[best_n][0] + 1e-16)\n th[b, best_n, gj, gi] = math.log(\n gh / anchors[best_n][1] + 1e-16)\n # One-hot encoding of label\n target_label = int(target[b, t, 0])\n tcls[b, best_n, gj, gi, target_label] = 1\n tconf[b, best_n, gj, gi] = 1\n\n # Calculate iou between ground truth and best matching prediction\n iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)\n pred_label = torch.argmax(pred_cls[b, best_n, gj, gi])\n score = pred_conf[b, best_n, gj, gi]\n if iou > 0.5 and pred_label == target_label and score > 0.5:\n nCorrect += 1\n\n return nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls", "def calculate_metrics(outputs, targets):\n pred = outputs\n\n # Top-k prediction for TAg\n hits_tag_top5 = compute_topk_acc(pred, targets, 5)\n hits_tag_top1 = compute_topk_acc(pred, targets, 1)\n\n return hits_tag_top5.item(), hits_tag_top1.item()", "def get_batch_statistics(outputs, targets, iou_threshold):\n batch_metrics = [] \n for sample_i in range(len(outputs)): # output 은 배열형태\n\n if outputs[sample_i] is None:\n continue\n\n output = outputs[sample_i]\n pred_boxes = output[:, :4]\n pred_scores = output[:, 4]\n pred_labels = output[:, -1]\n\n true_positives = np.zeros(pred_boxes.shape[0])\n\n annotations = 
targets[targets[:, 0] == sample_i] [:,1 :]\n target_labels = annotations[:, 0] if len(annotations) else []\n if len(annotations) :\n detected_boxes = []\n target_boxes = annotations[:, 1:]\n\n for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):\n\n if len(detected_boxes) == len(annotations) :\n break\n\n if pred_label not in target_labels:\n continue\n\n iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0)\n if iou >= iou_threshold and box_index not in detected_boxes:\n true_positives[pred_i] = 1\n detected_boxes += [box_index]\n\n batch_metrics.append([true_positives, pred_scores, pred_labels])\n return batch_metrics", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))\n ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))\n nsentences = utils.item(sum(log.get('nsentences', 0) for log in logging_outputs))\n sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n if sample_size != ntokens:\n metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)\n\n if len(logging_outputs) > 0 and 'ncorrect' in logging_outputs[0] \\\n and 'nbound' in logging_outputs[0] and 'nbound_pred' in logging_outputs[0] \\\n and 'total_ncorrect' in logging_outputs[0]:\n ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)\n nbound = sum(log.get('nbound', 0) for log in logging_outputs)\n nbound_pred = sum(log.get('nbound_pred', 0) for log in logging_outputs)\n total_ncorrect = sum(log.get('total_ncorrect', 0) for log in logging_outputs)\n\n precision = 100 * ncorrect / (nbound_pred + 1e-10)\n recall = 100 * ncorrect / (nbound + 1e-10)\n\n metrics.log_scalar('accuracy', 100.0 * total_ncorrect / ntokens, ntokens, round=1)\n metrics.log_scalar('precision', precision, ntokens, round=1)\n metrics.log_scalar('recall', recall, ntokens, round=1)\n metrics.log_scalar('F1', 2 * (precision * recall) / (precision + recall + 1e-10), ntokens, round=1)", "def Allreduce4(net, blobs, reduced_affix, gpu_indices):\n a, b, c, d = blobs\n gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices\n # a_reduced <- a+b, c_reduced <- c + d\n a_reduced = net.Add(\n [a, b],\n str(a) + reduced_affix,\n device_option=OnGPU(gpu_a)\n )\n c_reduced = net.Add(\n [c, d],\n str(c) + reduced_affix,\n device_option=OnGPU(gpu_c)\n )\n # a_reduced <- a_reduced + c_reduced\n a_reduced = a_reduced.Add(c_reduced, a_reduced, device_option=OnGPU(gpu_a))\n # broadcast a_reduced to c_reduced\n c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))\n # broadcast to b and d\n b_reduced = a_reduced.Copy(\n [],\n str(b) + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n d_reduced = c_reduced.Copy(\n [],\n str(d) + reduced_affix,\n device_option=OnGPU(gpu_d)\n )\n return a_reduced, b_reduced, c_reduced, d_reduced", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 
\n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 Accuracy\n for each_val in res[:1]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def _aggregation_target(self):\n ...", "def get_qald_metrics(pred_, m_labels_, ques_, mode='val'):\n rows = []\n question_rows_map = {}\n question_mention_set = set()\n for i, pred in enumerate(pred_):\n pred = pred.data.tolist()[0]\n question = ques_[i]\n if question not in question_rows_map:\n question_rows_map[ques_[i]] = []\n if pred:\n men_entity_label = '_'.join(m_labels_[i].split(';')[-1].split())\n men_entity_mention = '_'.join(m_labels_[i].split(';')[0].split())\n if '-'.join([question, men_entity_mention]) in question_mention_set:\n question_rows_map[ques_[i]][-1].add(\n ('http://dbpedia.org/resource/{}'.format(men_entity_label), pred))\n else:\n question_mention_set.add('-'.join([question, men_entity_mention]))\n question_rows_map[ques_[i]].append(set())\n question_rows_map[ques_[i]][-1].add(\n ('http://dbpedia.org/resource/{}'.format(men_entity_label), pred))\n for key, preds_list_mentions in question_rows_map.items():\n if len(preds_list_mentions) > 1:\n rows.append([key, []])\n for preds_set in preds_list_mentions:\n sorted_values = sorted(list(preds_set), key=lambda x: x[1], reverse=True)[:5]\n rows[-1][1].append(sorted_values)\n elif len(preds_list_mentions) == 1:\n sorted_values = sorted(list(preds_list_mentions[0]), key=lambda x: x[1], reverse=True)[:5]\n rows.append([key, [sorted_values]])\n else:\n rows.append([key, []])\n\n df_output = pd.DataFrame(rows, columns=['Question', 'Entities'])\n df_output['Classes'] = str([])\n\n # gold\n benchmark = pd.read_csv('../../../data/lcquad/lcquad_gt.csv')\n benchmark = benchmark.set_index('Question')\n benchmark = benchmark.replace(np.nan, '', regex=True)\n benchmark['Entities'] = benchmark['Entities'].astype(object)\n is_qald_gt = True\n\n # pred\n predictions = df_output\n # print(df_output.shape)\n predictions = predictions.set_index('Question')\n predictions['Entities'] = predictions['Entities']\n predictions['Classes'] = predictions['Classes']\n\n metrics = compute_metrics(benchmark=benchmark, predictions=predictions, limit=410, is_qald_gt=is_qald_gt,\n eval='full')\n\n scores = metrics['macro']['named']\n prec, recall, f1 = scores['precision'], scores['recall'], scores['f1']\n return prec, recall, f1, df_output", "def update_output(self, ):\n input_ids, outputs, grads, adv_tokens = self.batch_output\n\n probs = softmax(outputs, dim=-1)\n probs, labels = torch.max(probs, dim=-1)\n\n tokens = [\n self.tokenizer.convert_ids_to_tokens(input_ids_)\n for 
input_ids_ in input_ids\n ]\n\n embedding_grads = grads.sum(dim=2)\n \n # norm for each sequence\n norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter\n \n # normalizing\n for i, norm in enumerate(norms):\n embedding_grads[i] = torch.abs(embedding_grads[i]) / norm\n\n batch_output = []\n \n # check probs, labels shape\n labels = torch.reshape(labels, (1, -1))\n probs = torch.reshape(probs, (1, -1))\n iterator = zip(tokens, probs, embedding_grads, labels)\n\n for example_tokens, example_prob, example_grad, example_label in iterator:\n example_dict = dict()\n # as we do it by batches we has a padding so we need to remove it\n \n example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]\n example_dict['tokens'] = example_tokens\n example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]\n example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item()\n example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() \n\n batch_output.append(example_dict)\n\n return batch_output", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def compare_and_accumulate(\n self, gt_panoptic_labels: List[tf.Tensor],\n pred_panoptic_labels: List[tf.Tensor]\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n gt_panoptic_label = tf.concat(gt_panoptic_labels, axis=1)\n pred_panoptic_label = tf.concat(pred_panoptic_labels, axis=1)\n return super(VideoPanopticQuality,\n self).compare_and_accumulate(gt_panoptic_label,\n pred_panoptic_label)", "def get_agg(self, x, ids):\n \n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_agg = torch.mean(x[ids == i], 0).repeat(sample_size, 1)\n \n # concatenate each group of aggregated data\n if i == 0:\n agg = sample_agg \n else:\n agg = torch.cat((agg, sample_agg), dim=0)\n \n return agg", "def update_batch_multi_env(self, batch):\n x, y_true, metadata = batch\n x = 
x.to(self.device)\n y_true = y_true.to(self.device)\n g = self.grouper.metadata_to_group(metadata).to(self.device)\n\n # Perform per group gradient computation and RD\n environments = torch.unique(g).tolist()\n sizes = []\n pos_acc = [] \n neg_acc = []\n total_grad = []\n\n self.pos_grad.fill_(0)\n self.neg_grad.fill_(0)\n self.total_grad.fill_(0)\n self.all_positive.fill_(1)\n self.all_negative.fill_(1)\n full_outputs = None\n for env_id, env_name in enumerate(environments):\n # get the ratio of size of env to batch\n env_ratio = torch.sum(g==env_name).item() / g.shape[0]\n self.model.zero_grad()\n\n if torch.is_tensor(x):\n outputs = self.model(x[g==env_name])\n partial_objective = self.partial_objective(outputs, y_true[g==env_name])\n if full_outputs is None:\n partial_shape = list(outputs.shape)\n partial_shape[0] = y_true.shape[0]\n full_outputs = torch.zeros(*partial_shape, dtype=outputs.dtype).to(torch.device(\"cuda\"))\n full_outputs[g==env_name] = outputs\n else:\n outputs = self.model(x)\n partial_objective = self.partial_objective(outputs[g==env_name], y_true[g==env_name])\n full_outputs = outputs\n if not partial_objective.requires_grad:\n print(\"Loss function returned 0 (possibly input is NaN\")\n else:\n partial_objective.backward()\n # Save the gradients here\n cur_pos = 0\n for i, param in enumerate(self.model.parameters()):\n p = param.grad.clone().detach()\n next_pos = cur_pos + np.prod(list(p.shape))\n\n p_neg = (p<0)*p\n p_pos = (p>0)*p\n\n self.all_positive[cur_pos:next_pos] = torch.logical_and(p.view(-1)>0, self.all_positive[cur_pos:next_pos])\n self.all_negative[cur_pos:next_pos] = torch.logical_and(p.view(-1)<0, self.all_negative[cur_pos:next_pos])\n \n self.dist_pos[env_id][0][cur_pos:next_pos] = -1.0 * p_neg.view(-1)\n self.dist_neg[env_id][0][cur_pos:next_pos] = p_pos.view(-1)\n\n self.pos_grad[cur_pos:next_pos] += p_pos.view(-1) * env_ratio\n self.neg_grad[cur_pos:next_pos] += p_neg.view(-1) * env_ratio\n self.total_grad[cur_pos:next_pos] += p.view(-1) * env_ratio\n cur_pos = next_pos\n \n if self._is_satisficing:\n for env_id, env_name in enumerate(environments):\n self.dist_pos[env_id][0] = self.dist_pos[env_id][0] * torch.abs(self.total_grad)\n self.dist_neg[env_id][0] = self.dist_neg[env_id][0] * torch.abs(self.total_grad)\n\n if self.and_mask:\n consistent_ones = torch.logical_or(self.all_positive, self.all_negative)\n final_grads = self.total_grad * consistent_ones\n else: \n marginals, _ = self.ba.optimize_rd(self.dist_pos, self.dist_neg, [self.dist_pos[0][0].shape])\n\n marginals[0][marginals[0]<0.0] = 1e-8\n marginals[0][marginals[0]>1.0] = 1.0 - 1e-8\n\n if self.without_sampling:\n pos_ones = marginals[0]\n else:\n # Sample the directions\n pos_ones = torch.bernoulli(marginals[0])\n\n if self.control_only_direction:\n magnitude = torch.abs(self.total_grad)\n direction = 2*pos_ones - 1\n final_rd_grads = magnitude * direction\n else:\n final_rd_grads = self.pos_grad*pos_ones + self.neg_grad*(1-pos_ones)\n\n\n if self.only_inconsistent:\n consistent_ones = torch.logical_or(self.all_positive, self.all_negative)\n final_grads = final_rd_grads * torch.logical_not(consistent_ones) + self.total_grad * consistent_ones\n else:\n final_grads = final_rd_grads\n\n self.model.zero_grad()\n \n results = {\n 'g': g,\n 'y_true': y_true,\n 'y_pred': full_outputs,\n 'metadata': metadata,\n }\n\n objective = self.objective(results)\n results['objective'] = objective.item()\n cur_pos = 0\n for i,p in enumerate(self.model.parameters()):\n next_pos = cur_pos + 
np.prod(list(p.shape))\n p.grad.data.copy_(final_grads[cur_pos:next_pos].data.view(p.shape))\n cur_pos = next_pos\n\n if self.max_grad_norm:\n clip_grad_norm_(self.model.parameters(), self.max_grad_norm)\n self.optimizer.step()\n self.step_schedulers(\n is_epoch=False,\n metrics=results,\n log_access=False)\n return results", "def get_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes_list,\n img_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n label_channels=1,\n unmap_outputs=True,\n ):\n\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n concat_anchor_list = []\n concat_valid_flag_list = []\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n concat_anchor_list.append(torch.cat(anchor_list[i]))\n concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n results = multi_apply(\n self._get_targets_single,\n concat_anchor_list,\n concat_valid_flag_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n img_metas,\n label_channels=label_channels,\n unmap_outputs=unmap_outputs)\n\n (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,\n valid_neg_inds, sampling_result) = results\n\n # Due to valid flag of anchors, we have to calculate the real pos_inds\n # in origin anchor set.\n pos_inds = []\n for i, single_labels in enumerate(labels):\n pos_mask = (0 <= single_labels) & (\n single_labels < self.num_classes)\n pos_inds.append(pos_mask.nonzero().view(-1))\n\n gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]\n return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n gt_inds)", "def compute_metrics(self, results: Sequence[Dict]) -> Dict:\n\n preds = []\n gts = []\n for result in results:\n preds.append(result['pred_labels'])\n gts.append(result['gt_labels'])\n preds = torch.cat(preds)\n gts = torch.cat(gts)\n\n assert preds.max() < self.num_classes\n assert gts.max() < self.num_classes\n\n cared_labels = preds.new_tensor(self.cared_labels, dtype=torch.long)\n\n hits = (preds == gts)[None, :]\n preds_per_label = cared_labels[:, None] == preds[None, :]\n gts_per_label = cared_labels[:, None] == gts[None, :]\n\n tp = (hits * preds_per_label).float()\n fp = (~hits * preds_per_label).float()\n fn = (~hits * gts_per_label).float()\n\n result = {}\n if 'macro' in self.mode:\n result['macro_f1'] = self._compute_f1(\n tp.sum(-1), fp.sum(-1), fn.sum(-1))\n if 'micro' in self.mode:\n result['micro_f1'] = self._compute_f1(tp.sum(), fp.sum(), fn.sum())\n\n return result", "def group_mae(outputs: torch.Tensor, targets: torch.Tensor) -> List[Tuple[int, int, int, float, str]]:\n # groups = [\n # (-1, 1800, \"0-0.5h\"),\n # (1800, 3600, \"0.5-1h\"),\n # (3600, 7200, \"1-2h\"),\n # (7200, 10800, \"2-3h\"),\n # (10800, 14400, \"3-4h\"),\n # (14400, 18000, \"4-5h\"),\n # (18000, 21600, \"5-6h\"),\n # (21600, 25200, \"6-7h\"),\n # (25200, 28800, \"7-8h\"),\n # (28800, 32400, \"8-9h\"),\n # (32400, 36000, \"9-10h\"),\n # (36000, 39600, \"10-11h\"),\n # (39600, 43200, \"11-12\"),\n # (43200, 86400, \"12h - 1 day\"),\n # (86400, 172800, \"1 day - 2 days\"),\n # (172800, 259200, \"2 days - 3 days\"),\n # (259200, 345600, \"3 days - 4 days\"),\n # (345600, 432000, \"4 days - 5 days\"),\n # (432000, 518400, \"5 days - 6 days\"),\n # (518400, 604800, \"6 
days - 1 week\"),\n # (604800, 155520000, \"1 week - 1 month\"),\n # (155520000, int(data_ranges[\"label\"][\"max\"]), \"> 1 month\")\n # ]\n groups = [\n (-1, 1800, \"0-0.5h\"),\n (1800, 3600, \"0.5-1h\"),\n (3600, 7200, \"1-2h\"),\n (7200, 10800, \"2-3h\"),\n (10800, 14400, \"3-4h\"),\n (14400, 21600, \"4-6h\"),\n (21600, 28800, \"6-8h\"),\n (28800, 36000, \"8-10h\"),\n (36000, 43200, \"10-12h\"),\n (43200, 50400, \"12-16h\"),\n (50400, 64800, \"16-20h\"),\n (64800, 86400, \"20-24h\"),\n (86400, 172800, \"1-2d\"),\n (172800, 259200, \"2-3d\"),\n (259200, 345600, \"3-4d\"),\n (345600, 432000, \"4-5d\"),\n (432000, 518400, \"5-6d\"),\n (518400, 604800, \"6-7d\"),\n (604800, 1209600, \"1-2w\"),\n (1209600, 2419200, \"2-4w\"),\n (2419200, int(data_ranges[\"label\"][\"max\"]), \"> 4w\")\n ]\n\n def scale(seconds: int) -> float:\n # half_range = (data_ranges[\"label\"][\"max\"] - data_ranges[\"label\"][\"min\"]) / 2\n # result = seconds / half_range\n # return -1 + result if seconds < half_range else result\n label_range = data_ranges[\"label\"][\"max\"]\n return seconds / label_range\n\n def process_group(x: torch.Tensor, y: torch.Tensor, group: Tuple[int, int, str]) -> Tuple[int, int, int, float,\n str]:\n criterion = nn.L1Loss(reduction=\"mean\")\n mask = (y > scale(group[0])) & (y <= scale(group[1]))\n # mask = (y > group[0]) & (y <= group[1])\n x = x[mask]\n y = y[mask]\n mae = 0.\n num_data = x.shape[0]\n if num_data > 0:\n loss = criterion(x, y)\n mae = loss.item()\n return group[0], group[1], num_data, mae, group[2]\n\n mae_groups = [process_group(outputs, targets, group) for group in groups]\n return mae_groups", "def _dad_reduce_all_gather(self, act_tensor, grad_tensor, *args, **kw):\n act_gathered = [_torch.zeros_like(act_tensor) for _ in range(_dist.get_world_size())]\n grad_gathered = [_torch.zeros_like(grad_tensor) for _ in range(_dist.get_world_size())]\n\n _dist.all_gather(act_gathered, act_tensor)\n _dist.all_gather(grad_gathered, grad_tensor)\n\n act_gathered = _torch.cat(act_gathered)\n grad_gathered = _torch.cat(grad_gathered)\n\n return act_gathered, grad_gathered", "def compute_targets(anchors, bboxes, num_classes, labels=None, negative_iou_thresh=0.3, positive_iou_thresh=0.5):\n positive_indices, ignore_indices, negative_indices, max_iou_indices = tf_compute_gt_indices(anchors, bboxes, negative_iou_thresh=0.4, positive_iou_thresh=0.5)\n \n #create the sine column for whether a anchor is background (0), an object (1), or should be ignore (-1)\n iou_sine_col = tf.zeros(anchors.get_shape()[0])\n pos_iou_sine_col = tf.zeros(anchors.get_shape()[0])\n if positive_indices.get_shape()[0]!=0:\n # we call this something else b/c we can use it to get the positive classes matrix\n pos_iou_sine_col = tf.tensor_scatter_nd_add(iou_sine_col, positive_indices, tf.ones(tf.shape(positive_indices)[0]))\n if ignore_indices.get_shape()[0]!=0:\n iou_sine_col = tf.tensor_scatter_nd_sub(pos_iou_sine_col, ignore_indices, tf.ones(tf.shape(ignore_indices)[0]))\n \n #create the class targets (N, K+1)\n def _map_class(max_iou_indices, labels):\n \"\"\"Fast way to map indexes of boxes to corresponsing labels\"\"\"\n #add on index column\n max_iou_indices = tf.stack([tf.reshape(tf.convert_to_tensor([np.arange(0, tf.shape(all_anchors)[0])]), [1, tf.shape(max_iou_indices)[0]]),\n tf.cast(tf.expand_dims(max_iou_indices, axis=0), dtype=tf.int32)], axis=0)\n max_iou_indices = tf.transpose(tf.squeeze(max_iou_indices))\n broadcasted_labels = tf.broadcast_to(labels, [tf.shape(all_anchors)[0], 
tf.shape(random_labels)[0]])\n anchor_classes = tf.gather_nd(broadcasted_labels, temp)\n return anchor_classes\n\n if num_classes<=2:\n classification_targets = tf.transpose(tf.stack([pos_iou_sine_col, iou_sine_col], axis=0))\n else:\n assert labels is not None, \"Labels as tensor of ints for each bbox need to be passed if multiple classes\"\n # map the bbox index that each anchor overlaps with the most to the corresponsing label\n # this is very slow so need to come back to find a better way\n anchor_classes = _map_class(max_iou_indices, labels)\n \n # keep only the positive ones (swap with -1 since tensorflow make -1 become 0 and one-hot enconding)\n anchor_classes = tf.tensor_scatter_nd_update(anchor_classes, ignore_indices, tf.constant(-1, shape=tf.shape(ignore_indices)[0], dtype=tf.int32))\n anchor_classes = tf.tensor_scatter_nd_update(anchor_classes, negative_indices, tf.constant(-1, shape=tf.shape(negative_indices)[0], dtype=tf.int32))\n\n class_matrix = tf.one_hot(tf.cast(anchor_classes, tf.int32), num_classes)\n \n #add on the sine col\n classification_targets = tf.concat([class_matrix, tf.expand_dims(iou_sine_col, -1)], axis=1)\n \n #create regression targets (N, 4 + 1)\n #closest bounding box to each anchor\n gt_bboxes = tf.gather(bboxes, max_iou_indices) # (N, 4)\n \n regression_matrix = compute_gt_transforms(anchors, gt_bboxes, mean=0.0, std=0.2)\n #add on the sine col\n regression_targets = tf.concat([regression_matrix, tf.expand_dims(iou_sine_col, -1)], axis=1)\n return (classification_targets, regression_targets)", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n neg_elbo_sum = sum(log.get('neg_elbo', 0) for log in logging_outputs)\n recon_loss_sum = sum(log.get('recon_loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n KLz_sum = sum(log.get('KLz', 0) for log in logging_outputs)\n KLt_sum = sum(log.get('KLt', 0) for log in logging_outputs)\n KLtheta_sum = sum(log.get('KLtheta', 0) for log in logging_outputs)\n\n if 'nll_iw' in logging_outputs[0]:\n nll_iw_sum = sum(log.get('nll_iw', 0) for log in logging_outputs)\n metrics.log_scalar('nll_iw_s', nll_iw_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('nll_iw_t', nll_iw_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5) \n metrics.log_derived('ppl_iw', lambda meters: utils.get_perplexity(meters['nll_iw_t'].avg), priority=6)\n\n else:\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), \n sample_size, round=3, priority=3)\n\n metrics.log_scalar('neg_elbo_s', neg_elbo_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('recon_loss_s', recon_loss_sum / nsentences, \n nsentences, round=3, priority=4)\n\n metrics.log_scalar('neg_elbo_t', neg_elbo_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n metrics.log_scalar('recon_loss_t', recon_loss_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n\n metrics.log_scalar('KLz', KLz_sum / nsentences, nsentences, round=1, priority=8)\n metrics.log_scalar('KLt', KLt_sum / nsentences, nsentences, round=1, priority=8)\n metrics.log_scalar('KLtheta', KLtheta_sum / nsentences, nsentences, round=1, priority=8)\n\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['neg_elbo_t'].avg), priority=6)\n 
metrics.log_derived('recon_ppl', lambda meters: utils.get_perplexity(meters['recon_loss_t'].avg), priority=7)\n\n if 'active' in logging_outputs[0]:\n metrics.log_scalar('active', logging_outputs[0]['active'], weight=0, round=1, priority=10)\n metrics.log_scalar('percent', logging_outputs[0]['percent'], weight=0, round=2, priority=10)\n # metrics.log_scalar('nlow', logging_outputs[0]['nlow'], weight=0, priority=10)\n # metrics.log_scalar('nhigh', logging_outputs[0]['nhigh'], weight=0, priority=10)", "def __call__(self, inputs, targets):\n self._verify_data(inputs, targets)\n height, width = inputs.shape[2], inputs.shape[3]\n\n if self._num_classes is None:\n self._num_classes = self.network(inputs).shape[1]\n\n # Due to the unsupported Op of slice assignment, we use numpy array here\n targets = self._unify_targets(inputs, targets)\n\n attr_np = np.zeros(shape=(inputs.shape[0], targets.shape[1], height, width))\n\n cal_times = math.ceil(self._num_masks / self._perturbation_per_eval)\n\n for idx, data in enumerate(inputs):\n bg_data = data * 0 + self._base_value\n data = op.reshape(data, (1, -1, height, width))\n for j in range(cal_times):\n bs = min(self._num_masks - j * self._perturbation_per_eval,\n self._perturbation_per_eval)\n\n masks = self._generate_masks(data, bs)\n\n weights = masks * data + (1 - masks) * bg_data\n weights = self._activation_fn(self.network(weights))\n while len(weights.shape) > 2:\n weights = op.mean(weights, axis=2)\n\n weights = np.expand_dims(np.expand_dims(weights.asnumpy()[:, targets[idx]], 2), 3)\n\n attr_np[idx] += np.sum(weights * masks.asnumpy(), axis=0)\n\n attr_np = attr_np / self._num_masks\n\n return op.Tensor(attr_np, dtype=inputs.dtype)", "def get_batch(\n self, batch_indices, labels_important: bool\n ): # batch_indices is a list, e.g. 
one of labelled_set\n\n sequences, tags = [self.data[i] for i in batch_indices], [self.labels[i] for i in batch_indices]\n\n padded_sentences, lengths = pad_packed_sequence(\n pack_sequence(\n [torch.LongTensor(_) for _ in sequences], enforce_sorted=False\n ),\n batch_first=True,\n padding_value=self.padding_token,\n )\n padded_tags, _ = pad_packed_sequence(\n pack_sequence([torch.LongTensor(_) for _ in tags], enforce_sorted=False),\n batch_first=True,\n padding_value=self.empty_tag,\n )\n\n semi_supervision_mask = torch.ones(padded_tags.shape)\n\n if labels_important:\n # Fill in the words that have not been queried\n for j, sentence_tags in enumerate(padded_tags):\n sentence_index = batch_indices[j]\n for token_idx in range(int(lengths[j])):\n if token_idx in self.index.labelled_idx[sentence_index]:\n pass\n elif token_idx in self.index.temp_labelled_idx[sentence_index]:\n padded_tags[j, token_idx] = torch.tensor(self.temp_labels[sentence_index][token_idx])\n elif token_idx in self.index.unlabelled_idx[sentence_index]:\n padded_tags[j, token_idx] = torch.exp(torch.tensor(self.last_preds[sentence_index][token_idx]))\n semi_supervision_mask[\n j, token_idx\n ] = self.semi_supervision_multiplier\n else: # Padding\n continue\n\n return padded_sentences, padded_tags, lengths, semi_supervision_mask\n\n return padded_sentences, torch.tensor([]), lengths, semi_supervision_mask", "def collater(self, samples):\n batch = self.base_dataset.collater(samples)\n # In case of an empty batch, return an empty dict\n if len(batch) == 0:\n return {}\n auxiliary_targets_map = {}\n for i, s in enumerate(samples):\n auxiliary_targets_map[s['id']] = i\n sort_order = []\n for s_id in batch['id'].tolist():\n sort_order.append(auxiliary_targets_map[s_id])\n sort_order = torch.tensor(sort_order)\n auxiliary_target = torch.stack([s[\"auxiliary_target\"] for s in samples])\n batch['auxiliary_target'] = auxiliary_target.index_select(0, sort_order)\n return batch", "def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')", "def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')", "def calculate_batch_metrics(self):\n pass", "def make_target(self, state_index, traj):\n\n # The value target is the discounted root value of the search tree N steps\n # into the future, plus the discounted sum of all rewards until then.\n targets = []\n root_values = traj[\"root_value\"]\n rewards = traj[\"reward\"]\n child_visits = traj[\"child_visits\"]\n target_value = traj[\"target_value\"]\n obs = traj[\"cur_state\"]\n\n for current_index in range(state_index, state_index + self.unroll_step + 1):\n\n if current_index < len(root_values):\n targets.append((target_value[current_index], rewards[current_index], child_visits[current_index]))\n else:\n # States past the end of games are treated as absorbing states.\n targets.append((0, 0, []))\n return targets", "def compute_targets(rollout, action_space, last_r=0.0, gamma=0.9, lambda_=1.0):\n\n rollout = compute_advantages(rollout, last_r, gamma=gamma, lambda_=lambda_)\n rollout[\"adv_targets\"] = np.zeros((rollout.count, action_space.n))\n rollout[\"adv_targets\"][np.arange(rollout.count), rollout[\"actions\"]] = \\\n rollout[\"advantages\"]\n rollout[\"value_targets\"] = rollout[\"rewards\"].copy()\n rollout[\"value_targets\"][:-1] += 
gamma * rollout[\"vf_preds\"][1:]\n return rollout", "def Allreduce2(net, blobs, reduced_affix, gpu_indices):\n a, b = blobs\n gpu_a, gpu_b = gpu_indices\n a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))\n b_reduced = a_reduced.Copy(\n [],\n b + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n return a_reduced, b_reduced", "def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def aggregate(self, samples, input_features, dims, num_samples, support_sizes, batch_size=None,\n aggregators=None, name=None, concat=False, model_size=\"small\"):\n\n if batch_size is None:\n batch_size = self.batch_size\n\n # length: number of layers + 1\n # sampled node features xv\n hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]\n new_agg = aggregators is None\n if new_agg:\n aggregators = []\n # num_samples: each layer samples number [10,25]\n for layer in range(len(num_samples)): # in each layer\n if new_agg:\n dim_mult = 2 if concat and (layer != 0) else 1\n # aggregator at current layer\n if layer == len(num_samples) - 1:\n aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=lambda x : x,\n dropout=self.placeholders['dropout'],\n name=name, concat=concat, model_size=model_size)\n else:\n aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1],\n dropout=self.placeholders['dropout'],\n name=name, concat=concat, model_size=model_size)\n aggregators.append(aggregator)\n else:\n aggregator = aggregators[layer]\n # hidden representation at current layer for all support nodes that are various hops away\n next_hidden = []\n # as layer increases, the number of support nodes needed decreases\n for hop in range(len(num_samples) - layer): # support node\n dim_mult = 2 if concat and (layer != 0) else 1\n neigh_dims = [batch_size * support_sizes[hop],\n num_samples[len(num_samples) - hop - 1],\n dim_mult*dims[layer]]\n h = aggregator((hidden[hop],\n tf.reshape(hidden[hop + 1], neigh_dims)))\n next_hidden.append(h)\n hidden = next_hidden\n return hidden[0], aggregators", "def test_model(preds, target):\n ### START CODE HERE (Replace instances of 'None' with your code) ###\n print(preds.shape, target.shape)\n total_log_ppx = np.sum(preds * tl.one_hot(target, preds.shape[-1]), axis= -1) # HINT: tl.one_hot() should replace one of the Nones\n print(total_log_ppx.shape)\n \n non_pad = 1.0 - np.equal(target, 0) # You should check if the target equals 0\n ppx = total_log_ppx * non_pad # Get rid of the padding\n\n log_ppx = np.sum(ppx) / np.sum(non_pad)\n ### END CODE HERE ###\n \n return -log_ppx", "def __init__(\n self,\n queries: Tensor,\n query_labels: Sequence[int],\n targets: Tensor,\n target_labels: Sequence[int],\n distance: str = 'cosine',\n metrics: Sequence[Union[str, ClassificationMetric]] = [\n 'binary_accuracy', 'f1score'\n ], # noqa\n tb_logdir: str = None,\n k: int = 1,\n matcher: Union[str, ClassificationMatch] = 'match_nearest',\n distance_thresholds: Optional[FloatTensor] = None):\n super().__init__()\n self.queries = queries\n self.query_labels: IntTensor = tf.cast(\n tf.convert_to_tensor(query_labels), dtype='int32')\n self.targets = targets\n self.target_labels = target_labels\n 
self.distance = distance\n self.evaluator = MemoryEvaluator()\n # typing requires this weird formulation of creating a new list\n self.metrics: List[ClassificationMetric] = ([\n make_classification_metric(m) for m in metrics\n ])\n self.k = k\n self.matcher = matcher\n if distance_thresholds is not None:\n self.distance_thresholds = distance_thresholds\n else:\n self.distance_thresholds = tf.constant([math.inf])\n\n if tb_logdir:\n tb_logdir = str(Path(tb_logdir) / 'index/')\n self.tb_writer = tf.summary.create_file_writer(tb_logdir)\n print('TensorBoard logging enable in %s' % tb_logdir)\n else:\n self.tb_writer = None", "def get_targets(self, states, j):\n a = self.get_optimal_action(states, j)\n a = np.expand_dims(a, axis=1)*1\n return {'gt_action': a}", "def update(self, phase, targets, outputs):\n iou, dice, dice_neg, dice_pos, _, _ = self.metric(outputs, targets)\n self.base_dice_scores[phase].append(dice)\n self.dice_pos_scores[phase].append(dice_pos)\n self.dice_neg_scores[phase].append(dice_neg)\n self.iou_scores[phase].append(iou)", "def _compute_sampled_logit_batched(self, input, target_idx, noise_idx):\n\n original_size = target_idx.size()\n\n # flatten the following matrix\n input = input.contiguous().view(-1, input.size(-1))\n target_idx = target_idx.view(-1)\n noise_idx = noise_idx[0, 0].view(-1)\n\n target_batch = self.emb(target_idx)\n # target_bias = self.bias.index_select(0, target_idx) # N\n target_bias = self.bias(target_idx).squeeze(1) # N\n target_score = torch.sum(input * target_batch, dim=1) + target_bias # N X E * N X E\n\n noise_batch = self.emb(noise_idx) # Nr X H\n # noise_bias = self.bias.index_select(0, noise_idx).unsqueeze(0) # Nr\n noise_bias = self.bias(noise_idx) # 1, Nr\n noise_score = torch.matmul(\n input, noise_batch.t()\n ) + noise_bias.t() # N X Nr\n return target_score.view(original_size), noise_score.view(*original_size, -1)", "def graph_helper(device, output,input,target):\n output = output.clone().squeeze()\n corrects = torch.zeros(output.shape[0])\n for i in range(output.shape[0]): # goes through each iteration\n outputi = output[i]\n golden_label = convert_to_bits(device, outputi, input)\n target = target.view(target.size(0), -1)\n corrects[i] += torch.amin(golden_label == target, dim=[0]).sum().item() # counts the number that are the same i.e. correct predictions\n correct = corrects.cpu().detach().numpy()\n return correct", "def evaluate_multiple(logits, targets, eval_cutoffs=[5, 10, 20], batch_wise=False):\n _, indices = torch.topk(logits, max(eval_cutoffs), -1)\n recall, mrr = [], []\n for k in eval_cutoffs:\n indices_k = indices[:, :k]\n targets_k = targets\n recall_k, mrr_k = get_recall(indices_k, targets_k, batch_wise), get_mrr(indices_k, targets_k, batch_wise)\n\n recall.append(recall_k)\n\n mrr.append(mrr_k)\n # print([[str(x.size()) for x in recall], str(targets.size()), str(indices_k.size())])\n return recall, mrr", "def _Moments(self, inputs, group_size):\n counts, mean_ss, variance_ss, _, = tf.nn.sufficient_statistics(\n inputs, axes=[0, 1, 2], keep_dims=False)\n self.accumulators.counts.Update(counts)\n self.accumulators.mean_ss.Update(mean_ss)\n self.accumulators.variance_ss.Update(variance_ss)\n # Distributed batch norm that computes sufficient statistics from group_size\n # replicas. 
This is useful when batch_size_per_replica is too small to\n # compute reliable sufficient statistics.\n if py_utils.use_tpu() and group_size > 1:\n group_assignment = None\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if num_shards is not None:\n if num_shards < group_size:\n raise ValueError('TPU shards={} less than bn_gropu_size={}.'.format(\n num_shards, group_size))\n if num_shards % group_size:\n raise ValueError(\n 'TPU shards={} not divisible by bn_group_size={}.'.format(\n num_shards, group_size))\n num_groups = num_shards // group_size\n group_assignment = []\n for g in range(num_groups):\n replica_ids = [g * group_size + i for i in range(group_size)]\n group_assignment.append(replica_ids)\n counts *= group_size\n mean_ss = tf.contrib.tpu.cross_replica_sum(mean_ss, group_assignment)\n variance_ss = tf.contrib.tpu.cross_replica_sum(variance_ss,\n group_assignment)\n # At each micro-step, batch_mean and batch_variance are computed\n # to normalize inputs. But they are not used to update moving_mean and\n # moving_variance variables until the last micro batch.\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n return mean, variance", "def make_categorical(n_rows_population=500, \n n_rows_peripheral=125000, \n random_state=None,\n aggregation=aggregations.Count):\n random = np.random.RandomState(random_state)\n\n population_table = pd.DataFrame()\n population_table[\"column_01\"] = random.randint(0, 10, n_rows_population).astype(np.str)\n population_table[\"join_key\"] = np.arange(n_rows_population)\n population_table[\"time_stamp_population\"] = random.rand(n_rows_population)\n\n peripheral_table = pd.DataFrame()\n peripheral_table[\"column_01\"] = random.randint(0, 10, n_rows_peripheral).astype(np.str)\n peripheral_table[\"join_key\"] = random.randint(0, n_rows_population, n_rows_peripheral) \n peripheral_table[\"time_stamp_peripheral\"] = random.rand(n_rows_peripheral)\n\n # Compute targets\n temp = peripheral_table.merge(\n population_table[[\"join_key\", \"time_stamp_population\"]],\n how=\"left\",\n on=\"join_key\"\n )\n\n # Apply some conditions\n temp = temp[\n (temp[\"time_stamp_peripheral\"] <= temp[\"time_stamp_population\"]) &\n (temp[\"column_01\"] != \"1\") &\n (temp[\"column_01\"] != \"2\") &\n (temp[\"column_01\"] != \"9\")\n ]\n\n # Define the aggregation\n temp = _aggregate(temp, aggregation, \"column_01\", \"join_key\")\n\n temp = temp.rename(index=str, columns={\"column_01\": \"targets\"})\n\n population_table = population_table.merge(\n temp,\n how=\"left\",\n on=\"join_key\"\n )\n\n del temp\n\n population_table = population_table.rename(\n index=str, columns={\"time_stamp_population\": \"time_stamp\"})\n\n peripheral_table = peripheral_table.rename(\n index=str, columns={\"time_stamp_peripheral\": \"time_stamp\"})\n\n # Replace NaN targets with 0.0 - target values may never be NaN!.\n population_table.targets = np.where(\n np.isnan(population_table['targets']), \n 0, \n population_table['targets'])\n\n return population_table, peripheral_table", "def _Moments(self, inputs, group_size):\n counts, mean_ss, variance_ss, _, = tf.nn.sufficient_statistics(\n inputs, axes=[0, 1, 2], keepdims=False)\n self.accumulators.counts.Update(counts)\n self.accumulators.mean_ss.Update(mean_ss)\n self.accumulators.variance_ss.Update(variance_ss)\n # Distributed batch norm that computes sufficient statistics from group_size\n # replicas. 
This is useful when batch_size_per_replica is too small to\n # compute reliable sufficient statistics.\n if py_utils.use_tpu() and group_size > 1:\n group_assignment = None\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if num_shards is not None:\n if num_shards < group_size:\n raise ValueError('TPU shards={} less than bn_gropu_size={}.'.format(\n num_shards, group_size))\n if num_shards % group_size:\n raise ValueError(\n 'TPU shards={} not divisible by bn_group_size={}.'.format(\n num_shards, group_size))\n num_groups = num_shards // group_size\n group_assignment = []\n for g in range(num_groups):\n replica_ids = [g * group_size + i for i in range(group_size)]\n group_assignment.append(replica_ids)\n counts *= group_size\n mean_ss = tf.tpu.cross_replica_sum(mean_ss, group_assignment)\n variance_ss = tf.tpu.cross_replica_sum(variance_ss, group_assignment)\n # At each micro-step, batch_mean and batch_variance are computed\n # to normalize inputs. But they are not used to update moving_mean and\n # moving_variance variables until the last micro batch.\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n return mean, variance", "def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()", "def regroup_dataset(labels):\r\n batch_y = labels.copy()\r\n for i, label in enumerate(labels):\r\n if label in [0, 15, 19]:\r\n batch_y[i]=0\r\n if label in [1, 2, 3, 4, 5,]:\r\n batch_y[i]=1\r\n if label in [6]:\r\n batch_y[i]=2\r\n if label in [7,8,9,10]:\r\n batch_y[i]=3\r\n if label in [11,12,13,14]:\r\n batch_y[i]=4\r\n if label in [16,17,18]:\r\n batch_y[i]=5\r\n \r\n print('regrouped label', batch_y.shape)\r\n return batch_y", "def reduce_metrics(logging_outputs) -> None:\n\n loss_sum = utils.item(sum(log.get(\"loss\", 0) for log in logging_outputs))\n ntokens = utils.item(sum(log.get(\"ntokens\", 0) for log in logging_outputs))\n nsentences = utils.item(\n sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n )\n sample_size = utils.item(\n sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n )\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\"ntokens\", ntokens)\n metrics.log_scalar(\"nsentences\", nsentences)\n if sample_size != ntokens:\n metrics.log_scalar(\n \"nll_loss\", loss_sum / ntokens / math.log(2), ntokens, round=3\n )", "def compute_metrics(self, model, infer, prior, recon,\n targets, mask, lengths, order, args):\n metrics = dict()\n if type(lengths) != torch.Tensor:\n lengths = torch.FloatTensor(lengths).to(args.device)\n # Compute and store KLD and reconstruction losses\n metrics['kld_loss'] = model.kld_loss(infer, prior, mask).item()\n metrics['rec_loss'] = model.rec_loss(targets, recon, mask,\n args.rec_mults).item()\n\n for m in list(recon.keys()): targets[m][torch.isnan(targets[m])] = 0\n # Compute mean squared error in 2D space for each time-step\n tdims = targets[m].dim()\n mse = sum([(recon[m][0]-targets[m]).pow(2).sum(dim=list(range(2, tdims))) for m in list(recon.keys())])\n # Average across timesteps, for each sequence\n def time_avg(val):\n val[1 - mask.squeeze(-1)] = 0.0\n return val.sum(dim = 0) / lengths\n metrics['mse'] = time_avg(mse)[order].tolist()\n return metrics", "def _mer_update(preds: Union[str, List[str]], target: Union[str, List[str]]) ->Tuple[Tensor, Tensor]:\n if isinstance(preds, str):\n preds = [preds]\n if isinstance(target, str):\n target = 
[target]\n errors = tensor(0, dtype=torch.float)\n total = tensor(0, dtype=torch.float)\n for pred, tgt in zip(preds, target):\n pred_tokens = pred.split()\n tgt_tokens = tgt.split()\n errors += _edit_distance(pred_tokens, tgt_tokens)\n total += max(len(tgt_tokens), len(pred_tokens))\n return errors, total", "def _get_batch_of_transformed_samples(self, indices: np.array):\n x, y = super()._get_batch_of_transformed_samples(indices)\n x['dpool_index'] = _dynamic_pooling_index(\n x['length_left'],\n x['length_right'],\n self._fixed_length_left,\n self._fixed_length_right,\n self._compress_ratio_left,\n self._compress_ratio_right\n )\n return (x, y)", "def _get_batch_of_transformed_samples(self, indices: np.array):\n x, y = super()._get_batch_of_transformed_samples(indices)\n x['dpool_index'] = _dynamic_pooling_index(\n x['length_left'],\n x['length_right'],\n self._fixed_length_left,\n self._fixed_length_right,\n self._compress_ratio_left,\n self._compress_ratio_right\n )\n return (x, y)", "def constellaqc(denovo_groups, annotated_groups):\n known_feat = np.unique(annotated_groups.loc[:, 'group'])\n pred_group = np.unique(denovo_groups.loc[:, 'group'])\n\n scores = []\n\n for anno in known_feat:\n # anno_bool_index = annotated_groups.loc[:, 'group'] == anno\n anno_group_calls = denovo_groups.loc[annotated_groups.loc[:, 'group'] == anno, 'group'].values\n # print(anno, 'count: ', np.sum(anno_bool_index))\n score_row = []\n for denovo in pred_group:\n score_row.append(np.sum(anno_group_calls == denovo))\n scores.append(score_row)\n\n scores = pd.DataFrame(scores, index=known_feat, columns=pred_group)\n\n if params.debug is not None:\n print('Known Feature-Predicted Group Scoring Matrix:\\n')\n print(scores)\n\n anno_sum = []\n anno_no = []\n anno_error = []\n ni = []\n\n for anno in known_feat:\n anno_sum.append(np.sum(scores.loc[anno, :].values))\n anno_no.append(np.sum(scores.loc[anno, :].values != 0))\n anno_error.append(np.sum(scores.loc[anno, :].values != 0) - 1)\n ni.append(1)\n pred_sum = []\n pred_no = []\n pred_error = []\n nj = []\n\n for denovo in pred_group:\n pred_sum.append(np.sum(scores.loc[:, denovo].values))\n pred_no.append(np.sum(scores.loc[:, denovo].values != 0))\n pred_error.append(np.sum(scores.loc[:, denovo].values != 0) - 1)\n nj.append(1)\n\n anno_valid = np.array(anno_sum) - ni - np.array(anno_error)\n # pred_valid = np.array(pred_sum) - nj - np.array(pred_error)\n\n v_sum = np.sum(anno_valid)\n s_sum = np.sum(anno_error)\n c_sum = np.sum(pred_error)\n total = v_sum + s_sum + c_sum\n\n print('\\n\\nValid Call Rate: ', round(100 * (v_sum / total), 2), '%')\n print('Splitting Call Rate: ', round(100 * (s_sum / total), 2), '%')\n print('Clumping Call Rate: ', round(100 * (c_sum / total), 2), '%')", "def get_eval_metric_ops(targets, predictions, tensors):\n # TODO(seominjoon): yp should also consider no answer case.\n yp1 = tf.expand_dims(predictions['yp1'], -1)\n yp2 = tf.expand_dims(predictions['yp2'], -1)\n answer_mask = tf.sequence_mask(targets['num_answers'])\n start_correct = tf.reduce_any(\n tf.equal(targets['word_answer_starts'], yp1) & answer_mask, 1)\n end_correct = tf.reduce_any(\n tf.equal(targets['word_answer_ends'], yp2) & answer_mask, 1)\n correct = start_correct & end_correct\n em = tf.py_func(\n _enum_fn(_exact_match_score, dtype='float32'), [\n predictions['a'], targets['answers'], predictions['has_answer'],\n answer_mask\n ], 'float32')\n f1 = tf.py_func(\n _enum_fn(_f1_score, dtype='float32'), [\n predictions['a'], targets['answers'], 
predictions['has_answer'],\n answer_mask\n ], 'float32')\n\n eval_metric_ops = {\n 'acc1': tf.metrics.mean(tf.cast(start_correct, 'float')),\n 'acc2': tf.metrics.mean(tf.cast(end_correct, 'float')),\n 'acc': tf.metrics.mean(tf.cast(correct, 'float')),\n 'em': tf.metrics.mean(em),\n 'f1': tf.metrics.mean(f1),\n }\n\n for key in tensors:\n if key.startswith('skim_rate_'):\n skim_rate = tf.py_func(\n _enum_fn(\n lambda x: x,\n dtype='float32'), [tensors[key]], 'float32')\n eval_metric_ops[key] = tf.metrics.mean(skim_rate)\n\n return eval_metric_ops", "def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)" ]
[ "0.55227154", "0.5356214", "0.5345962", "0.53439975", "0.52835166", "0.5247036", "0.5236918", "0.52299756", "0.5213777", "0.5200016", "0.518836", "0.5177476", "0.5166062", "0.51618797", "0.51440424", "0.51244515", "0.5098735", "0.5092885", "0.5085687", "0.50846714", "0.50765085", "0.50583243", "0.5052692", "0.5036996", "0.5029611", "0.5020674", "0.50069505", "0.50055397", "0.5004131", "0.49966487", "0.49950776", "0.49811578", "0.49727893", "0.49642986", "0.49565023", "0.49534956", "0.4952817", "0.49493217", "0.4945282", "0.49426508", "0.49401528", "0.49289933", "0.4927605", "0.4922945", "0.4917563", "0.49165735", "0.48989654", "0.48947877", "0.48892304", "0.48809534", "0.48762915", "0.48757735", "0.48614872", "0.48570833", "0.48564756", "0.48540342", "0.48426986", "0.48426446", "0.4839334", "0.48281735", "0.48275408", "0.48218712", "0.48218223", "0.4818567", "0.48047227", "0.4801538", "0.48014095", "0.48001814", "0.47949332", "0.47948027", "0.47893798", "0.47892553", "0.47823536", "0.4776393", "0.4776393", "0.47707525", "0.47693658", "0.47677717", "0.4766171", "0.4755979", "0.47535834", "0.4749646", "0.47418338", "0.47418275", "0.4740288", "0.47373575", "0.47351635", "0.47329912", "0.47324076", "0.4731889", "0.47274464", "0.4727389", "0.4726233", "0.47244528", "0.4720132", "0.4715874", "0.47156137", "0.47156137", "0.4715425", "0.47135586", "0.4708553" ]
0.0
-1
Read a speech file by name.
def load_file(self, filename):
        path = os.path.join(self.path_to_sentences, filename)
        log.info('Reading file %s', path)
        _, int_sentence = scipy.io.wavfile.read(path)
        sent = int_sentence.T / np.iinfo(int_sentence.dtype).min
        if self.force_mono and sent.ndim == 2:
            return sent[1]
        else:
            return sent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFile(filename):\r\n speechFile = open(filename, \"r\")\r\n speech = speechFile.read()\r\n speechFile.close()\r\n return speech", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample", "def read(name):\n\n return open(name).read()", "def read( self, song_file_name ):\n song_file = open( song_file_name )\n content = song_file.read()\n return self.split( content )", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(*file_name: str) -> str:\n with open(os.path.join(HERE, *file_name)) as f:\n return f.read()", "def read(path):", "def read(self, name: str) -> str:\n path = self.get_path(name)\n if not os.path.exists(path):\n return \"\"\n\n with open(path, \"r\") as fh:\n return fh.read()", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def read(file_name):\n with io.open(os.path.join(os.path.dirname(__file__), file_name),\n encoding='utf-8') as f:\n return f.read()", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def process_audio_file(self, file_name):\n sig, sr = librosa.load(file_name, mono=True)\n return self._extract_function(sig, sr)", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def load(name):\n with pyglet.resource.file(f'sounds/{name}.wav', 'rb') as f:\n return pygame.mixer.Sound(f)", "def read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as fp:\n return fp.read()", "def read_file(self, file_name: str)-> str:\n if not os.path.exists(file_name):\n raise IOError(\"The File {} doesn't exists!\".format(file_name))\n\n with open(file_name) as file:\n return file.read().strip()", "def process_file(self, file_name):\n logger.info(f'Recognising speech for {file_name}')\n wf = wave.open(file_name, \"rb\")\n # Check to see if the audio file can be read by the Vosk model\n if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != \"NONE\":\n raise Exception(f'Invalid file format for {file_name}')\n rec = KaldiRecognizer(self.model, wf.getframerate())\n results = []\n while True:\n data = wf.readframes(config.frame_to_read)\n # If the data we have read is empty then we are at the end of the file\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())\n # Result can contain an empty text string but no result list\n if len(result['text']) > 0:\n # If we reach 
here we have accepted the translation of a section of text\n results.extend(result['result'])\n result = json.loads(rec.FinalResult())\n # Add to results list\n if len(result['text']) > 0:\n results.extend(result['result'])\n logger.info(f'Processed speech, captured {len(results)} results')\n return results", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def read(self, filename):\n raise NotImplementedError", "def read_file(file_name):\n directory = get_directory()\n file_path = os.path.join(directory, file_name + \".txt\")\n with open(file_path, \"r\") as file:\n read_file = json.load(file)\n return read_file", "def read_file(file_name):\r\n\r\n if file_name.find('.md') == -1:\r\n file_name += '.md'\r\n\r\n with open(file_name, 'r', encoding='utf-8') as file:\r\n file_data = file.read()\r\n\r\n return file_data", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def read_file(self, fname, name):\r\n self.filename = name\r\n if fname != \".\":\r\n self.fname = f\"{fname}\\\\\"\r\n self.pathread = os.path.join(self.p, self.fname)\r\n else:\r\n self.pathread = self.p\r\n try:\r\n self.path = os.path.join(self.pathread, self.filename)\r\n with open(self.path, 'r') as read:\r\n self.data = read.readlines()\r\n except Exception as error:\r\n return error\r\n finally:\r\n send = \" \".join(self.data)\r\n return send", "def readFromFile(filename):\n raise NotImplementedError", "def load_sample(filename):\n return open(os.path.join(SAMPLES, filename)).read()", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def read_from_file(self, filename: str) -> None:", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def read_audio_from_path(path: str) ->Optional[TorchAudioTuple]:\n bytes_obj = get_bytes_obj_from_path(path)\n return read_audio_from_bytes_obj(bytes_obj)", "def readFile(self, name):\n\t\ttry:\n\t\t\tf = open(name, 'r')\n\t\t\tlines = f.readlines()\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\treturn None\n\n\t\treturn join(lines, \"\")", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def read(self, path: str) -> str:\n raise NotImplementedError", "def read(self, filename: Union[str, Path]) -> Music:\n return read_musicxml(filename)", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. 
Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def read_file(path):\n # Mystery arguments:\n strictness = False\n # Read the string:\n return _iterate_bibtexsource(_bibtex.open_file(path, strictness))", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def read_speeches(filename):\n\n # Open a speech file\n speech_file = open(filename)\n\n # Create a new dictionary\n speech_dict = {}\n\n # Iterate over lines\n for line in speech_file:\n # Replace whitespace, including /n, at the end of a line with a single space\n line = line.rstrip() + ' '\n\n # Given that a title begins with #\n if line.startswith('#'):\n # Remove '# ' at the beginning and ': ' at the end, to be used as a title\n title = line[2:-2]\n # Assign the tile as a key in the dictionary\n speech_dict[title] = ''\n # A speech line does not begins with #\n else:\n # Not begins with [ either\n if line.startswith('[') is False:\n # Append the speech line to the already existing string of the corresponding title\n # The tile variable is kept from the previous loop(s)\n speech_dict[title] += line\n\n # Close the file\n speech_file.close()\n\n return speech_dict", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def read_text(self, name: str) -> str:\n raise NotImplementedError()", "def get_text(self, file_number):\n\n with io.open(self.file_name.format(file_number), 'rb') as audio_file:\n content = audio_file.read()\n audio = types.RecognitionAudio(content=content)\n response = self.client.recognize(self.config, audio)\n texts = self._format_response(response)\n return texts", "def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)", "def read_file(path):\n assert_is_string(path)\n f = open(path, \"r\")\n data = f.read()\n f.close()\n return data", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n return self.read_file_object(file_obj, file_format=file_format)", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def 
read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def do_readsourcefile(self, file_name):\r\n self.file_name = file_name\r\n f = open(self.file_name, \"r\")\r\n print(f.read())\r\n f.close()", "def load_text_file(file_name: str) -> str:\r\n try:\r\n with open(file_name, encoding='windows-1251') as file_object:\r\n return file_object.read()\r\n except FileNotFoundError as err:\r\n print(f\"{err}\\n\"\r\n f\"Please make sure the file you are trying to open exists!\")\r\n quit()", "def open_file(file_name):\n pass", "def load_audio(path):\r\n if path[-4:] == \".wav\":\r\n fs, data = load_wav(path)\r\n\r\n elif path[-4:] == \".mp3\":\r\n fs, data = load_mp3(path)\r\n\r\n else:\r\n raise ValueError(\"Wrong file format, use mp3 or wav\")\r\n\r\n return fs, data", "def transcode_from_file(path, sample_rate):\n # Instantiates a client\n client = speech.SpeechClient()\n # Loads the audio into memory\n with io.open(path, 'rb') as audio_file:\n content = audio_file.read()\n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=sample_rate,\n language_code='ja-JP')\n # Detects speech in the audio file\n response = client.recognize(config, audio)\n try:\n logger.debug(\n f'Transcript: {response.results[0].alternatives[0].transcript}')\n return response.results[0].alternatives[0].transcript\n except Exception as e:\n logger.warning(f'Caught exception:{e}')\n return \"\"", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = time_signal/np.power(2,15)\n\n\t\treturn time_signal", "def read(*parts):\n with codecs.open(os.path.join(HERE, *parts), \"rb\", \"utf-8\") as f:\n return f.read()", "def read(*parts):\n with codecs.open(os.path.join(HERE, *parts), \"rb\", \"utf-8\") as f:\n return f.read()", "def read(*parts):\n with codecs.open(os.path.join(HERE, *parts), \"rb\", \"utf-8\") as f:\n return f.read()", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def read_file(self, path):\n with open(path) as f:\n return self.read_file_obj(f)", "def _read_one_line_file(name):\n with open(name, \"rb\") as file:\n data = file.read()\n return data.decode('utf-8').strip()", "def readfile(path: Union[str, Path]) -> str:\n with open(path) as infile:\n return infile.read()", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, 
mono=to_mono)", "def read(path):\n with open(path) as f:\n return f.read()", "def read(self, path, size, offset, fh, *args, **pargs):\n with self.rwlock:\n if(path in self._open_subtracks):\n real = False\n # Update the last accessed time.\n self._open_subtracks[path]['Last Access'] = time.time()\n # Store the requested offset.\n self._open_subtracks[path]['Positions'][fh] = offset\n else:\n real = True\n if(real):\n # For all non-FLACCue files, just access it normally.\n os.lseek(fh, offset, 0)\n return os.read(fh, size)\n # Wait for the file to finish opening.\n while(True):\n with(self.rwlock):\n self._open_subtracks[path]['Last Access'] = time.time()\n if(self._open_subtracks[path]['Audio'] is not None):\n audio = self._open_subtracks[path]['Audio']\n break\n time.sleep(0.1)\n # Return the data requested.\n if(offset > len(audio)):\n # If we're looking near the end of the file,\n # handle the fact that compression could change the size.\n reported_size = self.getattr(path)['st_size']\n if(offset < reported_size):\n offset = len(audio) - (reported_size - offset)\n return audio[offset:offset+size].tobytes()", "def choose_song(my_name):\n my_name = my_name.split(ET)[ZERO]\n path = ''\n for filename in os.listdir(str(Path.cwd()) + '/songs'):\n name = filename.split('\\\\')[-1]\n name = name.split('.')[ZERO]\n name = name.split(ET)[ZERO]\n if filename.endswith(\".wav\") and my_name == name:\n path = str(Path.cwd()) + r'\\songs\\%s' % filename\n if os.path.exists(path):\n return path\n else:\n return ERROR", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n students = f.read().splitlines()\n return students", "def read(self, file_name, *, file_type=None):\n self.logger.info('Reading KG correlations from %s',file_name)\n with make_reader(file_name, file_type, self.logger) as reader:\n self._read(reader)", "def open_and_read_file(file_path):\n\n # your code goes here\n file_name = (open(file_path)).read()\n return file_name", "def load_scipy (self, path):\r\n sr, sound = scipy.io.wavfile.read(path)\r\n \r\n return sound", "def read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:\n return f.read()", "def read(*parts):\n return codecs.open(os.path.join(HERE, *parts), 'r').read()", "def read_file(file_path: str) -> str:\n try:\n with open(file=file_path, mode='r', encoding=\"utf8\") as f:\n return f.read()\n\n except FileNotFoundError:\n raise FileNotFoundError(f'No text file was found at location {file_path}')", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def read(*args):\n return io.open(os.path.join(HERE, *args), encoding=\"utf-8\").read()", "def get(name):\n\n filename = find(name)\n if filename == None:\n return name\n return open(filename).read()", "def read(ftype, inDir, inSuffix, startTime, endTime):\n\n\tif ftype == 'ryan': return readRyan(inDir, inSuffix, startTime, endTime)\t\n\telif ftype == 'segmotion': return readSegmotion(inDir, inSuffix, startTime, endTime)\n\telif ftype == 'probsevere': return readProbSevere(inDir, inSuffix, startTime, endTime)", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n 
f.close()\n return string", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def read(infile):\n _, ext = os.path.splitext(infile)\n ext = ext.strip('.')\n return read_funcs[ext](infile)", "def get_data(path):\n if path.endswith('.mp3'):\n path = prepare_file(path, path.rstrip('mp3')+'wav')\n x, sr = librosa.load(path, duration=30)\n\n else:\n x, sr = librosa.load(path, duration=30)\n directory, file_name = os.path.split(path)\n return x, sr, file_name", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def load_file(file_name):\n with open(file_name,\"r\") as f:\n return f.readlines()", "def read_file_name(command):\n try:\n my_file.read_filename(command[1])\n except FileNotFoundError:\n print('The file {} cannot be found'.format(command[1]))", "def read(*parts: str) -> str:\n with codecs.open(os.path.join(HERE, *parts), \"rb\", \"utf-8\") as f:\n return f.read()", "def load_sound(self, filename):\n return mixer.Sound(os.path.join(\"sounds\", filename))", "def read_raw_text(self, raw_path: str = None):\n\n if raw_path.rsplit(\".\")[-1] == \"json\":\n self.import_from_json(raw_path)\n return\n\n if raw_path is not None:\n self.raw_path = raw_path\n\n if self.raw_path is None:\n raise Exception(\"Found no file to read\")\n\n file = open(raw_path, \"r\")\n raw = file.read()\n file.close()\n\n self.sentences += get_sentences(raw, self.cM.use_spacy)\n\n self.loaded(False)", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read(*parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with codecs.open(os.path.join(here, *parts), 'r', 'utf-8') as f:\n return f.read()", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]" ]
[ "0.77624923", "0.6965475", "0.67104244", "0.6667067", "0.66525364", "0.66401553", "0.6530066", "0.64281446", "0.64041317", "0.6391962", "0.6273382", "0.62610954", "0.62465245", "0.6240044", "0.62310964", "0.62310964", "0.6208363", "0.62026536", "0.61815035", "0.61784554", "0.616423", "0.6095564", "0.608848", "0.6074764", "0.6071196", "0.60607505", "0.6059605", "0.6046869", "0.6046189", "0.6024203", "0.60233617", "0.59887475", "0.5956835", "0.5955362", "0.5923415", "0.5913971", "0.5886581", "0.5864241", "0.58488756", "0.58398986", "0.5819571", "0.5815933", "0.5798864", "0.57825136", "0.57446694", "0.57356894", "0.5724943", "0.57187754", "0.5700082", "0.57000375", "0.56968915", "0.56940025", "0.5691814", "0.56839675", "0.568049", "0.56789696", "0.5669831", "0.56623876", "0.5658236", "0.56574994", "0.5650132", "0.56428987", "0.5639713", "0.5639713", "0.5639713", "0.56361693", "0.56266683", "0.5625141", "0.5623896", "0.5617867", "0.56091195", "0.56002355", "0.5599543", "0.5598412", "0.5597633", "0.5597633", "0.55967134", "0.55965596", "0.5596277", "0.5594661", "0.55902755", "0.55893236", "0.55865556", "0.5581658", "0.5580764", "0.55773854", "0.55629325", "0.5561941", "0.55579484", "0.55544066", "0.55531937", "0.55520433", "0.55492836", "0.5544914", "0.5528629", "0.55284315", "0.5524706", "0.5523024", "0.5514575", "0.551436", "0.5504817" ]
0.0
-1
Return a list of all the files in the corpus.
def files_list(self):
    path = os.path.join(self.path_to_sentences)
    log.info("Listing files from directory: %s", path)
    all_files = os.listdir(path)
    wav_files_only = [filename for filename in all_files if filename.lower().endswith('.wav')]
    return wav_files_only
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def get_corpus_filenames():\n corpus_file_list = []\n corpus_files = csv.reader(open('./gothic_text_data.csv'), delimiter=\",\", quotechar='\"')\n\n for row in corpus_files:\n if row[0] != \"Title\":\n corpus_file_list.append(row[14])\n return corpus_file_list", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def get_corpus():\n corpus_raw = []\n files = os.listdir()\n\n for name in files:\n if \".txt\" in name:\n try:\n file = open(name, \"rt\", encoding='utf8')\n data_org = file.read()\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .txt file. Please ensure that the text is UTF-8 encoded.\")\n elif \".docx\" in name:\n try:\n data_org = docx2txt.process(name)\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .docx file. Please ensure that the text is UTF-8 encoded.\")\n else:\n print(\"ERROR: Cannot print non .txt or .docx files. Please verify the input folder's contents.\")\n\n return corpus_raw", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def get_files_for_processing():\n all_files = os.listdir(read_path)\n txt_files = list(filter(lambda filename: fnmatch.fnmatch(filename, '*.txt'), all_files))\n return txt_files", "def listFiles(self):\n pass", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def get_all_files(cwd):\n return os.listdir(cwd)", "def open_files(directory):\n documents = []\n for fl in (os.listdir(directory)):\n if fl.endswith('.txt'):\n fl_path = os.path.join(directory, fl)\n with open(fl_path, 'r') as f:\n full_text = f.read()\n documents.append(full_text)\n return documents", "def retrieve_all_files(self):\n result = utilities.rscandir(\n self.folder(), ignore_dirs=[\".git\"])\n\n return result", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def get_files(self):\r\n return self._filelist", "def list_all(train_dir):\r\n path = train_dir\r\n result = []\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n result.append(fn)\r\n return result", "def files(self):\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files", "def get_all_file_paths_labels(data_root: str) -> list:\n\n speaker_dirs = os.listdir(data_root)\n all_files = []\n i = 0\n for d in speaker_dirs:\n files = glob.iglob(data_root + '/' + d + '/**/*.wav', recursive=True)\n files = [[f, i] for f in files]\n all_files += files\n i += 1\n all_files = sorted(all_files, key=lambda x:x[0], reverse=False)\n\n return all_files", "def all_files(self):\n return 
self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files", "def get_doc_files(extensions=MARKDOWN_EXTENSIONS + STATIC_ASSET_EXTENSIONS):\n file_list = []\n # doc files on toplevel\n for ext in extensions:\n file_list += config[\"topdir\"].glob('*' + ext)\n # doc files in include dirs\n for incdir in config['incdirs']:\n for ext in extensions:\n file_list += config[\"topdir\"].joinpath(incdir).rglob('*' + ext)\n return file_list", "def get_all_files(self):\n dp = FileSystemDataProvider.FileSystemDataProvider(self.folder)\n filenames = dp.getFileNames()\n htmlOut = \"available files:\"+\", \".join(filenames)\n return htmlOut", "def lsFiles(ruta = getcwd()):\r\n files = [arch.name for arch in scandir(ruta) if arch.is_file()]\r\n return files", "def files_list(directory: str) -> list:\n files = os.listdir(directory)\n\n return files", "def extract_files(self) -> list:\n pass", "def getFiles(self):\n return self.model.getFiles()", "def get_dataset_filelist(dataset):\n\n query = {\n \"_source\": {\n \"includes\": [\"info.directory\", \"info.name\"]\n },\n \"query\": {\n \"match_phrase_prefix\": {\n \"info.directory.analyzed\": dataset\n }\n }\n }\n\n es = CEDAElasticsearchClient()\n results = scan(es, query=query, index='opensearch-files')\n\n file_list = [\n os.path.join(\n item['_source']['info']['directory'],\n item['_source']['info']['name']\n ) for item in results\n ]\n\n return file_list", "def load_texts_from_directory(path_to_documents, subset=None):\n files = sorted(os.listdir(path_to_documents))\n if subset is not None:\n files = files[subset[0]:subset[1]]\n docs = []\n keywords = []\n filenames = []\n for f in files:\n filenames.append(f)\n doc = ''\n with open(os.path.join(path_to_documents, f), 'r', encoding='utf-8') as file:\n for i, l in enumerate(file.readlines()):\n if i is 0:\n keywords.append(l.replace('%%%', '').strip().split('|')[:-1])\n else:\n doc += l.strip() + ' '\n file.close()\n docs.append(doc)\n return docs, keywords, filenames", "def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret", "def load_files(directory):\n # first load the files from the corpus directory into memory\n corpus = dict()\n # Return a list specifiying a directory given by 'path'.\n for filename in os.listdir(directory):\n file_p = os.path.join(directory, filename)\n if os.path.isfile(file_p) and filename.endswith(\".txt\"):\n # os.path.join(path, *path) -- concatenation of path and *paths with exactly one directory separator (os.sep)\n with open(file_p, \"r\", encoding='utf8') as file:\n corpus[filename] = file.read()\n return corpus", "def get_files(a_dir):\n gf = []\n for file in os.listdir(a_dir):\n if file.endswith(\".txt\"):\n gf.append(a_dir + \"/\" + str(file))\n if len(gf) != 0:\n return gf\n else:\n print \"Error: Cannot find TXT files in subdirectory!\\n\\t (%s)\" % a_dir", "def file_list(start_dir):\n file_list = []\n for root, dirs, files in os.walk(start_dir):\n for f in files:\n if f[0] != '.':\n file_list.append(f)\n return file_list", "def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]", "def _get_all_files(self):\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(self.dataset_dir,\n file_pattern % self.split_name)\n return tf.gfile.Glob(file_pattern)", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, 
file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def getAllFiles(dataSets):\n\n\tfiles = []\n\tfor dataSet in dataSets:\n\t\tif dataSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid data set: \" + dataSet)\n\n\t\tfiles += sorted(map(lambda x: _dataSets[dataSet] + \"test/\" + x,\n\t\t\t\t\t\t\tfilter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[dataSet] + \"test/\"))))\n\t\tfiles += sorted(map(lambda x: _dataSets[dataSet] + x,\n\t\t\t\t\t\t\tfilter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[dataSet]))))\n\n\treturn files", "def get_files(self):\n return self.ebook_file.get_files()", "def listFiles(root):\n for dirpath, dirnames, filenames in os.walk(root):\n for file in filenames:\n yield os.path.join(dirpath, file)", "def extract_corpus(corpus_dir = \"articles\"):\n corpus = {}\n num_documents = 0\n for filename in os.listdir(corpus_dir):\n with open(os.path.join(corpus_dir, filename)) as f:\n corpus[filename] = re.sub(\"[^\\w]\", \" \", f.read()).split()\n return corpus", "def get_list_of_files_in_dir(file_list_path=None):\n return os.listdir(file_list_path)", "def list_all_files(root):\n local_files = []\n for path, dirs, files in os.walk(os_path(root), followlinks=False):\n if len(files) > 0:\n path_wo_root = path[(len(root) + len(slash)):] # remove root part\n local_files.extend([os.path.join(path_wo_root, f) for f in files])\n return local_files", "def get_files(self) -> Set[str]:\n return ({f for f in os.listdir(self.get_directory())\n if os.path.isfile(os.path.join(self.get_directory(), f))} if self.directory_exists(self.get_directory()) else set())", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def list_files(path):\n ls_output = os.listdir(path)\n return ls_output", "def readfiles(dir):\n\n pwd = os.getcwd()\n os.chdir(dir)\n\n files = os.listdir('.')\n files_text = []\n\n for i in files:\n try:\n f = open(i, 'r', encoding='utf-8')\n files_text.append(f.read())\n except:\n print(\"Could not read %s.\" % i)\n finally:\n f.close()\n\n os.chdir(pwd)\n\n return files_text", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def filelist(root):\n allfiles = []\n for path, subdirs, files in os.walk(root):\n for name in files:\n if name.find(\"xls\") >= 0:\n allfiles.append(os.path.join(path, name))\n return allfiles", "def get_filelist(import_path, extension):\n filelist = []\n for root, dirs, files in os.walk(import_path):\n filelist += glob.glob(os.path.join(root, '*.' 
+ extension))\n return filelist", "def collect_documents(self):\n documents = []\n ignored = []\n for path in self.paths:\n try:\n current_document = MAE_Document(path)\n except UnsupportedMIMETypeError as e:\n ignored.append(str(e))\n else:\n documents.append(current_document)\n if ignored:\n print \"Some files were ignored:\"\n for file in ignored:\n print \"\\t%s\" % file\n return documents", "def all_files(self) -> List[IdentifiedFile]:\n return [self.main_file, *self.labware_files, *self.data_files]", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def GetFileNames(self):\n return self.files", "def files(self):\n return self._files", "def listFiles(path):\n outputList = []\n for root, dirs, files in os.walk(path):\n for f in files:\n outputList.append('/'.join([root, f]))\n return outputList", "def files_in_dir(path):\n return os.listdir(path)", "def files(self):\r\n return self._files", "def GetFiles(path):\n\n retfiles = []\n target_paths = []\n for root, dirs, files in os.walk(path):\n if root == path:\n target_paths = map(lambda d: os.path.join(root, d), dirs)\n continue\n if root not in target_paths:\n continue\n for f in files:\n if f[-4:] != '.txt':\n continue\n retfiles.append(os.path.join(root, f))\n return retfiles", "def list_all_files(dir):\n\n result = []\n for root, _, filenames in os.walk(dir):\n for name in filenames:\n filename, ext = os.path.splitext(name)\n if ext == '.cs' or ext == '.xaml':\n result.append(os.path.join(root, name))\n return result", "def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def get_files_list(tree):\n result = list()\n for (dir_path, _, file_names) in walk(tree):\n if file_names:\n for file in file_names:\n if file.lower().endswith(('.png', '.jpg', '.jpeg')):\n result.append(path.join(dir_path, file))\n\n return result", "def get_filenames(self, path):\n files_list = list()\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list", "def get_filenames(self, path: str):\n files_list = []\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list", "def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you", "def getFilesList(data):\n\n filesList = []\n\n if os.path.isdir(data):\n logging.info(\"Using files from \" + data)\n #Create a list containing the file names\n for root, dirs, files in os.walk(data):\n for filename in files:\n filesList.append(os.path.join(root,filename))\n\n else:\n logging.info(\"Using file \" + data)\n filesList.append(os.path.abspath(data))\n\n return sorted(filesList)", "def getGlobusFiles(self):\n\t\treturn self.transfer_client.operation_ls(self.transfer_client.endpoint_search(DATA_ENDPOINT_NAME)[0]['name'])", "def get_filenames(self):\n return self.filenames", "def get_all_pdfs():\n\n return filter(lambda f: fnmatch.fnmatch(f, 
'*.pdf'), os.listdir(cwd))", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def get_dataset_files(dataset_info, mode, root):\n basepath = dataset_info.basepath\n base = os.path.join(root, basepath, mode)\n\n # usually of form '{}-of-{}.tfrecord'\n files = sorted(os.listdir(base))\n\n return [os.path.join(base, file) for file in files]", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def _get_subject_files(self):\n from itertools import chain\n\n subjsf = fetch_one_file(self.ica_dir, self._subjects_fname)\n mat_file = sio.loadmat(subjsf)['files']\n return [f.strip() for f in list(chain.from_iterable(chain.from_iterable(chain.from_iterable(mat_file))))]", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def getListOfFiles(directory):\n listOfFiles = []\n for path, dirs, files in os.walk(directory):\n for eachFile in files:\n filePath = os.path.join(path, eachFile)\n listOfFiles.append(filePath)\n return listOfFiles", "def _get_all_files(dir_path):\n for root, _, filenames in os.walk(dir_path):\n for name in filenames:\n target = os.path.join(root, name)\n yield target", "def get_filenames():\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames", "def getTxtFileList(folder_path='.'):\n files = [os.path.splitext(f)[0] for f in os.listdir(folder_path) if os.path.isfile(f) and os.path.splitext(f)[1] == '.txt']\n return files", "def get_files(self):\n return self._files.values()", "def list_all_files(in_dir):\n\n for dirname, dirs, files in os.walk(in_dir):\n for filename in files:\n yield op.join(dirname, filename)", "def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files", "def getContentFiles():\n contentFiles = []\n for contentDir, subDirs, filenames in os.walk(sourceDir, followlinks=True):\n if shouldIgnore(contentDir):\n subDirs[:] = []\n continue\n for filename in filenames:\n if not shouldIgnore(filename):\n cf = ContentFile(os.path.join(contentDir, filename))\n log(`cf.path`)\n contentFiles.append(cf)\n return contentFiles", "def get_list_from_all_files(self):\n if not self.is_dir:\n from parsing_exceptions import FileException\n raise FileException(\"Il metodo non puo' essere invocato solo su directory\")\n\n converted_files = []\n\n for f in self.files:\n converted_files.append(f.get_list_from_file())\n\n return converted_files", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = 
[]\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def get_data_files(dirname):\r\n flist = []\r\n for dirpath, _dirnames, filenames in os.walk(dirname):\r\n for fname in filenames:\r\n flist.append(osp.join(dirpath, fname))\r\n return flist", "def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names", "def read_all_files():\n paths = get_all_recording_paths()\n\n return read_by_paths(paths)", "def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def files(self):\n return self._files.items()", "def getcontent(self):\n filelist=[]\n if len(self.filelist) == 0:\n return \"empty directory\"\n else:\n for file in self.filelist:\n filelist.append(file)\n return filelist", "def list_of_files(self, dirname): \n\n list_of_files = os.listdir(dirname)\n all_files = []\n\n for entry in list_of_files:\n full_path = os.path.join(dirname, entry)\n\n if os.path.isdir(full_path):\n all_files = all_files + self.list_of_files(full_path)\n else:\n all_files.append(full_path)\n\n return all_files", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def load_files(directory):\n corpus_dict = dict()\n filenames = os.listdir(directory)\n for file in filenames:\n path = os.path.join('corpus',file)\n with open(path,encoding='utf-8') as f:\n text = f.read()\n corpus_dict[file] = text\n \n\n return corpus_dict", "def files(pathspec):\n\treturn [f for f in glob.glob(pathspec)]", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def _getFilesForDataSets(self, dataSets):\n\n if len(dataSets) == 0:\n return []\n\n dataSetFiles = []\n for dataSet in dataSets:\n content = contentProvider.getContent(dataSet.getDataSetCode())\n nodes = content.listMatchingNodes(\"original\", \".*\\.fcs\")\n if nodes is not None:\n for node in nodes:\n fileName = node.tryGetFile()\n if fileName is not None:\n fileName = str(fileName)\n if fileName.lower().endswith(\".fcs\"):\n dataSetFiles.append(fileName)\n\n if len(dataSetFiles) == 0:\n self._message = \"Could not retrieve dataset files!\"\n self._logger.error(self._message)\n\n # Return the files\n return dataSetFiles", "def files(self):\r\n return files.Files(self)" ]
[ "0.7367868", "0.7332538", "0.7282746", "0.7256595", "0.72560257", "0.7246942", "0.7218564", "0.7056109", "0.7021829", "0.69263995", "0.68938583", "0.6842887", "0.68216294", "0.67668146", "0.6749649", "0.6749649", "0.67369485", "0.67292917", "0.67268735", "0.67252684", "0.6723229", "0.67189837", "0.6713074", "0.67100596", "0.6684722", "0.665102", "0.66364205", "0.66352916", "0.66338176", "0.661819", "0.661643", "0.66136897", "0.6612588", "0.65981585", "0.6589441", "0.65839744", "0.65839744", "0.657806", "0.65626097", "0.65558946", "0.65528697", "0.6545102", "0.6541675", "0.65353096", "0.6519798", "0.6514767", "0.6513423", "0.6511372", "0.6508501", "0.6499127", "0.6498421", "0.6482805", "0.648158", "0.647641", "0.64726025", "0.6471721", "0.6468985", "0.64606553", "0.64581984", "0.6455013", "0.644431", "0.6438648", "0.6422894", "0.64219004", "0.6410911", "0.63988614", "0.63945335", "0.6380322", "0.6378637", "0.63700646", "0.63512236", "0.6349052", "0.633826", "0.633656", "0.63301927", "0.6318759", "0.6310758", "0.6308201", "0.62940425", "0.629144", "0.62858737", "0.6285106", "0.6281768", "0.62781656", "0.6270205", "0.62681127", "0.6265245", "0.6262214", "0.6257783", "0.62512565", "0.6250957", "0.6249617", "0.624014", "0.62399423", "0.6229592", "0.622836", "0.62206584", "0.62171084", "0.6216334", "0.621558" ]
0.6863361
11
Read files from disk, starting from the first one.
def load_files(self, n=None):
    if not n:
        n = len(self.files)
    for _, name in zip(list(range(n)), self.files):
        yield self.load_file(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_next_file(self):\n\n if self._file_ptr == len(self.files):\n raise pipeline.PipelineStopIteration\n\n # Collect garbage to remove any prior data objects\n gc.collect()\n\n # Fetch and remove the next item in the list\n file_ = self.files[self._file_ptr]\n self._file_ptr += 1\n\n # Set up a Reader class\n rd = self._acqtype_reader[self.acqtype](file_)\n\n self.log.info(f\"Reading file {self._file_ptr} of {len(self.files)}. ({file_})\")\n data = rd.read()\n\n return data", "def read_all_files():\n paths = get_all_recording_paths()\n\n return read_by_paths(paths)", "def read_all(self, prog:progress=None):\t\t\t\n\t\tself.__output_status(\"Read & compare all files\")\n\t\tself.__read_files('all', prog)", "def _iter_from_disk(self):\n self.f.seek(0, 0) # relative to start\n for line in self.f:\n yield line\n self.f.seek(0, 2) # relative to end", "def __read_files(self, kind:str, prog:progress=None):\n\t\tself.readed_sectors = 0\n\t\tself.read_elapsed = 0.0\t\n\t\t\n\t\tself.__ipc_send_progress(prog, 0)\n\n\t\tself.filematrix.reset(kind=='dynamic')\n\n\t\twhile not self.filematrix.done():\t\t\n\t\t\tif (self.__check_terminated()):\n\t\t\t\treturn;\t\t\t\n\t\t\t\t\t\t\n\t\t\tfp = self.filematrix.next()\t\n\n\t\t\t####logging.info('read path:' + fp.path + ', size: ' + str(fp.size) + ', seed: ' + str(fp.rand_seed))\n\t\t\t\n\t\t\tif not os.path.exists(fp.folder):\n\t\t\t\traise_error(FileExistsError, myerror.dir_error)\n\n\t\t\tfile_time = 0.0\n\t\t\tstart = time.time()\t\t\t\n\t\t\t\n\t\t\twith iolib.fopen(fp.path, 'rd') as f:\n\t\t\t\tremain = fp.size\n\t\t\t\tfile_time = 0.0\n\t\t\t\tstart = 0.0\n\t\t\t\telapsed = 0.0\t\t\t\t\n\t\t\t\t\n\t\t\t\twhile (remain != 0):\n\t\t\t\t\tchunk_sectors = min(remain, self.max_buff_size)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\texpected = self.__random_chunk_pattern(chunk_sectors, fp.rand_seed)\t\t\t\t\t\n\t\t\t\t\t#expected = self.__next_chunk_pattern(chunk_sectors)\t\n\n\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\treturn;\n\n\t\t\t\t\treal, bytesRead, elapsed = iolib.read(512 * chunk_sectors, f)\n\t\t\t\t\tfile_time += elapsed\n\t\t\t\t\t\t\t\n\t\t\t\t\tif (real != expected):\n\t\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\traise_exception(BaseException, myerror.pattern_error, \"compare error at the file:\" + fp.path)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\tself.readed_sectors += int(bytesRead / 512)\n\t\t\t\t\tremain = remain - chunk_sectors\n\t\t\t\t\n\t\t\t\tself.read_elapsed += file_time\t\t\n\t\t\t\ttime.sleep(0.001)\t\t\n\n\t\t\tself.__ipc_send_progress(prog, self.filematrix.get_progress())", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):\n path = uri_to_path(path)\n files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive)\n\n nfiles = len(files)\n self.nfiles = nfiles\n\n if spark and isinstance(self.engine, spark):\n npartitions = min(npartitions, nfiles) if npartitions else nfiles\n rdd = self.engine.parallelize(enumerate(files), npartitions)\n return rdd.map(lambda kv: (kv[0], readlocal(kv[1])))\n 
else:\n return [(k, readlocal(v)) for k, v in enumerate(files)]", "def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list", "def read_zero_files(self):\n\n # Master reads the files\n if i_am_master:\n self.qptanalyzer.read_zero_files()\n\n # Broadcast\n self.qptanalyzer.broadcast_zero_files()", "def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...", "def next_file(self):\n raise NotImplementedError()", "def multipleFileReadLines(filePaths): \n \n buffers = [] \n filePositions = [] \n \n for filePath in filePaths: \n lines, filePosition= readMultipleFileLinesAndPositions(filePath) \n buffers.append(lines) \n filePositions.append(filePosition) \n \n linesRemaining = True \n \n while linesRemaining: \n currentLines = [] \n for i,fileBuffer in enumerate(buffers): \n currentLines.append(fileBuffer[0].strip()) \n \n del fileBuffer[0] \n \n if ( not(fileBuffer) and linesRemaining): \n lines, filePosition = readMultipleFileLinesAndPositions(filePaths[i],filePositions[i]) \n buffers[i] = lines \n filePositions[i] = filePosition \n linesRemaining = bool(lines) \n \n yield currentLines", "def head(self, n=5, multiple_iterators=False):\n if multiple_iterators:\n head_iter = bamnostic.AlignmentFile(self._handle.name, index_filename=self._index_path)\n else:\n curr_pos = self.tell()\n # BAMheader uses byte specific positions (and not BGZF virtual offsets)\n self._handle.seek(self._header._BAMheader_end)\n self._load_block(self._handle.tell())\n head_iter = self\n\n head_reads = [next(head_iter) for read in range(n)]\n\n if multiple_iterators:\n # close the independent file object\n head_iter.close()\n else:\n # otherwise, just go back to old position\n self.seek(curr_pos)\n assert self.tell() == curr_pos\n return head_reads", "def read_concat_file(self):\n\n file_list = []\n for i in self.IDs[0:3]:\n with open(i, 'r') as cf:\n cf = cf.read()\n file_list.append(cf)\n return file_list", "def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list", "def open_read_files(answer_files, answers):\r\n \"\"\"And designates each file to a variable in answers\"\"\"\r\n count = 0\r\n s = 0\r\n answer_files2 = []\r\n for file in answer_files[:]: # Used [:] to get all file_names in answer_files\r\n ans = open(file, mode='r')\r\n print(f\"Opening {ans.name}\")\r\n time.sleep(s)\r\n answers[count] = ans\r\n count += 1\r\n if ans.closed == False: # Section for checking if files are closed\r\n print(f\"Closing {ans.name}\")\r\n ans.close()\r\n answer_files2.append(ans.name)\r\n answer_files.remove(ans.name)\r\n time.sleep(s)\r\n return answer_files2, answers", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def 
read(self, filenames):\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp, filename)\n fp.close()\n read_ok.append(filename)\n return read_ok", "def _next(self, filename):\n try:\n return self.tmp_read[filename]['reader'].__next__()\n except StopIteration:\n return None", "def read_files(path):\n fid = open(path, 'r')\n lines = fid.readlines()\n fid.close()\n return lines", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n 
preservesPartitioning=True))\n \n return rec_rdd", "def getReadersFromFilenames(self):\n\t\tfor i in self.readers:\n\t\t\tdel i\n\t\tself.readers = []\n\n\t\tif not self.filenames:\n\t\t\traise Logging.GUIError(\"No files could be found\", \\\n\t\t\t\t\t\t\t\t\t\"For some reason, no files were listed to be imported.\")\t\t \n\t\t\t\t\t\n\t\tfiles = self.filenames\n\t\tprint \"Determining readers from \", self.filenames\n\n\t\tisRGB = 1\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\tdim = self.dimMapping[self.ext]\n\t\t# Initially flip the image if it's tiff, png or jpg.\n\t\t# In setVerticalFlip we negate the setting to have it set correctly.\n\t\tif self.ext.lower() in [\"png\", \"jpg\", \"jpeg\"]:\n\t\t\tself.flipVertically = True\n\t\tif self.ext in [\"tif\", \"tiff\"]:\n\t\t\treader = vtkbxd.vtkExtTIFFReader()\n\t\t\treader.SetFileName(files[0])\n\t\t\treader.UpdateInformation()\n\t\t\tif reader.GetNumberOfScalarComponents() >= 3:\n\t\t\t\tprint \"MODE IS RGB, IS AN RGB IMAGE\"\n\t\t\telse:\n\t\t\t\tprint \"MODE ISN'T RGB, THEREFORE NOT RGB\"\n\t\t\t\tisRGB = 0\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetFileName(files[0])\n\t\t\tif rdr.GetNumberOfSubFiles() > 1:\n\t\t\t\tdim = 3\n\t\t\t\t\n\t\tself.isRGB = isRGB\n\t\tself.is3D = (dim == 3)\n\t\t\n\t\tdirName = os.path.dirname(files[0])\n\t\tprint \"THERE ARE\", self.slicesPerTimepoint, \"SLICES PER TIMEPOINT\"\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\t\n\t\tif dim == 3:\n\t\t\ttotalFiles = len(files)\n\t\t\tfor i, file in enumerate(files):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\trdr.SetFileName(file)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\ttotalFiles = len(files) / self.slicesPerTimepoint\n\n\t\timgAmnt = len(files)\n\t\tif totalFiles == 1:\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\tarr = vtk.vtkStringArray()\n\t\t\tfor fileName in files:\n\t\t\t\tarr.InsertNextValue(os.path.join(dirName, fileName))\n\t\t\trdr.SetFileNames(arr)\n\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\tif imgAmnt > 1:\n\t\t\t# If the pattern doesn't have %, then we just use\n\t\t\t# the given filenames and allocate them to timepoints\n\t\t\t# using slicesPerTimepoint slices per timepoint\n\t\t\tntps = len(files) / self.slicesPerTimepoint\n\t\t\tfilelst = files[:]\n\t\t\t# dirn #TODO: what was this?\n\t\t\tfor tp in range(0, ntps):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\tarr = vtk.vtkStringArray()\n\t\t\t\tfor i in range(0, self.slicesPerTimepoint):\n\t\t\t\t\tarr.InsertNextValue(filelst[0])\n\t\t\t\t\tfilelst = filelst[1:]\n\t\t\t\trdr.SetFileNames(arr)\n\t\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\n\t\telif imgAmnt == 1:\n\t\t\t# If only one file\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\trdr.SetFileName(files[0])\n\n\t\t\tLogging.info(\"Reader = \", rdr, kw = \"io\")\n\t\t\tself.readers.append(rdr)", "def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n 
path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj", "def processSetOfCerFiles(files):\n printHeader()\n \n k = 0\n for f in files:\n k = k + 1\n sz = get_file_size(f)\n with open(f, 'rb') as fb:\n processCerFile(k, fb, sz=sz)", "def readfile(name, outstream, start=0, end=None):", "def read_combine_elia_freq_R1(path):\r\n i=0\r\n dfs = []\r\n data_files = glob.glob(path + 'FrequencyAndDemand_*')\r\n print(str(datetime.datetime.utcnow()) + \" amount of files to combine: \" + str(len(data_files)))\r\n for file in data_files:\r\n i=i+1\r\n print(str(datetime.datetime.utcnow()) + \" processing file number: \"+ str(i))\r\n df = read_elia_freq_R1(file)\r\n dfs.append(df)\r\n combined_data = pd.concat(dfs, axis = 0)\r\n return combined_data", "def read_file(path_to_file):\n 8", "def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):\n from .utils import connection_with_anon, connection_with_gs\n\n path = addextension(path, ext)\n scheme, bucket_name, keylist = self.getfiles(\n path, start=start, stop=stop, recursive=recursive)\n\n if not keylist:\n raise FileNotFoundError(\"No objects found for '%s'\" % path)\n\n credentials = self.credentials\n\n self.nfiles = len(keylist)\n\n if spark and isinstance(self.engine, spark):\n\n def getsplit(kvIter):\n if scheme == 's3' or scheme == 's3n':\n conn = connection_with_anon(credentials)\n bucket = conn.get_bucket(bucket_name)\n elif scheme == 'gs':\n conn = boto.storage_uri(bucket_name, 'gs')\n bucket = conn.get_bucket()\n else:\n raise NotImplementedError(\"No file reader implementation for URL scheme \" + scheme)\n\n for kv in kvIter:\n idx, keyName = kv\n key = bucket.get_key(keyName)\n buf = key.get_contents_as_string()\n yield idx, buf\n\n npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles\n rdd = self.engine.parallelize(enumerate(keylist), npartitions)\n return rdd.mapPartitions(getsplit)\n\n else:\n\n if scheme == 's3' or scheme == 's3n':\n conn = connection_with_anon(credentials)\n bucket = conn.get_bucket(bucket_name)\n elif scheme == 'gs':\n conn = connection_with_gs(bucket_name)\n bucket = conn.get_bucket()\n else:\n raise NotImplementedError(\"No file reader implementation for URL scheme \" + scheme)\n\n def getsplit(kv):\n idx, keyName = kv\n key = bucket.get_key(keyName)\n buf = key.get_contents_as_string()\n return idx, buf\n\n return [getsplit(kv) for kv in enumerate(keylist)]", "def lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def read(file_path):\n file_name, path = (file_path.split('\\\\')[-1] + '.txt', file_path[:file_path.rfind('\\\\') + 1])\n os.chdir(path)\n with open(file_name, 'r') as fd:\n it = iter(list(set(fd.readlines())))\n return it", "def read_files(self):\n\n self.selecteddata = []\n 
try:\n for itemnum in self.selected:\n dfileent = self.indexdata[itemnum]\n fname = dfileent[0]\n if not os.path.isabs(fname): fname = os.path.join(self.indexdir, fname)\n ddata = self.dfparser.parsefile(fname)\n if self.doppleradj.isChecked(): ddata = doppler.apply_doppler_array(ddata, dfileent[3])\n self.selecteddata.append(ddata)\n self.warningmsg.setText(\"\")\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0] + \" file \" + e.filename + \" line \" + e.linenumber + \" col \" + e.colnumber)\n self.selected = []\n self.selecteddata = []", "def readfile(path, outstream, start=0, end=None):", "def get_stream_reader(fh, tmp_dir):\n magic_dict = {\n b\"\\x1f\\x8b\\x08\": _get_stream_readers_for_gzip,\n b\"\\x42\\x5a\\x68\": _get_stream_readers_for_bz2,\n b\"\\x50\\x4b\\x03\\x04\": _get_stream_readers_for_zip,\n }\n start_of_file = fh.read(CHUNK_SIZE)\n try:\n fh.seek(0)\n except UnsupportedOperation: # This happens if fh has been created by urlopen\n fh = _download_file(start_of_file, fh)\n try: # Check if file is tar file\n if tarfile.open(fileobj=StringIO(start_of_file)):\n return _get_stream_readers_for_tar(fh, tmp_dir)\n except tarfile.ReadError:\n pass\n for k, v in magic_dict.items():\n if start_of_file.startswith(k):\n return v(fh, tmp_dir)\n return [fh]", "def read(path):", "def iter_local_docs(docs_path, skip=0, stop=sys.maxsize):\n for i, line in enumerate(open(docs_path)):\n if i < skip:\n continue\n elif i < stop:\n yield json.loads(line)\n else:\n break", "def read(self,filenames):\n\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp)\n fp.close()\n read_ok.append(filename)\n return read_ok", "def _read_file(self, current_path):\n # type: (str)->None\n\n if current_path in self._parsed_files:\n return\n self._parsed_files.add(current_path)\n\n with open(current_path, \"r\") as interfaces:\n # When the first non-comment line is parsed, header\n # comments have been read in.\n header_parsed = False\n # Loop through the interfaces file.\n for line in interfaces:\n # 1. Identify the clauses by analyzing the first\n # word of each line.\n # 2. 
Go to the next line if the current line is a comment.\n # line = line.strip().replace(\"\\n\", \"\")\n if not line:\n pass\n elif line.strip().startswith(\"#\") is True:\n if not header_parsed:\n self._header_comments += line\n else:\n # Header comments can no longer\n # be parsed in when the first interfaces\n # line is parsed in.\n header_parsed = True\n self._parse_iface(line, current_path)\n # Ignore blank lines.\n if not line.isspace():\n self._parse_details(line)\n self._read_auto(line)\n self._read_hotplug(line)\n\n # Is there some file to source ?\n source_path = self._read_sourced_path(line)\n if source_path:\n self._read_files(source_path)\n\n # TODO: lots of directives are completly ignored\n # and would be deleted", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n count += 1\n\n return sentences_as_lists", "def read_scan(self, dir, **args):\n files = []\n files_dir = {}\n for file in os.listdir(dir):\n if file.endswith('tif'):\n fnbase = file[:-4]\n elif file.endswith('tiff'):\n fnbase = file[:-4]\n else:\n continue\n last_digits = re.search(r'\\d+$', fnbase)\n if last_digits is not None:\n key = int(last_digits.group())\n files_dir[key] = file\n\n ordered_keys = sorted(list(files_dir.keys()))\n\n for key in ordered_keys:\n file = files_dir[key]\n files.append(os.path.join(dir, file))\n\n # look at slice0 to find out shape\n n = 0\n try:\n slice0 = self.detector.get_frame(files[n], self.roi, self.Imult)\n except Exception as e:\n print(e)\n return None\n shape = (slice0.shape[0], slice0.shape[1], len(files))\n arr = np.zeros(shape, dtype=slice0.dtype)\n arr[:, :, 0] = slice0\n\n for file in files[1:]:\n n = n + 1\n slice = self.detector.get_frame(file, self.roi, self.Imult)\n arr[:, :, n] = slice\n return arr", "def get_files1(dirname, size_in_kb):\n for file in glob.glob(os.path.join(dirname, \"*\")):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file", "def partial_reader(filename, chunk_size):\n try:\n file = open(filename, 'rb')\n while True:\n chunk = file.read(chunk_size)\n if not chunk:\n return\n yield chunk\n except IOError as e:\n logger.error(\"IOError: %s\" %(str(e)), exc_info=True)\n return", "def readfast(self, name=\"\", *args, **kwargs):\n\n assert _os.path.isfile(self.__str__()) == True\n\n with open(self.__str__(), *args, **kwargs) as file_handler:\n for line in file_handler:\n yield line", "def flow_from_files(self, filenames=None, batch_size=32):\n\n if 
filenames:\n self.filenames = filenames\n\n for i in range(0, len(self.filenames), batch_size):\n yield np.concatenate([np.load(self.path / f) \\\n for f in self.filenames.iloc[i:i+batch_size]])", "def retrieveFiles(self):\n initialPath = self.destination\n destinationPath = self.initial\n fileList = self._filesToRetrieve\n self._transferFiles(initialPath, destinationPath, fileList)", "def read_lines(path, header=True):\n with open(path, 'r') as f:\n if not header:\n f.readline() # skip header\n return f.readlines()", "def read(self, filename=None):\n\t\tif filename is None:\n\t\t\tif hasattr(self, 'filename'):\n\t\t\t\tfilename = os.path.join(self.path, self.filename)\n\t\t\telse:\n\t\t\t\traise Exception, 'no filename given!'\n\t\tif os.path.splitext(filename)[1] in self.extensions and os.path.exists(filename):\n\t\t\tfor line in open(filename).readlines():\n\t\t\t\tself.read_line(line)", "def read_all_files(\n pathname: Path, index: int = 0, pattern: str = \"dump-Trimer-*.gsd\"\n) -> List[Tuple[Variables, HoomdFrame]]:\n pathname = Path(pathname)\n snapshots = []\n for filename in sorted(glob.glob(str(pathname / pattern))):\n logger.debug(\"Reading %s\", Path(filename).stem)\n with gsd.hoomd.open(str(filename)) as trj:\n try:\n snapshots.append((get_filename_vars(filename), HoomdFrame(trj[index])))\n except IndexError:\n continue\n if not snapshots:\n logger.warning(\n \"There were no files found with a configuration at index %s\", index\n )\n return snapshots", "def read_everything_in_dir(dirpath, include_cnt=False):\n if not os.path.isdir(dirpath):\n raise ValueError(\"not a directory: \" + str(dirpath))\n all_filepaths = []\n for d, _, filepaths in os.walk(dirpath):\n all_filepaths += [os.path.join(d, p) for p in filepaths]\n\n should_stop, q = threading.Event(), Queue(maxsize=100)\n\n def bg_thread():\n try:\n for full_path in all_filepaths:\n try:\n if should_stop.is_set():\n return\n with open(full_path, 'r') as fo:\n contents = fo.read()\n q.put(contents)\n except:\n print(f\"Exception while reading: {full_path}; skipping\", file=sys.stderr)\n traceback.print_exc()\n finally:\n q.put(None)\n\n t = threading.Thread(name=\"Reader-Thread\", target=bg_thread, daemon=True)\n t.start()\n\n def contents_gen():\n try:\n while 1:\n msg = q.get()\n if msg is None:\n break\n yield msg\n finally:\n should_stop.set()\n t.join(timeout=2)\n assert not t.is_alive()\n\n if include_cnt:\n return len(all_filepaths), contents_gen()\n return contents_gen()", "def read_files(filepath,forc):\n \n dir_all = []\n dir_sub = []\n \n path = os.path.join(str(filepath))\n for dir_name in os.listdir(path):\n filad = str(dir_name)\n sub_path = str(filepath)+filad+'/*.*'\n \n if forc == 'cropped':\n dir_sub = import_image(str(sub_path))\n \n if forc == 'full':\n dir_sub = import_fimage(str(sub_path))\n \n \n dir_sub = np.array(dir_sub)\n \n dir_all.append(dir_sub.transpose())\n \n return(np.array(dir_all))", "def read(self, filenames, encoding=None):\n if isinstance(filenames, str):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n with open(filename, encoding=encoding) as f:\n self.read_file(f)\n except OSError:\n continue\n read_ok.append(filename)\n return read_ok", "async def fetch_files(self, tag=None, n=100):\n\n logging.debug(\"Fetching files (tag is %s)\" % tag)\n\n params = {\"n\": n}\n if tag is not None:\n params.update({\"tag\": tag})\n\n files = await self.client.request.get(\"/files\", params=params)\n return [FileBase.build_file(\n self.client, file, self.loop) for 
file in files[\"data\"]]", "def get_chunk_files(self, path, chunks=None):\n iterator = self.run_as_current_user(self.fs.get_list_dir, path)\n files = Queue.Queue()\n if chunks:\n while True:\n try:\n liststr = iterator.next()\n filename = ((liststr.split(' ')[-1]).split('\\r'))[0]\n chunk_num = (filename.split('_')[0]).split('.')[-1]\n if chunk_num.isdigit() and int(chunk_num) in chunks:\n filepath = path + '/' + filename\n if DEBUGGING_MSG:\n print filepath\n fd = self.run_as_current_user(self.fs.open, filepath, 'rb')\n files.put(fd)\n except StopIteration, err:\n break\n return files\n\n while True:\n try:\n liststr = iterator.next()\n filename = ((liststr.split(' ')[-1]).split('\\r'))[0]\n filepath = path + '/' + filename\n print filepath\n fd = self.run_as_current_user(self.fs.open, filepath, 'rb')\n files.put(fd)\n except StopIteration, err:\n print err\n self.respond('544 %s' %why)\n break\n return files", "def doReadFiles(self, logicalFileName=None, realFileName=None):\n #type: (Text)->List(Text)\n assert logicalFileName or realFileName\n if logicalFileName is not None:\n self.fileName=logicalFileName\n self.sourceLines=readFileLines(\n file=self.fileName,\n issueOrigin=self,\n message='Cannot read source file %s')\n if realFileName is not None:\n self.realFileName=realFileName\n if logicalFileName==realFileName:\n self.realSourceLines=self.sourceLines\n else:\n self.realFileName = realFileName\n self.realSourceLines = readFileLines(\n file=self.realFileName,\n issueOrigin=self,\n message='Cannot read generated file %s')", "def read_files(files):\n if len(files) == 1:\n return pd.read_csv(files[0], comment='#', names=[\"time\", \"volts\"])\n\n elif len(files)>1:\n df = []\n for f in files:\n data = pd.read_csv(f, comment='#', names=[\"time\", \"volts\"])\n df.append(data)\n new_df = pd.concat(df)\n new_df = new_df.drop_duplicates(subset='time')\n new_df.reset_index(drop=True, inplace=True)\n return new_df", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def open_bus_stop_all_read(file_name):\n\t# change directory\n\tra_to_bus_stops_all()\n\t# open the file\n\tsource = open(file_name, \"r\")\n\t# return to starting direcotyr\n\tbus_stops_all_to_ra()\n\t# return the open file\n\treturn source", "def _file_iter(f, size):\n chunk = f.read(size)\n while chunk:\n yield chunk\n chunk = f.read(size)", "def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files", "def readMas(dir):\n n=0\n fs = []\n for (dirpath, dirnames, filenames) in os.walk(dir):\n if len(filenames)>0:\n for f in filenames:\n pth = os.path.join(dirpath,f)\n fs.append(pth)\n pass\n return fs", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' 
format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def chunk_reader(chunk_filenames, chunk_filename_queue):\n chunks = []\n done = chunk_filenames\n\n while True:\n if not chunks:\n chunks, done = done, chunks\n random.shuffle(chunks)\n if not chunks:\n print(\"chunk_reader didn't find any chunks.\")\n return None\n while len(chunks):\n filename = chunks.pop()\n done.append(filename)\n chunk_filename_queue.put(filename)\n print(\"chunk_reader exiting.\")\n return None", "def read_iter_from_file(path_to_file_read):\n with open(path_to_file_read, \"r\") as fichero:\n line = fichero.readline().strip()\n while line:\n yield line\n line = fichero.readline().strip()", "def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def readfiles(dir):\n\n pwd = os.getcwd()\n os.chdir(dir)\n\n files = os.listdir('.')\n files_text = []\n\n for i in files:\n try:\n f = open(i, 'r', encoding='utf-8')\n files_text.append(f.read())\n except:\n print(\"Could not read %s.\" % i)\n finally:\n f.close()\n\n os.chdir(pwd)\n\n return files_text", "def read_in_chunks(self):\n chunksize = 10 ** 3\n lines_number = sum(1 for line in open(self.filepath))\n self.progressMaximum.emit(lines_number // chunksize)\n dfList = []\n\n # self.df = traja.read_file(\n # str(filepath),\n # index_col=\"time_stamps_vec\",\n # parse_dates=[\"time_stamps_vec\"],\n # )\n\n TextFileReader = pd.read_csv(\n self.filepath,\n index_col=\"time_stamps_vec\",\n parse_dates=[\"time_stamps_vec\"],\n chunksize=chunksize,\n )\n for idx, df in enumerate(TextFileReader):\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S:%f\")\n dfList.append(df)\n self.intReady.emit(idx)\n self.completed.emit(dfList)\n self.finished.emit()", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in 
self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def open_files(filenames):\n for filename in filenames:\n if filename.endswith('.gz') or filename.endswith('.zip'):\n yield gzip.open(filename, 'r')\n else:\n yield open(filename, 'rb')", "def process(self):\n\n return self._load_next_file()", "def preprocess_files_second_pass(self, max_files=10):\n print \"reading {} files\".format(max_files)\n file_counter = 0\n for fn in os.listdir(self.output_data_path):\n file_path = self.output_data_path + fn\n if os.path.isfile(file_path) and fn.startswith('user-ct-test-collection') and file_counter < max_files:\n # print(\"loading {}...\".format(file_path))\n # data = pd.read_csv(file_path, sep=\",\")\n # print(\"preprocessing {}...\".format(file_path))\n # data = self.preprocess_data_second(data)\n # save_df(self.output_data_path, data, fn)\n # file_counter += 1\n return", "def ReadRecipesFromDirectory(self, path: str) -> None:\n for file_path in glob.glob(os.path.join(path, '*.json')):\n self.ReadRecipeFromFile(file_path)", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. 
image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def get_files(self):\n\n self.files = []\n retriever_methods = [\n m\n for m in rtorrent9.file.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # 2nd arg can be anything, but it'll return all files in torrent\n # regardless\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"f.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n offset_method_index = retriever_methods.index(\n rtorrent9.rpc.find_method(\"f.offset\")\n )\n\n # make a list of the offsets of all the files, sort appropriately\n offset_list = sorted([r[offset_method_index] for r in results])\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n # get proper index positions for each file (based on the file\n # offset)\n f_index = offset_list.index(results_dict[\"offset\"])\n\n self.files.append(\n File(self._rt_obj, self.info_hash, f_index, **results_dict)\n )\n\n return self.files", "def read_file(path):\n with open(path, \"r\") as IN:\n file_seqs = [line.strip() for line in IN]\n return file_seqs", "def _get_files(self, paths: List[str]) -> List[Tuple[str, bytes]]:\n pool = multiprocessing.dummy.Pool(self._processes)\n return pool.map(self._get_file, paths) # type: ignore", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def split_single_file(self, filename):\n file_size = os.path.getsize(filename)\n chunk_size = (file_size + self.worker_num - 1) / self.worker_num\n file_handler = open(filename, \"r\")\n chunks = []\n pos = 0\n while pos < file_size:\n next_pos = min(pos + chunk_size, file_size)\n if pos == 0:\n chunks.append((filename, pos, self.find_next_newline(file_handler, next_pos)))\n else:\n chunks.append((filename, self.find_next_newline(file_handler, pos), self.find_next_newline(file_handler, next_pos)))\n pos = next_pos\n file_handler.close()\n return chunks", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def readFromFiles(self, networkFile, demandFile):\n self.readNetworkFile(networkFile)\n self.readDemandFile(demandFile)\n self.validate()\n self.finalize()", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read_cif_files(directory, progress_bar=True):\n if progress_bar:\n import progressbar\n files = progressbar.progressbar(os.listdir(directory))\n else:\n files = os.listdir(directory)\n\n for filename in files:\n if filename.endswith('.cif'):\n atoms = read(os.path.join(directory, filename))\n yield atoms, filename", "def __next__(self) -> Report:\n raw_data = []\n # Get the current timestamp\n current_timestamp = self.saved_timestamp\n previous_target = None\n # For all files\n\n for path_file in self.filenames:\n # While timestamp is lower or equal\n 
while True:\n # Get the next line\n row = self.tmp_read[path_file]['next_line']\n\n # If nothing more, break\n if row is None:\n # If the first file a no next file, just stop the iteration\n if not raw_data and path_file == self.filenames[0]:\n self._close_file()\n raise StopIteration()\n else:\n break\n\n # Get the timestamp as datetime\n row_timestamp = utils.timestamp_to_datetime(\n int(row['timestamp']))\n # If timestamp is higher, we stop here\n if row_timestamp > current_timestamp:\n if path_file == self.filenames[-1]:\n self.saved_timestamp = row_timestamp\n break # move to next file\n\n if row_timestamp < current_timestamp:\n self.tmp_read[path_file]['next_line'] = self._next(path_file)\n continue\n\n if previous_target is not None:\n if row['target'] != previous_target:\n break # move to next file\n else:\n previous_target = row['target']\n\n # Else if it's the same, we merge\n raw_data.append((path_file.split('/')[-1], row))\n # Next line\n self.tmp_read[path_file]['next_line'] = self._next(path_file)\n\n if not raw_data:\n self._close_file()\n raise StopIteration()\n\n report = self.report_type.from_csv_lines(raw_data)\n\n return report", "def read_data_files(filenames, datapath, ids=None):\n filenames = np.array(filenames) # make sure it's array\n if ids is None:\n ids = range(0, len(filenames))\n\n for i in [filenames[k] for k in ids]:\n yield str(open(datapath+i, 'r').read())", "def read_files(path):\n with open(path, \"rt\") as f:\n for line in f:\n print(line.replace(\"\\n\", \"\"))", "def load_cloudset(self, idx: int):\n\n while idx < len(self.files1):\n file = self.files1[idx]\n slashs = [pos for pos, char in enumerate(file) if char == '/']\n filename = file[slashs[-1]:-4]\n print(\"Viewing: \" + filename)\n\n with open(file, 'rb') as f:\n content = pickle.load(f)\n\n hybrid_idx = content[0]\n hybrid_file = [file for file in self.files2 if 'cloud_{}'.format(hybrid_idx) in file]\n hybrid = basics.load_pkl(hybrid_file[0])\n\n local_bfs = content[1]\n sample = content[2]\n bfs_cloud = visualize.prepare_bfs(hybrid, local_bfs)\n\n hybrid_bfs = clouds.merge_clouds([hybrid, bfs_cloud])\n res = self.core_next(hybrid_bfs, sample, 'sample_h{}_i{}'.format(hybrid_idx, idx))\n\n if res is None:\n return\n else:\n idx += res", "def get_one_shot_iterator(self):\n\n files = self._get_all_files()\n\n dataset = (\n tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)\n .map(self._parse_function, num_parallel_calls=self.num_readers)\n .map(self._preprocess_image, num_parallel_calls=self.num_readers))\n\n if self.should_shuffle:\n dataset = dataset.shuffle(buffer_size=100)\n\n if self.should_repeat:\n dataset = dataset.repeat() # Repeat forever for training.\n else:\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)\n return dataset.make_one_shot_iterator()", "def next(self, files):\n\n csd_list = {}\n\n for path in files:\n acq, fname = path.split(\"/\")[-2:]\n name = os.path.join(acq, fname)\n\n # Always include non corr files\n if name not in self.corr_files:\n self.log.debug(\"Non time stream file encountered %s.\", name)\n continue\n\n # Figure out which CSD the file starts and ends on\n start, end = [int(t) for t in self.corr_files[name]]\n\n # Add this file to the set of files for the relevant days\n csd_list.setdefault(start, set()).add(path)\n csd_list.setdefault(end, set()).add(path)\n\n new_files = set()\n\n for csd, csd_files in sorted(csd_list.items()):\n if csd in self.csd_list:\n self.log.debug(\"Skipping 
existing CSD=%i, files: %s\", csd, csd_files)\n continue\n\n if csd in self.skip_csd:\n self.log.debug(\"Skipping specified CSD=%i, files: %s\", csd, csd_files)\n continue\n\n if len(csd_files) < self.min_files_in_csd:\n self.log.debug(\"Skipping CSD=%i with too few files: %s\", csd, csd_files)\n continue\n\n # Great, we passed the cut, add to the final set\n new_files.update(csd_files)\n\n self.log.debug(\n \"Input list %i files, after filtering %i files.\", len(files), len(new_files)\n )\n\n return sorted(list(new_files))", "def files_and_folders(self):\n yield from self._root.files_and_folders(0)", "def iterate_file(fpath, start=None, stop=None, step=None, mmap_mode=None):\n slicer = slice(start, stop, step)\n _, ext = splitext(fpath)\n if ext == '.pkl':\n events = load_pickle(fpath)\n elif ext == '.npy':\n try:\n events = np.load(fpath, mmap_mode=mmap_mode)\n except:\n sys.stderr.write('failed to load \"{}\"\\n'.format(fpath))\n raise\n else:\n raise ValueError(fpath)\n\n num_events_in_file = len(events)\n indices = range(num_events_in_file)[slicer] # pylint: disable=range-builtin-not-iterating\n sliced_events = events[slicer]\n\n return num_events_in_file, indices, sliced_events", "def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return", "def loadFirst(self):\r\n logger.debug(\"loadFirst\")\r\n\r\n if self.filelength <= self.chunksize:\r\n logger.debug(f\"EOF {self.filelength} <= chunksize {self.chunksize}\")\r\n text = self.reader(self.fileName, 0, os.SEEK_SET, nbytes=None)\r\n else:\r\n logger.debug(f\"EOF {self.filelength} > chunksize{self.chunksize}\")\r\n text = self.reader(self.fileName, 0, os.SEEK_SET, nbytes=self.chunksize)\r\n\r\n if not self.delimiter: # determine once the basic properties of this file, such as delimtier, quoting etc.\r\n self.set_fileproperties(text)\r\n\r\n self.currentstartline = 0\r\n if self.line_numbers():\r\n text = self._add_line_numbers(text, linestart=self.currentstartline)\r\n self.textwnd.setText(text)\r\n\r\n if self.tableBtn.isChecked():\r\n self._show_as_table()", "def open_files(directory):\n documents = []\n for fl in (os.listdir(directory)):\n if fl.endswith('.txt'):\n fl_path = os.path.join(directory, fl)\n with open(fl_path, 'r') as f:\n full_text = f.read()\n documents.append(full_text)\n return documents", "def read_file(file_name: str):\n with open(file_name) as fread:\n for line in fread:\n yield line", "def open_fast5_files(path, mode=\"r\"):\n for filename in find_fast5_files(path):\n try:\n hdf = Fast5File(filename, mode=mode)\n if sanity_check(hdf):\n yield hdf\n except OSError:\n try:\n hdf.close()\n except:\n pass", "def read_raw_files(path):\n dfs = []\n try:\n for file in os.listdir(path):\n dfs.append(\n spark.read.format('com.github.saurfang.sas.spark') \\\n .load(file)\n )\n except Exception as e:\n logger.error('Failed to read raw SAS files...')\n logger.error(e)\n raise\n return concat_df(*ds)", "def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. 
resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')", "def read(self, filename):\n pass", "def read(self, filename):\n pass" ]
[ "0.64000064", "0.6366881", "0.62643075", "0.6083623", "0.60555285", "0.57724005", "0.5749551", "0.5745724", "0.5738006", "0.5731174", "0.5714755", "0.5696059", "0.56833583", "0.5662904", "0.5600594", "0.5582189", "0.5556307", "0.5550086", "0.55438334", "0.5493082", "0.5475053", "0.543539", "0.5413961", "0.5403187", "0.5346062", "0.53380346", "0.5332199", "0.5331485", "0.5320079", "0.53041494", "0.53009206", "0.5287062", "0.5284417", "0.5280773", "0.52677286", "0.5264409", "0.5260476", "0.5253044", "0.5240104", "0.52374953", "0.5232847", "0.5217604", "0.52032185", "0.5202861", "0.5193149", "0.51916677", "0.51834667", "0.5181705", "0.5174694", "0.5173255", "0.51710516", "0.517041", "0.517028", "0.51572424", "0.51524603", "0.5150926", "0.51496404", "0.51459014", "0.5139021", "0.5131229", "0.51236457", "0.51206553", "0.5118841", "0.5117904", "0.50949574", "0.5093591", "0.5090304", "0.50814784", "0.5078751", "0.50777155", "0.5072757", "0.5071154", "0.50708705", "0.50636816", "0.5057534", "0.5053183", "0.5049723", "0.5048605", "0.5040765", "0.5038469", "0.5032782", "0.50309926", "0.5030676", "0.50194186", "0.5011115", "0.50080633", "0.5002997", "0.49983686", "0.49974492", "0.49963573", "0.4996261", "0.49928012", "0.49897823", "0.49865177", "0.49825105", "0.49761048", "0.49744195", "0.49728835", "0.4972716", "0.4972716" ]
0.54052305
23
Pick section of signal
def pick_section(signal, section=None):
    len_noise = signal.shape[-1]
    if section is None:
        len_sig = len_noise
        ii = 0
    elif isinstance(section, int):
        len_sig = section
        ii = np.random.randint(0, len_noise - len_sig)
    else:
        len_sig = np.asarray(section).shape[-1]
        ii = np.random.randint(0, len_noise - len_sig)
    return signal[..., ii:ii + len_sig]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phasing_signal(self, phasing, r, c):\r\n def signal():\r\n value = phasing.currentIndex()\r\n if value >= 0 and value <= 2:\r\n globals.sections[r, c] = value\r\n return signal", "def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)", "def selection_fn(self, trace, points, selector):\n self.segment = self.fig.layout[\"sliders\"][0].active\n seg = self.segment\n\n xrange = selector.xrange\n wave = self.wave[seg]\n mask = self.mask[seg]\n\n # Choose pixels and value depending on selected type\n if self.mask_type == \"good\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 0)\n elif self.mask_type == \"bad\":\n value = 0\n idx = (wave > xrange[0]) & (wave < xrange[1])\n elif self.mask_type == \"line\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask != 0)\n print(np.count_nonzero(idx))\n elif self.mask_type == \"cont\":\n value = 2\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 1)\n else:\n return\n\n # Apply changes if any\n if np.count_nonzero(idx) != 0:\n self.mask[seg][idx] = value\n\n with self.fig.batch_update():\n # Update Line Mask\n m = self.line_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y\n\n # Update Cont Mask\n m = self.cont_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def slicewhere(condition):\n regions = ndimage.find_objects(ndimage.label(condition)[0])\n return [region[0] for region in regions]", "def get_sample_mask(self):", "def segment(data):", "def _pickFull(self, context):\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n rayObject = rayObject[:, :3]\n\n data = self.getData(copy=False)\n bins = utils.segmentVolumeIntersect(\n rayObject, numpy.array(data.shape) - 1)\n if bins is None:\n return None\n\n # gather bin data\n offsets = [(i, j, k) for i in (0, 1) for j in (0, 1) for k in (0, 1)]\n indices = bins[:, numpy.newaxis, :] + offsets\n binsData = data[indices[:, :, 0], indices[:, 
:, 1], indices[:, :, 2]]\n # binsData.shape = nbins, 8\n # TODO up-to this point everything can be done once for all isosurfaces\n\n # check bin candidates\n level = self.getLevel()\n mask = numpy.logical_and(numpy.nanmin(binsData, axis=1) <= level,\n level <= numpy.nanmax(binsData, axis=1))\n bins = bins[mask]\n binsData = binsData[mask]\n\n if len(bins) == 0:\n return None # No bin candidate\n\n # do picking on candidates\n intersections = []\n depths = []\n for currentBin, data in zip(bins, binsData):\n mc = MarchingCubes(data.reshape(2, 2, 2), isolevel=level)\n points = mc.get_vertices() + currentBin\n triangles = points[mc.get_indices()]\n t = glu.segmentTrianglesIntersection(rayObject, triangles)[1]\n t = numpy.unique(t) # Duplicates happen on triangle edges\n if len(t) != 0:\n # Compute intersection points and get closest data point\n points = t.reshape(-1, 1) * (rayObject[1] - rayObject[0]) + rayObject[0]\n # Get closest data points by rounding to int\n intersections.extend(points)\n depths.extend(t)\n\n if len(intersections) == 0:\n return None # No intersected triangles\n\n intersections = numpy.array(intersections)[numpy.argsort(depths)]\n indices = numpy.transpose(numpy.round(intersections).astype(numpy.int64))\n return PickingResult(self, positions=intersections, indices=indices)", "def selectData(self, rubberBandRect, fromScenePoint, toScenePoint):\n if fromScenePoint == toScenePoint:\n return\n\n if QApplication.keyboardModifiers() != Qt.ShiftModifier and QApplication.keyboardModifiers() != Qt.ControlModifier:\n # unselect all currently selected items\n for h in self.highlightedItems:\n h.highlighted = False\n self.highlightedItems.clear()\n self.highlightedRings.clear()\n\n sel = self.items(rubberBandRect)\n for s in sel:\n if type(s) == PlotLine:\n parent = s.parentItem()\n siblings = parent.childItems()\n\n if QApplication.keyboardModifiers() == Qt.ControlModifier:\n for sib in siblings:\n if sib in self.highlightedItems:\n sib.highlighted = False\n self.highlightedItems.remove(sib)\n if parent in self.highlightedRings:\n self.highlightedRings.remove(parent)\n else:\n for sib in siblings:\n sib.highlighted = True\n self.highlightedItems.add(sib)\n self.highlightedRings.add(parent)\n\n self.__selectionUpdateTimer.start(self.selectionUpdateDelay)", "def take(self, condition):\n full_data = self._data.copy()\n series_data = full_data['@1'].copy()\n slicer, _ = get_logic_index(series_data, condition, full_data)\n return slicer", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def pick(self,i):\n x_i = self.all[i,:]\n return x_i", "def cbSelectSignal( BoardNum, Direction, Signal, Connection, Polarity ):\n CHK( cbw.cbSelectSignal( BoardNum, Direction, Signal, Connection, Polarity ) )", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n 
bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def randselwave(sample, minlen=0, maxlen=None, nosilence=True):\n if nosilence:\n sig = rmsilence(sample)\n else:\n sig = sample.signal\n\n sigsize = len(sig)\n minoffset = int(minlen * sample.samplerate)\n maxoffset = min(int(maxlen*sample.samplerate),\n sigsize) if maxlen else sigsize\n\n assert (minoffset < maxoffset) and (minoffset <= sigsize), \\\n f\"\"\"BAD: siglen={sigsize}, minlen={minoffset}, maxlen={maxoffset}\"\"\"\n\n # Select begin sample\n ns = randrange(max(1, sigsize-minoffset))\n ne = randrange(ns+minoffset, min(ns+maxoffset, sigsize+1))\n\n return sig[ns:ne]", "def on_select_clip_slot(self, clip_slot):\n pass", "def onpick(cls, event):\n event_len = len(event.ind)\n if not event_len:\n return True\n value = event.ind[-1] + FigureControl.minPossibleGenNumber\n vis_now = FigureControl.isVisible(value)\n FigureControl.makeGenVisible(value, not vis_now, \"dist\")", "def randselphon(sample, phonfunc=None):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n if phonfunc is not None:\n while not phonfunc(ph):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n\n return sample.signal[ns:ne], ph", "def _on_pick(self, event):\n pix_id = event.ind[-1]\n xx, yy, aa = u.Quantity(self.geom.pix_x[pix_id]).value, \\\n u.Quantity(self.geom.pix_y[pix_id]).value, \\\n u.Quantity(np.array(self.geom.pix_area)[pix_id])\n if self.geom.pix_type.startswith(\"hex\"):\n self._active_pixel.xy = (xx, yy)\n else:\n rr = sqrt(aa)\n self._active_pixel.xy = (xx - rr / 2., yy - rr / 2.)\n self._active_pixel.set_visible(True)\n self._active_pixel_label.set_x(xx)\n self._active_pixel_label.set_y(yy)\n self._active_pixel_label.set_text(f\"{pix_id:003d}\")\n self._active_pixel_label.set_visible(True)\n self._update()\n self.on_pixel_clicked(pix_id) # call user-function", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def sample(self, seg_logit, seg_label):", "def _pickFull(self, context):\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n\n points = utils.segmentPlaneIntersect(\n rayObject[0, :3],\n rayObject[1, :3],\n planeNorm=self.getNormal(),\n planePt=self.getPoint())\n\n if len(points) == 1: # Single intersection\n if numpy.any(points[0] < 0.):\n return None # Outside volume\n z, y, x = int(points[0][2]), int(points[0][1]), int(points[0][0])\n\n data = self.getData(copy=False)\n if data is None:\n return None # No dataset\n\n depth, height, width = data.shape\n if z < depth and y < height and x < width:\n return PickingResult(self,\n positions=[points[0]],\n indices=([z], [y], [x]))\n else:\n return None # Outside image\n else: # Either no intersection or segment and image are coplanar\n return None", "def getSplitDetectorSignal(self):\r\n\t\treturn self.splitData", "def selectregion(self, group=None):\n points = pylab.ginput(n=2, timeout=0)\n bounds = [int(point[not self.waveaxis]) for point in points]\n bounds = self._validateregion(bounds)\n try:\n self.regions.append({'min': bounds[0], 'max': bounds[1],\n 'group': group})\n except TypeError:\n pass", "def nearest_test_pulse(self):", "def RecursiveLowPassFast(signal, coeff, self):\n # Creates running mean value of the input\n 
ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) \n # Plot Running threshold value at the current plot\n self.p1.plot(self.t, ml, pen=pg.mkPen(color=(246, 178, 255), width=3))\n\n # Creates running square deviation from the mean\n vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml))\n # Creates \"threshold line\". If current value < sl[i] -> i belongs to event. \n sl = ml - coeff['S'] * np.sqrt(vl)\n self.p1.plot(self.t, sl, pen=pg.mkPen(color=(173, 27, 183), width=3))\n # Finds the length of the initial signal\n Ni = len(signal)\n # Finds those points where signal less than \"threshold line\"\n points = np.array(np.where(signal<=sl)[0])\n to_pop=np.array([]) # Empty supplementary array for finding adjacent points \n # For loop for finding adjacent points \n for i in range(1,len(points)):\n if points[i] - points[i - 1] == 1:\n to_pop=np.append(to_pop, i)\n # Points contain only border points of events\n points = np.delete(points, to_pop)\n # Empty list for Event location storage\n RoughEventLocations = []\n NumberOfEvents=0 #Number of events\n\n # For Loop for finding separating edges of different events and satisfying Event length limits\n for i in points:\n if NumberOfEvents is not 0:\n if i >= RoughEventLocations[NumberOfEvents-1][0] and i <= RoughEventLocations[NumberOfEvents-1][1]:\n continue\n NumberOfEvents += 1\n start = i\n El = ml[i] - coeff['E'] * np.sqrt(vl[i])\n Mm = ml[i]\n Vv = vl[i]\n duration = 0\n while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['eventlengthLimit']:\n duration += 1\n i += 1\n if duration >= coeff['eventlengthLimit'] or i > (Ni - 10):\n NumberOfEvents -= 1\n else:\n k = start\n while signal[k] < Mm and k > 1:\n k -= 1\n start = k - 1\n k2 = i + 1\n while signal[k2] > Mm:\n k2 -= 1\n endp = k2\n if start<0:\n start=0\n RoughEventLocations.append((start, endp, ml[start], vl[start]))\n\n return np.array(RoughEventLocations)", "def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selected(self, point):\n local_point = (point[0] - self.x, point[1] - self.y)\n self.remove(self.slide.rect)\n self.slide.update(local_point)\n self.insert(1, self.slide.rect)\n self.slide.rect.fill = self.slide_color\n self.title.text = 
f\"{self.name}:{int(self.slide.value)}\"", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def extract_signal_from_mask(data, mask):\r\n affine = data[0].affine\r\n resample_mask = resample_img(mask,affine)\r\n signal = apply_mask(data, resample_mask, ensure_finite=True)\r\n print(signal.shape, type(signal))\r\n\r\n return signal", "def __getitem__(self, item):\n if item is Ellipsis or (\n isinstance(item, slice) and item == slice(None)):\n return self\n\n if not isinstance(item, tuple):\n item = (item,)\n\n if not all(is_integer(i) or isinstance(i, slice) for i in item):\n raise SignalError(\"Can only index or slice into signals\")\n\n if all(map(is_integer, item)):\n # turn one index into slice to get a view from numpy\n item = item[:-1] + (slice(item[-1], item[-1]+1),)\n\n view = self._initial_value[item]\n offset = (npext.array_offset(view)\n - npext.array_offset(self._initial_value))\n return Signal(view, name=\"%s[%s]\" % (self.name, item),\n base=self.base, offset=offset)", "def sample_selection(attr, old, new):\n if len(new) == 0:\n source.data = source.from_df(merged_data)\n else:\n samples = [s+1 for s in new]\n selected_data = merged_data.loc[merged_data['sample_num'].isin(samples)]\n source.data = source.from_df(selected_data)\n z = np.linspace(min(source.data['redshift']), max(source.data['redshift']), 100)\n cosmo_distmod_range = cosmo.distmod(z=z).value\n source.data['z_range'] = z\n source.data['cosmo_distmod_range'] = cosmo_distmod_range", "def select_sweepstakes(self):\n pass", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def sample(self, shape):\n\t\traise NotImplementedError()", "def pick(self, inv, pl, group, sc):\r\n if self.rect.colliderect(pl) and not self.used:\r\n group.remove(self)\r\n inv += ['score {}'.format(id(self))]\r\n sc += [sc[len(sc) - 1] + 100]\r\n self.used = True", "def update_signal(self,current_time):\r\n time = (current_time+self.offset)%self.cycle\r\n \r\n for ph_id,group in self.lane_groups.items():\r\n \r\n ph = self.phases[ph_id]\r\n \r\n if not (ph.start<=time<ph.end):\r\n # when the light is red, the section cannot generate demand\r\n for sec in group:\r\n sec.demand=0", "def extract_signal_features(signal, signal_sr):\n\n # normalise the sound signal before processing\n signal = signal / np.max(np.abs(signal))\n signal = np.nan_to_num(signal)\n # trim the signal to the appropriate length\n trimmed_signal, idc = librosa.effects.trim(signal, frame_length=FRAME_LEN, hop_length=HOP)\n # extract the signal duration\n signal_duration = librosa.get_duration(y=trimmed_signal, sr=signal_sr)\n # use librosa to track the beats\n tempo, beats = librosa.beat.beat_track(y=trimmed_signal, sr=signal_sr)\n # find the onset strength of the trimmed signal\n o_env = librosa.onset.onset_strength(trimmed_signal, sr=signal_sr)\n # find the frames of the onset\n onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=signal_sr)\n # keep only the first onset frame\n onsets = onset_frames.shape[0]\n # decompose the signal 
into its magnitude and the phase components such that signal = mag * phase\n mag, phase = librosa.magphase(librosa.stft(trimmed_signal, n_fft=FRAME_LEN, hop_length=HOP))\n # extract the rms from the magnitude component\n rms = librosa.feature.rms(y=trimmed_signal)[0]\n # extract the spectral centroid of the magnitude\n cent = librosa.feature.spectral_centroid(S=mag)[0]\n # extract the spectral rolloff point from the magnitude\n rolloff = librosa.feature.spectral_rolloff(S=mag, sr=signal_sr)[0]\n # extract the zero crossing rate from the trimmed signal using the predefined frame and hop lengths\n zcr = librosa.feature.zero_crossing_rate(trimmed_signal, frame_length=FRAME_LEN, hop_length=HOP)[0]\n\n # pack the extracted features into the feature vector to be returned\n signal_features = np.concatenate(\n (\n np.array([signal_duration, tempo, onsets]),\n get_period(signal, signal_sr=signal_sr),\n sta_fun(rms),\n sta_fun(cent),\n sta_fun(rolloff),\n sta_fun(zcr),\n ),\n axis=0,\n )\n\n # finally, return the gathered features and the trimmed signal\n return signal_features, trimmed_signal", "def Select_Data(spec,wave_edges):\n\tif len(wave_edges) < 2: \n\t\traise ValueError('must be at least two bin edges!')\n\n\twave,flux,error,dfp,dfm= spec\n\tinds = np.where( (wave > wave_edges[0]) & (wave <= wave_edges[1]) )[0]\n\n\treturn wave[inds], flux[inds],error[inds], dfp[inds], dfm[inds]", "def selector(min_k: float, max_k: float) -> Callable:\n if max_k < osc1 or (max_k - min_k) * r < 2 * pi:\n return low_osc\n return hi_osc", "def pick(layer, event):\n # on press\n layer.selected_label = layer._value or 0", "def test():\n\n file = 'crosssection.dat'\n f = open(file,'r')\n lines = f.readlines()\n nline = len(lines)\n points = np.zeros(shape=(nline,4))\n sigtable = np.zeros(nline)\n for i in range(nline):\n points[i,0] = float(lines[i].split()[0])\n points[i,1] = float(lines[i].split()[1])\n points[i,2] = float(lines[i].split()[2])\n points[i,3] = float(lines[i].split()[3])\n sigtable[i] = float(lines[i].split()[4])\n\n nbin = 60\n npts = nline/nbin\n\n # checking lensing cross section against magnitude\n '''\n for i in range(npts):\n plt.plot(points[i*nbin:(i+1)*nbin,3],sigtable[i*nbin:(i+1)*nbin])\n plt.show()\n '''\n npts = npts/nbin\n\n # checking lensing cross section against velocity dispersion\n '''\n for i in range(nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,0]==points[i,0])\\\n &(points[:,3]==points[i,3]))\n vel = points[mask,2]\n sigma = sigtable[mask]\n plt.plot(vel,sigma)\n plt.show()\n '''\n\n # checking lensing cross section against lens redshift\n #'''\n for i in range(3000,nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zl = points[mask,0]\n sigma = sigtable[mask]\n plt.plot(zl,sigma)\n plt.show()\n #'''\n\n # checking lensing cross section against source redshift\n for i in reversed(range(nline)):\n mask, = np.where((points[:,0]==points[i,0])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zs = points[mask,1]\n sigma = sigtable[mask]\n plt.plot(zs,sigma)\n plt.show()", "def sample_from_prior(self):\n raise NotImplementedError", "def __getitem__(self, index):\n d = self._signal_object[index]\n d = bootstrap_data_voxel(d, self._H, self._R)\n d.clip(self._min_signal, 1., d)\n return d", "def first_active(self, k):\n return k - self.p", "def signal_kernel(self, signum: int):", "def previousRange(self):\r\n if (self.selectedmap > 0):\r\n 
self.pickMap(self.selectedmap-1)", "def _selectInd(self, ind):\n logger.info(f'plotNumber:{self.plotNumber} ind: {ind}')\n if ind > len(self.plotDf)-1:\n return\n xVal = self.plotDf.at[ind, self.stateDict['xStat']]\n yVal = self.plotDf.at[ind, self.stateDict['yStat']]\n if self.scatterPlotSelection is not None:\n logger.info(f' setting scatterPlotSelection x:{xVal} y:{yVal}')\n self.scatterPlotSelection.set_data(xVal, yVal)\n self.fig.canvas.draw()", "def select(self, arr):\n\n return arr[self.relative_degree_idxs]", "def getLowGainScatteringSignal(self):\r\n\t\treturn self.lowGainScatData", "def cutSec(ppm, X, start, stop, featureMask):\n\tflip=0\n\tif ppm[0]>ppm[-1]:\n\t\tflip=1\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n \n #find first entry in ppm with >='start' valu\n\tstart = (ppm>=start).nonzero()\n\tstart = start[0][0]#first entry\n\tstop = (ppm<=stop).nonzero()\n\tstop = stop[0][-1]#last entry\n\n#currently setting featureMask will get rid of peaks in start:stop region BUT it also marks as excluded so have removed as inaccurately marking for exclusion when all we want to do is remove from intensityData not mark as exluded\n\ttry:\n\t\tfeatureMask[0,start:stop]=False # this may only occur on unit test data, not sure need to check but either way was causing issue\n\texcept:\n\t\tfeatureMask[start:stop]=False\n\tif flip==1:\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n\treturn ppm, X, featureMask\n\tpass", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? 
(y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] 
= np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]", "def slice_region( self, ini, end ):\n return self[(self['frame'] >= ini) & (self['frame'] <= end)]", "def selector(min_k: float, max_k: float) -> Callable:\n if max_k < 2 * osc or (max_k - min_k) * r < 2 * pi:\n return low_osc\n return hi_osc", "def selectShot(self):\r\n shot = self.mapToShot(self.remainingCoordinates.pop())\r\n logging.debug(\"select shot: %s\" % (shot))\r\n return shot", "def get_sample(x, y):\n return noise[x][y]", "def select(self):\n\n return self.p[0], self.p[1]", "def getSamples(self, section, pitch, target=\"beats\"):\n sample_list = audio.AudioQuantumList()\n if target == \"beats\":\n sample_list.extend([b for x in section.children() for b in x.children()]);\n elif target == \"bars\":\n sample_list.extend(section.children())\n return sample_list.that(overlap_ends_of(self.original.analysis.segments.that(have_pitch_max(pitch)).that(overlap_starts_of(sample_list))))", "def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()", "def _phase_picker(self, event):\n\n event_crd = np.array([event[[\"X\", \"Y\", \"Z\"]].values])\n event_xyz = np.array(self.lut.xyz2coord(event_crd,\n inverse=True)).astype(int)[0]\n\n p_ttime = self.lut.value_at(\"TIME_P\", event_xyz)[0]\n s_ttime = self.lut.value_at(\"TIME_S\", event_xyz)[0]\n\n # Determining the stations that can be picked on and the phases\n picks = pd.DataFrame(index=np.arange(0, 2 * len(self.data.p_onset)),\n columns=[\"Name\", \"Phase\", \"ModelledTime\",\n \"PickTime\", \"PickError\", \"SNR\"])\n\n p_gauss = np.array([])\n s_gauss = np.array([])\n idx = 0\n for i in range(len(self.data.p_onset)):\n p_arrival = event[\"DT\"] + p_ttime[i]\n s_arrival = event[\"DT\"] + s_ttime[i]\n\n if self.picking_mode == \"Gaussian\":\n for phase in [\"P\", \"S\"]:\n if phase == \"P\":\n onset = self.data.p_onset[i]\n arrival = p_arrival\n else:\n onset = self.data.s_onset[i]\n arrival = s_arrival\n\n gau, max_onset, err, mn = \\\n self._gaussian_picker(onset,\n phase,\n self.data.start_time,\n p_arrival,\n s_arrival,\n p_ttime[i],\n s_ttime[i])\n\n if phase == \"P\":\n p_gauss = np.hstack([p_gauss, gau])\n else:\n s_gauss = np.hstack([s_gauss, gau])\n\n picks.iloc[idx] = [self.lut.station_data[\"Name\"][i],\n phase, arrival, mn, err, max_onset]\n idx += 1\n\n phase_picks = {}\n phase_picks[\"Pick\"] = picks\n phase_picks[\"GAU_P\"] = p_gauss\n phase_picks[\"GAU_S\"] = s_gauss\n\n return phase_picks", "def 
alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def figure_mouse_release(self, event):\n\n if event.button != 1: return None\n try:\n signal_time, signal_cid = self._exclude_selected_region_signal\n except AttributeError:\n return None\n \n xy = self._exclude_selected_region.get_xy()\n \n if event.xdata is None:\n # Out of axis; exclude based on the last known worthwhile position.\n xdata = xy[2, 0]\n else:\n xdata = event.xdata\n\n # If the two mouse events were within some time interval,\n # then we should not add a mask because those signals were probably\n # part of a double-click event.\n if time() - signal_time > DOUBLE_CLICK_INTERVAL \\\n and np.abs(xy[0,0] - xdata) > 0:\n \n # Update the cache with the new mask.\n _ = self._cache[\"input\"].get(\"exclude\", np.array([]))\n _.shape = (-1, 2)\n self._cache[\"input\"][\"exclude\"] = np.vstack((\n np.array([xy[0,0], xy[2, 0]]).reshape(-1, 2), _))\n\n # Fit and re-draw the continuum, and its mask.\n self.fit_continuum(clobber=True)\n self.update_continuum_mask(refresh=False)\n self.draw_continuum(refresh=False)\n\n xy[:, 0] = np.nan\n\n 
self._exclude_selected_region.set_xy(xy)\n self.norm_plot.mpl_disconnect(signal_cid)\n self.norm_plot.draw()\n del self._exclude_selected_region_signal\n return None", "def add_picks(sta, e, orig_time, st, cfg):\n from obspy.core.event import (Pick, WaveformStreamID, EvaluationMode,\n QuantityError)\n for n_p, p in enumerate(sta[\"best_obs\"]):\n if n_p == 0:\n phase = \"P\"\n elif n_p == 1:\n phase = \"S\"\n if p and p[3] != 4:\n pick = Pick(waveform_id=WaveformStreamID(\n network_code=st[0].stats.network,\n station_code=st[0].stats.station,\n channel_code=p[2]),\n time=p[0], phase_hint=phase,\n evaluation_mode=EvaluationMode(\"automatic\"),\n time_errors=QuantityError(\n uncertainty=cfg.picking.T_ERRORS[p[3]]))\n e.picks.append(pick)\n return(e)", "def selector(min_k: float, max_k: float) -> Callable:\n if max_k < osc10 or (max_k - min_k) * r < 2 * pi:\n return low_osc\n return hi_osc", "def define_sections (data_shape, xbin=1, ybin=1, tel=None):\n\n ysize, xsize = data_shape\n ny = get_par(set_bb.ny,tel)\n nx = get_par(set_bb.nx,tel)\n dy = ysize // ny\n dx = xsize // nx\n\n ysize_chan = get_par(set_bb.ysize_chan,tel) // ybin\n xsize_chan = get_par(set_bb.xsize_chan,tel) // xbin\n ysize_os = (ysize-ny*ysize_chan) // ny\n xsize_os = (xsize-nx*xsize_chan) // nx\n\n # the sections below are defined such that e.g. chan_sec[0] refers\n # to all pixels of the first channel, where the channel indices\n # are currently defined to be located on the CCD as follows:\n #\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n\n # channel section slices including overscan; shape=(16,2)\n chan_sec = tuple([(slice(y,y+dy), slice(x,x+dx))\n for y in range(0,ysize,dy) for x in range(0,xsize,dx)])\n\n # channel data section slices; shape=(16,2)\n data_sec = tuple([(slice(y,y+ysize_chan), slice(x,x+xsize_chan))\n for y in range(0,ysize,dy+ysize_os) for x in range(0,xsize,dx)])\n\n # channel vertical overscan section slices; shape=(16,2)\n # cut off [ncut] pixels to avoid including pixels on the edge of the\n # overscan that are contaminated with flux from the image\n # and also discard last column as can have high value\n ncut = 5\n ncut_vert = max(ncut // xbin, 1)\n os_sec_vert = tuple([(slice(y,y+dy), slice(x+xsize_chan+ncut_vert,x+dx-1))\n for y in range(0,ysize,dy) for x in range(0,xsize,dx)])\n\n # channel horizontal overscan sections; shape=(16,2)\n # cut off [ncut] pixels to avoid including pixels on the edge of the\n # overscan that are contaminated with flux from the image\n ncut_hori = max(ncut // ybin, 1)\n ysize_os_cut = ysize_os - ncut_hori\n os_sec_hori = tuple([(slice(y,y+ysize_os_cut), slice(x,x+dx))\n for y in range(dy-ysize_os_cut,dy+ysize_os_cut,ysize_os_cut)\n for x in range(0,xsize,dx)])\n\n # channel reduced data section slices; shape=(16,2)\n data_sec_red = tuple([(slice(y,y+ysize_chan), slice(x,x+xsize_chan))\n for y in range(0,ysize-ny*ysize_os,ysize_chan)\n for x in range(0,xsize-nx*xsize_os,xsize_chan)])\n\n\n return chan_sec, data_sec, os_sec_hori, os_sec_vert, data_sec_red", "def _gaussian_picker(self, onset, phase, start_time, p_arr, s_arr, ptt,\n stt):\n\n # Determine indices of P and S pick times\n pt_idx = int((p_arr - start_time) * self.sampling_rate)\n st_idx = int((s_arr - start_time) * self.sampling_rate)\n\n # Determine P and S pick window upper and lower bounds 
based on\n # (P-S)/2 -- either this or the next window definition will be\n # used depending on which is wider.\n pmin_idx = int(pt_idx - (st_idx - pt_idx) / 2)\n pmax_idx = int(pt_idx + (st_idx - pt_idx) / 2)\n smin_idx = int(st_idx - (st_idx - pt_idx) / 2)\n smax_idx = int(st_idx + (st_idx - pt_idx) / 2)\n\n # Check if index falls outside length of onset function; if so set\n # window to start/end at start/end of data.\n for idx in [pmin_idx, pmax_idx, smin_idx, smax_idx]:\n if idx < 0:\n idx = 0\n if idx > len(onset):\n idx = len(onset)\n\n # Defining the bounds to search for the event over\n # Determine P and S pick window upper and lower bounds based on\n # set percentage of total travel time, plus marginal window\n\n # window based on self.fraction_tt of P/S travel time\n pp_ttime = ptt * self.fraction_tt\n ps_ttime = stt * self.fraction_tt\n\n # Add length of marginal window to this. Convert to index.\n P_idxmin_new = int(pt_idx - int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n P_idxmax_new = int(pt_idx + int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n S_idxmin_new = int(st_idx - int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n S_idxmax_new = int(st_idx + int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n\n # Setting so the search region can't be bigger than (P-S)/2:\n # compare the two window definitions; if (P-S)/2 window is\n # smaller then use this (to avoid picking the wrong phase).\n P_idxmin = np.max([pmin_idx, P_idxmin_new])\n P_idxmax = np.min([pmax_idx, P_idxmax_new])\n S_idxmin = np.max([smin_idx, S_idxmin_new])\n S_idxmax = np.min([smax_idx, S_idxmax_new])\n\n # Setting parameters depending on the phase\n if phase == \"P\":\n sta_winlen = self.p_onset_win[0]\n win_min = P_idxmin\n win_max = P_idxmax\n if phase == \"S\":\n sta_winlen = self.s_onset_win[0]\n win_min = S_idxmin\n win_max = S_idxmax\n\n # Find index of maximum value of onset function in the appropriate\n # pick window\n max_onset = np.argmax(onset[win_min:win_max]) + win_min\n # Trim the onset function in the pick window\n onset_trim = onset[win_min:win_max]\n\n # Only keep the onset function outside the pick windows to\n # calculate the pick threshold\n onset_threshold = onset.copy()\n onset_threshold[P_idxmin:P_idxmax] = -1\n onset_threshold[S_idxmin:S_idxmax] = -1\n onset_threshold = onset_threshold[onset_threshold > -1]\n\n # Calculate the pick threshold: either user-specified percentile of\n # data outside pick windows, or 88th percentile within the relevant\n # pick window (whichever is bigger).\n threshold = np.percentile(onset_threshold, self.pick_threshold * 100)\n threshold_window = np.percentile(onset_trim, 88)\n threshold = np.max([threshold, threshold_window])\n\n # Remove data within the pick window that is lower than the threshold\n tmp = (onset_trim - threshold).any() > 0\n\n # If there is any data that meets this requirement...\n if onset[max_onset] >= threshold and tmp:\n exceedence = np.where((onset_trim - threshold) > 0)[0]\n exceedence_dist = np.zeros(len(exceedence))\n\n # Really faffy process to identify the period of data which is\n # above the threshold around the highest value of the onset\n # function.\n d = 1\n e = 0\n while e < len(exceedence_dist) - 1:\n if e == len(exceedence_dist):\n exceedence_dist[e] = d\n else:\n if exceedence[e + 1] == exceedence[e] + 1:\n exceedence_dist[e] = d\n else:\n exceedence_dist[e] = d\n d += 1\n e += 1\n\n # Find the indices for this period of data\n tmp = 
exceedence_dist[np.argmax(onset_trim[exceedence])]\n tmp = np.where(exceedence_dist == tmp)\n\n # Add one data point below the threshold at each end of this period\n gau_idxmin = exceedence[tmp][0] + win_min - 1\n gau_idxmax = exceedence[tmp][-1] + win_min + 2\n\n # Initial guess for gaussian half-width based on onset function\n # STA window length\n data_half_range = int(sta_winlen * self.sampling_rate / 2)\n\n # Select data to fit the gaussian to\n x_data = np.arange(gau_idxmin, gau_idxmax, dtype=float)\n x_data = x_data / self.sampling_rate\n y_data = onset[gau_idxmin:gau_idxmax]\n\n # Convert indices to times\n x_data_dt = np.array([])\n for i in range(len(x_data)):\n x_data_dt = np.hstack([x_data_dt, start_time + x_data[i]])\n\n # Try to fit a gaussian.\n try:\n # Initial parameters are:\n # height = max value of onset function\n # mean = time of max value\n # sigma = data half-range (calculated above)\n p0 = [np.max(y_data),\n float(gau_idxmin + np.argmax(y_data))\n / self.sampling_rate,\n data_half_range / self.sampling_rate]\n\n # Do the fit\n popt, _ = curve_fit(util.gaussian_1d, x_data, y_data, p0)\n\n # Results:\n # popt = [height, mean (seconds), sigma (seconds)]\n max_onset = popt[0]\n # Convert mean (pick time) to time\n mean = start_time + float(popt[1])\n sigma = np.absolute(popt[2])\n\n gaussian_fit = {\"popt\": popt,\n \"xdata\": x_data,\n \"xdata_dt\": x_data_dt,\n \"PickValue\": max_onset,\n \"PickThreshold\": threshold}\n\n # If curve_fit fails. Will also spit error message to stdout,\n # though this can be suppressed - see warnings.filterwarnings()\n except (ValueError, RuntimeError):\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n # If onset function does not exceed threshold in pick window\n else:\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n return gaussian_fit, max_onset, sigma, mean", "def __getitem__(self, idx):\n sliced_timeseries = [None] * self.ch_amount\n return_instant = False\n if not self.trigger_idx:\n self.trigger_idx = 0\n\n trigger_length = len(self.timeseries[self.trigger_idx])\n\n # If idx is an integer, return an \"instantaneous slice\" and initialise slice\n if isinstance(idx, int):\n return_instant = True\n\n # If idx is a negative integer, take the idx element from the end.\n if idx < 0:\n idx = trigger_length + idx\n\n idx = slice(idx, idx + 1)\n\n # If idx.start or stop are None, make them 0 or trigger length.\n if idx.start is None:\n idx = slice(0, idx.stop)\n if idx.stop is None:\n idx = slice(idx.start, trigger_length)\n\n # Check that the indexes are not out of bounds\n if idx.start >= trigger_length or idx.stop > trigger_length:\n raise IndexError(\n f\"Slice ({idx.start}, {idx.stop}) is out of \"\n f\"bounds for channel {self.trigger_idx} \"\n f\"with size {trigger_length}\"\n )\n\n # Operate on each channel on its own\n for n, channel in enumerate(self.timeseries):\n idx_dict = {\"start\": idx.start, \"stop\": idx.stop, \"step\": idx.step}\n # Adapt the slicing indexes to the right frequency\n for i in [\"start\", \"stop\", \"step\"]:\n if idx_dict[i]:\n idx_dict[i] = int(\n np.floor(self.freq[n] / self.freq[self.trigger_idx] * idx_dict[i])\n )\n\n # Correct the slicing stop if necessary\n if idx_dict[\"start\"] == idx_dict[\"stop\"] or return_instant:\n idx_dict[\"stop\"] = idx_dict[\"start\"] + 1\n elif trigger_length == idx.stop:\n idx_dict[\"stop\"] = 
len(channel)\n\n new_idx = slice(idx_dict[\"start\"], idx_dict[\"stop\"], idx_dict[\"step\"])\n sliced_timeseries[n] = channel[new_idx]\n\n sliced_bp = BlueprintInput(\n sliced_timeseries,\n self.freq,\n self.ch_name,\n self.units,\n self.trigger_idx,\n self.num_timepoints_found,\n self.thr,\n self.time_offset,\n )\n\n sliced_bp._time_resampled_to_trigger = self._time_resampled_to_trigger\n return sliced_bp", "def get_selected_muons(muons, trigobj, mask_events, mu_pt_cut_leading, mu_pt_cut_subleading, mu_aeta_cut, mu_iso_cut): \n passes_iso = muons.pfRelIso04_all < mu_iso_cut\n passes_id = muons.mediumId == 1\n passes_subleading_pt = muons.pt > mu_pt_cut_subleading\n passes_leading_pt = muons.pt > mu_pt_cut_leading\n passes_aeta = NUMPY_LIB.abs(muons.eta) < mu_aeta_cut\n \n trigobj.masks[\"mu\"] = (trigobj.id == 13)\n \n muons_matched_to_trigobj = NUMPY_LIB.invert(mask_deltar_first(muons, muons.masks[\"all\"], trigobj, trigobj.masks[\"mu\"], 0.1))\n \n #select muons that pass these cuts\n muons_passing_id = passes_iso & passes_id & passes_subleading_pt & muons_matched_to_trigobj\n \n #select events that have muons passing cuts \n events_passes_muid = sum_in_offsets(muons, muons_passing_id, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n events_passes_leading_pt = sum_in_offsets(muons, muons_passing_id & passes_leading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 1\n events_passes_subleading_pt = sum_in_offsets(muons, muons_passing_id & passes_subleading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n\n base_event_sel = mask_events & events_passes_muid & events_passes_leading_pt & events_passes_subleading_pt\n \n muons_passing_os = select_muons_opposite_sign(muons, muons_passing_id & passes_subleading_pt)\n events_passes_os = sum_in_offsets(muons, muons_passing_os, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) == 2\n \n final_event_sel = base_event_sel & events_passes_os\n final_muon_sel = muons_passing_id & passes_subleading_pt & muons_passing_os\n \n return {\n \"selected_events\": final_event_sel,\n \"selected_muons\": final_muon_sel,\n }", "def updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)", "def selector(min_k: float, max_k: float) -> Callable:\n if max_k < 10 * osc or (max_k - min_k) * r < 2 * pi:\n return low_osc\n return hi_osc", "def pick_initial_sample(self):\n x = np.atleast_1d(self.init_sample_func())\n return 0, x", "def input_select(self):\n return self._read(0x17, 0, 0x01)", "def pick_loc(self, event, x):\n #print(event, x)\n self.vtkWidget.iren.RemoveObservers('RightButtonPressEvent')\n loc = event.GetEventPosition()\n\n # Currently this only allow one pick points, but in the future, more reference points may be needed\n if self.pnt is None: # Check no points are already picked\n self.pnt = vtkRenWin.Pick_point(self.renWin, loc)\n else:\n show_message(\"A point is already set as the reference.\\n\"\n \"Clear the picked points to change reference\",\n message_type=\"info\")\n #vtkRenWin.mark(self.renWin,self.pnt[0],self.pnt[1],self.pnt[2])\n # print(self.pnt)", "def sample(self, idx):\n idx = (self._curr_pos + idx) % self._curr_size\n action = self.action[idx]\n reward = self.reward[idx]\n isOver = self.isOver[idx]\n comb_mask = self.comb_mask[idx]\n if idx + 2 <= self._curr_size:\n state = self.state[idx:idx+2]\n 
fine_mask = self.fine_mask[idx:idx+2]\n else:\n end = idx + 2 - self._curr_size\n state = self._slice(self.state, idx, end)\n fine_mask = self._slice(self.fine_mask, idx, end)\n return state, action, reward, isOver, comb_mask, fine_mask", "def take_one_sample(self, event, pw_intensity):\n phase = self.get_current_time(event) % self.time_period\n # Rejection sampling with self.random_state\n new_sample = 0\n num_segments = pw_intensity.shape[0]\n s_max = np.max(pw_intensity)\n while True:\n new_sample += self.random_state.exponential(scale=1.0 / s_max)\n current_piece_index = int(num_segments * ((new_sample + phase) % self.time_period) / self.time_period)\n if self.random_state.rand() < pw_intensity[current_piece_index] / s_max:\n # print('Sample chosen: ', new_sample)\n return new_sample", "def consecutive_sections(): # noqa: D416", "def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set", "def get_cross_section(self, sample):\n \n # Path to SusHi \n sushi_binary = os.environ['SUSHIPATH']\n if not os.path.exists(sushi_binary):\n print 'No known SusHi binary file'\n exit(-1)\n \n if sample in ['H', 'A']:\n \n self.sushi_output[sample] = self.sushi_input[sample].replace('.in', '.out')\n \n # Convert to relative path to shorten the file name, since SusHi\n # can't deal with inputs >60 chars\n relpath_in = os.path.relpath(self.sushi_input[sample], os.getcwd())\n relpath_out = os.path.relpath(self.sushi_output[sample], os.getcwd())\n \n # Run SusHi\n ret = subprocess.check_call([sushi_binary,\n relpath_in,\n relpath_out])\n #self.sushi_input[sample],\n #self.sushi_output[sample]])\n \n if ret: return ret\n \n # Parse result \n lha = LHA(self.sushi_output[sample])\n \n self.xsec[sample] = float(lha.get_block('SUSHIggh').get_entry_by_key(1))\n \n # Compare to Pythia\n with h5py.File(self.trainfiles[sample]) as hf:\n xsec = float(hf.get('data').attrs['cross_section'])\n xsec = xsec * 10e9 # convert from mb to pb\n \n print 'SAMPLE:', sample, ':\\tSusHi = %.4e, \\t Pythia = %.4e' % (self.xsec[sample], xsec)\n \n elif sample == 'Z':\n self.xsec[sample] = 2.7910 # from FEWZ at LO \n\n elif sample in self.backgrounds and sample != 'Z':\n \n # Open train set\n with h5py.File(self.trainfiles[sample]) as hf:\n xsec = float(hf.get('data').attrs['cross_section'])\n self.xsec[sample] = xsec * 10e9 # convert from mb to pb\n \n #print 'Cross section for %s = %.3e pb' % (sample, self.xsec[sample])\n \n return 0", "def __getitem__(self, idx):\n if idx >= len(self):\n raise StopIteration\n\n _, timesteps, height, width = self.data.shape\n\n # If time=True, return an entire time slice,\n # otherwise return a single time step\n t = slice(None)\n if not self.time:\n t = idx % timesteps\n idx = idx // timesteps\n\n if self.subset == 'train':\n # Right quadrants\n # Return a random subset\n xl = width // 2\n xr = width - self.size\n x = random.randint(xl, xr)\n\n yu = 0\n yl = height - self.size\n y = random.randint(yu, yl)\n elif self.subset == 'val':\n # Bottom left quadrant\n # Convert the index to a (row, col) location\n row = idx // (width // 2 // self.size)\n col = idx % (width // 2 // self.size)\n\n # Find the exact coordinates in the array\n y = row * self.size + (height // 2)\n x = col * self.size\n elif self.subset == 'test':\n # Top left quadrant\n 
# Convert the index to a (row, col) location\n row = idx // (width // 2 // self.size)\n col = idx % (width // 2 // self.size)\n\n # Find the exact coordinates in the array\n y = row * self.size\n x = col * self.size\n else:\n # All quadrants\n # Convert the index to a (row, col) location\n row = idx // (width // self.size)\n col = idx % (width // self.size)\n\n # Find the exact coordinates in the array\n y = row * self.size\n x = col * self.size\n\n data = self.data[:, t, y:y + self.size, x:x + self.size]\n target = self.segmentation[y:y + self.size, x:x + self.size]\n\n # Apply any requested transforms\n if self.transform:\n data = self.transform(data)\n\n if self.target_transform:\n target = self.target_transform(target)\n\n if isinstance(t, slice):\n t = np.zeros_like(y) - 1\n\n return data, target, t, y, x", "def slice_curve(y, x, x_min=None, x_max=None):\n if x_min is None:\n x_min = x.min()\n\n if x_max is None:\n x_max = x.max()\n\n indices = np.where(np.logical_and(x <= x_max, x >= x_min))\n return y[indices], x[indices]", "def bake_signal(signal, sr, duration):\n if signal[\"type\"] == \"sine\":\n carrier_domain = numpy.linspace(0, duration, int(sr*duration), endpoint=False) * numpy.pi * 2\n carrier_freq = signal[\"frequency\"]\n carrier_mult = bake_multiplier(signal[\"multiplier\"], sr, duration)\n carrier_modf = bake_modifier(signal[\"modifier\"], sr, duration)\n baked_signal = numpy.sin(carrier_domain * carrier_freq + carrier_modf) * carrier_mult\n else:\n print(\"error\")\n exit(-1)\n return baked_signal", "def pickRow(self, a, b, tag=True):\n for row in self.rows:\n if not row.intervalOccupied(a, b):\n row.markInterval(a, b, tag)\n return row\n\n row = self.newRow()\n row.markInterval(a, b, tag)\n return row", "def select_vert(img):\n\n # Local variable which breaks loop if area of interest is selected well\n OK = False\n\n # Main while-loop\n while OK == False:\n\n # Plot image\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.imshow(img, cmap=\"gray\")\n\n # Let user specify points\n coord = np.asarray(plt.ginput(4, show_clicks=True))\n p = Polygon(coord, linewidth=1, edgecolor='r', facecolor='none')\n plt.gca().add_artist(p)\n # Include area of interest in plot\n plt.draw()\n plt.show()\n\n # Ask user to accept or reject the proposed area of interest\n val = input(\"Is the region correct ([Y]/n)?\\n\")\n\n # Break if OK, re-do if not\n if val == \"Y\" or val == \"\":\n OK = True\n\n \"\"\"\n Creates a mask which marks the vertical line based on the coordinates given by the user.\n \"\"\"\n \n x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='xy')\n x, y = x.flatten(), y.flatten()\n pts = np.vstack((x,y)).T\n pts_t = tuple(map(tuple, pts))\n mask = np.ones((img.shape[0],img.shape[1]))\n for (x,y) in pts_t:\n if p.get_path().contains_point((x,y)):\n mask[y][x] = 0\n\n # Return mask which is the area of interest with value 1, 0 else\n return mask", "def on_floor_selected(self, floor):\n pass", "def Point_Pick(self):\n self.vtkWidget.iren.AddObserver('RightButtonPressEvent', self.pick_loc)\n self.renWin.Render()", "def sample_selection(points, gap_fraction):\n if gap_fraction == 0:\n return points\n n = len(points)\n k = int(n * (1. 
- gap_fraction / 100.))\n return sample(points, k)", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def sample_trajectory(trajectory, t):\n\t# First point\n\tif t <= 0.0:\n\t\treturn copy.deepcopy(trajectory.points[0])\n\t# Last point\n\tif t >= trajectory.points[-1].time_from_start.to_sec():\n\t\treturn copy.deepcopy(trajectory.points[-1])\n\t# Finds the (middle) segment containing t\n\ti = 0\n\twhile trajectory.points[i + 1].time_from_start.to_sec() < t:\n\t\ti += 1\n\n\treturn interp_cubic(trajectory.points[i], trajectory.points[i + 1], t)", "def prior_sample(self):\n pass", "def test_block_picking(multiblock_poly):\n\n pl = pyvista.Plotter()\n width, height = pl.window_size\n actor, mapper = pl.add_composite(multiblock_poly)\n\n picked_blocks = []\n\n def turn_blue(index, dataset):\n mapper.block_attr[index].color = 'blue'\n picked_blocks.append(index)\n\n pl.enable_block_picking(callback=turn_blue)\n pl.show(auto_close=False)\n\n # click in the corner\n assert not picked_blocks\n pl.iren._mouse_left_button_click(0, 0)\n assert not picked_blocks\n\n # click directly in the middle\n pl.iren._mouse_left_button_click(width // 2, height // 2)\n assert mapper.block_attr[2].color\n\n assert pl.picked_block_index == picked_blocks[0]", "def _selected_indices(self, subset):\n # We want the DataFrame to be indexed the same way its values array is\n ftr = self.frametracks.reset_index(drop=True)\n if subset is not None:\n ftr['tmpindex'] = ftr.index.values\n ftr = ftr.set_index('particle').reindex(subset).set_index('tmpindex')\n if self.autoclip:\n # Boundaries are computed for the whole system\n xmin = self.frametracks.x.min() + self.nncutoff\n xmax = self.frametracks.x.max() - self.nncutoff\n ymin = self.frametracks.y.min() + self.nncutoff\n ymax = self.frametracks.y.max() - self.nncutoff\n r = ftr.index[ (ftr.x > xmin) & (ftr.x < xmax) & \\\n (ftr.y > ymin) & (ftr.y < ymax) ].values.astype(int)\n else:\n r = ftr.index.values.astype(int)\n if self.fast:\n return np.random.permutation(r)[:int(len(r) / 10)]\n else:\n return r", "def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:\n # Retrieves the sequence from the bed generator\n x = super().__getitem__(idx)\n y = x.copy()\n x[:, self.window_length//2, :] = 0.25\n return x, y", "def slice(self, evidence={}):\n return condition(self,evidence)", "def get_selected_sections(sections):\n num_sections = len(sections)\n number = int(input('Enter Your Choice: '))\n while number > num_sections + 1:\n _print('Enter a valid Number between 1 and %d' % (num_sections + 1))\n number = int(input('Enter Your Choice: '))\n\n if number == num_sections + 1:\n return sections\n return [sections[number - 1]]", "def sample(self, observation):\n raise NotImplementedError", "def get_slice(self, frequency_band):\n index = frequency_band\n\n if isinstance(index, slice):\n types = {\n index.start.__class__,\n index.stop.__class__,\n index.step.__class__\n }\n\n if Hertz not in types:\n return index\n\n try:\n start = Hertz(0) if index.start is None else index.start\n if start < Hertz(0):\n start = self.stop_hz + start\n stop = self.stop_hz if index.stop is None else index.stop\n if stop < Hertz(0):\n stop = self.stop_hz + stop\n frequency_band = FrequencyBand(start, stop)\n except (ValueError, TypeError):\n pass\n\n start_index = bisect.bisect_left(\n self.band_stops, frequency_band.start_hz)\n stop_index = bisect.bisect_left(\n self.band_starts, 
frequency_band.stop_hz)\n\n if self.always_even and (stop_index - start_index) % 2:\n # KLUDGE: This is simple, but it may make sense to choose move the\n # upper *or* lower bound, based on which one introduces a lower\n # error\n stop_index += 1\n return slice(start_index, stop_index)", "def get_mask(self, h, k):\n return self.mask[(self.h==h)&(self.k==k)]" ]
[ "0.6373907", "0.6050952", "0.5974559", "0.54288924", "0.54075724", "0.5390018", "0.5377656", "0.5331255", "0.5308175", "0.5240958", "0.5233869", "0.51971555", "0.5189088", "0.5158308", "0.5147491", "0.5136177", "0.51356614", "0.50236404", "0.49819353", "0.49715742", "0.4927835", "0.49169132", "0.49138194", "0.4899514", "0.48878258", "0.48869595", "0.48764083", "0.48672202", "0.48371017", "0.48254293", "0.48199156", "0.48146015", "0.4814022", "0.47694275", "0.47670096", "0.47502506", "0.4748249", "0.47306567", "0.4718764", "0.471352", "0.4709442", "0.47078598", "0.47076085", "0.47066876", "0.47040337", "0.4696752", "0.46946386", "0.4684932", "0.46721488", "0.4665809", "0.46633393", "0.4661625", "0.46588558", "0.46532533", "0.46531513", "0.46399337", "0.4638315", "0.46230412", "0.46209663", "0.46208146", "0.46197122", "0.4614544", "0.46139732", "0.4605658", "0.46051332", "0.45868093", "0.4580865", "0.45728576", "0.45695877", "0.45665073", "0.45659488", "0.45561528", "0.45500416", "0.45489413", "0.45418838", "0.45377246", "0.45342728", "0.45341283", "0.45213595", "0.45210344", "0.45197734", "0.4512448", "0.4509684", "0.45072284", "0.4505615", "0.45054105", "0.44996488", "0.4496574", "0.44965425", "0.44953668", "0.44932193", "0.44890794", "0.44877872", "0.448271", "0.44767788", "0.44684333", "0.44660768", "0.44660264", "0.44658688", "0.44655743" ]
0.6796413
0
Returns the speech-shaped noise appropriate for the speech material.
def ssn(self, x=None):
    section = self.pick_section(self._ssn, x)
    if self.force_mono and section.ndim > 1:
        return section[0]
    return section
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def noise(self):\n return self._noise", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def get_noise(self):\n\n n = self.qubic.get_noise().ravel()\n n = np.r_[n, self.planck.get_noise().ravel()]\n\n return n", "def noise(self) -> Sequence:\n\n return self._noise", "def _get_noise(self, shape, dtype=None):", "def get_read_noise(self):\n\n read_noise_adu = self.ccd.read_noise / self.ccd.gain\n return numpy.random.normal(scale=read_noise_adu, size=self.image.shape)", "def _get_noise_m(self, shape, dtype=None):\n return self.range_to_m(self._get_noise(shape=shape, dtype=dtype))", "def get_estimated_noise(self):\n return self.gp_core.noise_var", "def noise_generator(self, power=None, SNR=None, size=None):\r\n alpha = self.db2power(SNR)\r\n sigma = np.sqrt(power / alpha) # 计算噪声标准差\r\n # 产生噪声\r\n noise_data = np.sqrt(0.5) * (np.random.normal(0, sigma, size=size) + np.random.normal(0, sigma, size=size) * 1j)\r\n noise_data = noise_data.astype(np.complex64)\r\n return noise_data", "def get_estimated_noise(self):\n raise NotImplementedError('Abstract Method.')", "def __call__(self, wav):\n beg_i = 0\n end_i = wav.shape[0]\n sel_noise = self.load_noise(self.sample_noise())\n if len(sel_noise) < len(wav):\n # pad noise\n P = len(wav) - len(sel_noise)\n sel_noise = np.pad(sel_noise, (0, P))\n # mode='reflect').view(-1).data.numpy()\n T = end_i - beg_i\n # TODO: not pre-loading noises from files?\n if len(sel_noise) > T:\n n_beg_i = np.random.randint(0, len(sel_noise) - T)\n else:\n n_beg_i = 0\n noise = sel_noise[n_beg_i:n_beg_i + T]\n # randomly sample the SNR level\n snr = random.choice(self.snr_levels)\n K, Ex, En = self.compute_SNR_K(wav, noise, snr)\n scaled_noise = K * noise\n if En > 0:\n noisy_wav = wav + scaled_noise\n noisy_wav = self.norm_energy(noisy_wav, Ex)\n else:\n noisy_wav = wav\n return noisy_wav", "def _get_noise(self, shape, dtype=None):\n return np.random.normal(self._bias, self._scale, shape).astype(dtype)", "def noise(self, freq: int, /) -> None:", "def get_estimated_noise(self):\n return self.gp_core.likelihood.noise.item()", "def _get_noise(self, shape, dtype=None):\n return np.random.uniform(self._low, self._high, shape).astype(dtype)", "def snr(self):\n\n return self.signal.astype(numpy.float32) / self.noise.astype(numpy.float32)", "def _sample_noise(self) -> np.ndarray:\n return np.random.randn(self.actor_action_size)", "def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise", "def noise(self, stddev):\n #add noise to weights\n pass", "def getRxNoise(self):\n \n return self.rx_noise", "def calculate_desired_noise_rms(clean_rms, snr):\n a = float(snr) / 20\n noise_rms = clean_rms / (10 ** a)\n return 
noise_rms", "def add_noise(spectrum,rms):\n noise = np.random.randn(spectrum.data.shape[0])*rms\n noisy_data = spectrum.data + noise\n noisy_spec = pyspeckit.Spectrum(xarr=spectrum.xarr,data=noisy_data)\n return noisy_spec", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def noiseReduction(self):\n pass", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length, self.noise_dim)", "def noisePreset() :\n s.noisePreset()", "def white_noise():\n return random.randint(-32767, 32767)", "def noise(self):\r\n if self.buffer_offset + self.frames_per_buffer - 1 > self.x_max:\r\n #relleno con ceros al final si es necesario\r\n xs = np.arange(self.buffer_offset, self.x_max)\r\n tmp = np.random.random_sample(len(xs)) #ruido\r\n out = np.append(tmp, np.zeros(self.frames_per_buffer-len(tmp)))\r\n else:\r\n xs = np.arange(self.buffer_offset,\r\n self.buffer_offset + self.frames_per_buffer)\r\n out = np.random.random_sample(len(xs))\r\n self.buffer_offset += self.frames_per_buffer\r\n return out", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length + 2 * self.seq_pad,\n self.noise_dim)", "def snr(mag=20, itime=1., read=24.5, sky=8.43, npix=24., zero=26.44, dark=0.0):\n # 2009-02-20 14:40 IJC: Initiated\n \n star = itime * 10**(0.4*(zero-mag))\n noise = npix * (itime*(sky+dark)+read**2)\n\n return star * (star+noise)**-0.5", "def get_noise(n_samples, noise_dims):\n return torch.randn(n_samples, noise_dims).to(device)", "def calc_noise(self, name):\n noise = self._noise_objs[name]\n cals = [self._cal_objs[cal].calc() for cal in self._noise_cals[name]]\n data = noise.calc_trace(cals)\n if isinstance(data, dict):\n return data['Total'][0]\n else:\n return data[0]", "def _get_noise(self, shape, dtype=None):\n return np.full(shape, self._bias, dtype)", "def predict_w_noise(self, xs, stochastic=True, **kwargs):\n raise NotImplementedError", "def add_noise(self, data):", "def get_noise(self, batch_size, max_len):\n\n noise_size = (batch_size, max_len, self.noise_ratio)\n noise_samples = self.alias.draw(1, 1, self.noise_ratio).expand(*noise_size)\n\n noise_samples = noise_samples.contiguous()\n return noise_samples", "def pink_noise():\n global curr_tick\n octave = octave_lookup[curr_tick]\n curr_noise[octave] = int(white_noise() / (5-octave))\n curr_tick += 1\n if curr_tick >= len(octave_lookup):\n curr_tick = 0\n return sum(curr_noise)", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def noisysphere(self, x, noise=4.0, cond=1.0):\r\n return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x))", "def obtain_noise_len(hyps, hyps_mask):\n\n # assume sigma_n is the final hyperparameter\n sigma_n = hyps[-1]\n # correct it if map is defined\n non_noise_hyps = len(hyps)-1\n train_noise = True\n if (hyps_mask is not None):\n train_noise = hyps_mask.get('train_noise', True)\n if (train_noise is False):\n sigma_n = hyps_mask['original'][-1]\n non_noise_hyps = len(hyps)\n\n return sigma_n, non_noise_hyps, train_noise", "def noise(self, xs, ys):\n raise NotImplementedError", "def gen_noise(sample_size, latent):\r\n\treturn Variable(torch.randn(sample_size, latent))", "def image(self):\n\n return self.signal + self.noise", "def get_noise(batch_size, total_continuous_noise_dims):\n # Get unstructurd noise.\n noise = tf.random_normal(\n [batch_size, total_continuous_noise_dims])\n\n return noise", "def add_gaussian_noise(self, 
samples):\n\n if 'sigma' in self.gaussian_component:\n sigma = self.gaussian_component['sigma']\n return samples + self.random_state.normal(size=samples.shape) * sigma\n if 'sigmas' in self.gaussian_component:\n sigmas = self.gaussian_component['sigmas']\n return samples + self.random_state.normal(size=samples.shape) * sigmas\n\n return samples", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def sky_noise(sky_file_name):\n fits_file = fits.open(sky_file_name)\n image_data = fits_file[0].data\n return image_data", "def process_noise_dist(self, dt=0.0):\n Q = self.process_noise_cov(dt)\n return dist.MultivariateNormal(\n torch.zeros(Q.shape[-1], dtype=Q.dtype, device=Q.device), Q\n )", "def ternary_noise(N_stimuli, Nx, Ny):\n return np.random.randint(-1, 2, size=(N_stimuli, Nx, Ny))", "def noisy_seismogram(t, seismogram, noise_amp=5):\n\n # Noise\n signoise = 2 * np.sqrt(3)\n\n # Create filter to take out high frequency noise\n filtgauss = gaussian(t, 75, signoise, 1.)\n filtgauss = filtgauss / sum(filtgauss)\n\n # Amplitude for the noise\n amp = noise_amp\n noise = 2 * (np.random.uniform(size=t.shape) - 0.5) * amp\n\n # Compute filtered noise\n filtnoise = np.real(ifft(fft(noise) * fft(filtgauss)))\n\n # Add noise to original seismogram\n noisemogram = seismogram + filtnoise\n\n return noisemogram", "def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample", "def get_snow(samples=1, num_dots=10, dark=True, img_h=100, img_w=100, **kwargs):\n result = [np.zeros((img_h, img_w)) for _ in range(0, samples)] if isinstance(samples, int) else samples\n num_dots = 10 * np.random.rand() + 10 if not num_dots else num_dots\n for _ in range(0, num_dots):\n r = (np.random.rand() * 5 + 5) / (img_h + img_w)\n get_ellipsis(samples=result, dark=dark, img_h=img_h, img_w=img_w, r=r, **kwargs)\n return result", "def sample(self,noise,s):\n rhs=self.prior.sqrtM*noise\n self.prior.Msolver.solve(s,rhs)", "def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)", "def add_noise_m(self, data):\n return self.range_to_m(self.add_noise(self.m_to_range(data)))", "def gaussian_noise(self, tensor):\n return tensor.new_empty(tensor.size()).normal_(std=self._discreteness)", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def addNoise(data, amp, scale):\n lfnData = addLFNoise(data, amp, scale)\n noisyData = addHFNoise(hfnData, amp)\n\n return noisyData", "def applyChannelNoise(self, signal):\n \n # If noise not applieable\n if self.rx_SNR == None:\n return signal\n \n # Average power for the convolved signal\n signal_power = np.mean(abs(signal**2))\n \n # Calculate the std for given signal power, based on rx_SNR\n sigma2 = signal_power * 10**( -self.rx_SNR/10)\n \n # Generate noise given std\n noise = np.sqrt(sigma2/2) * (np.random.randn(*signal.shape)+1j*np.random.randn(*signal.shape))\n \n # TODO -- IS THAT CORRECT? APPLY CIR AND THEN ABS\n if Global.IM_DD:\n noise = np.abs(noise)\n \n return noise + signal", "def synthesize(base, overlay, snr):\n assert -5 <= snr < 50\n noise_pre_scale = 1 - snr / 50\n if snr > 0:\n overlay = overlay * noise_pre_scale\n\n len_speech = base.shape[0]\n len_noise = overlay.shape[0]\n assert len_noise > len_speech\n\n start_point_noise = random.randint(0, len_noise - len_speech)\n overlay = overlay[start_point_noise: start_point_noise + len_speech]\n\n rms_overlay = rms(overlay)\n rms_base = rms(base)\n\n db_overlay = 20 * np.log10(rms_overlay + 1e-8)\n db_base = 20 * np.log10(rms_base)\n\n snr_origin = db_base - db_overlay\n db_adjust = snr - snr_origin\n scale_adjust = np.power(10, db_adjust / 20)\n\n output = overlay + base * scale_adjust\n return output", "def snr(signal, noise, impl):\n if np.abs(np.asarray(noise)).sum() != 0:\n ave1 = np.sum(signal) / signal.size\n ave2 = np.sum(noise) / noise.size\n s_power = np.sqrt(np.sum((signal - ave1) * (signal - ave1)))\n n_power = np.sqrt(np.sum((noise - ave2) * (noise - ave2)))\n if impl == 'general':\n return s_power / n_power\n elif impl == 'dB':\n return 10.0 * np.log10(s_power / n_power)\n else:\n raise ValueError('unknown `impl` {}'.format(impl))\n else:\n return float('inf')", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def _noise_matrix(self):\n\n return self._noise * (np.random.rand(self._rows, self._cols)\n - 0.5)", "def gen_return_signals(s, ny, nx):\n r = np.tile(s.reshape((len(s),1,1)),(1,ny,nx))\n base_noise = np.random.normal(loc=0.0, scale=1./(ny*nx), size=len(s))\n noise_multi = np.arange(0, ny*nx, 1).reshape(ny,nx)\n noise_cube = base_noise[:,None,None]*noise_multi\n return r+noise_cube", "def SNR_to_noise_power(signal_power, SNR_dB):\n noise_power = signal_power*10.**(-SNR_dB/10.)\n return noise_power", "def phosphate(self):\n index = self.var_index(4)\n return self.var_data(index)", "def noise_patch(patch,prc=0.2): #X\n npatch = patch.copy().reshape(-1,3)\n height,width = patch.shape[:2]\n nb =int(prc*height*width)\n npatch[np.random.randint(0,height*width,nb),:]=DEAD\n return npatch.reshape(height,width,3)", "def gen_loss_wasserstein(self, noise_samples):\n generator_samples = 
self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n\n loss = -tf.reduce_mean(logits_gen)\n return loss", "def rmsilence(sample):\n ns, ne = sample.wordseq[0][0][0], sample.wordseq[-1][0][1]\n return sample.signal[ns:ne]", "def generate_normal_data(avg_strike, avg_dip, n=10, noise_std=5, porp=2):\n opp_strike = avg_strike + 180\n if opp_strike > 360: \n opp_strike -= 360\n strike = avg_strike * np.ones(n)\n strike[n//porp:] = opp_strike\n dip = avg_dip * np.ones(n)\n \n # Add noise\n strike += noise_std * np.random.randn(n)\n dip += noise_std * np.random.randn(n)\n\n # Filter out things out of a reasonable range\n strike[dip > 90] -= 180\n dip[dip > 90] = 180 - dip[dip>90]\n\n strike[dip < 0] -= 180\n dip[dip < 0] *= -1\n\n strike[strike < 0] += 360\n strike[strike > 360] -= 360\n \n\n normal = geometric_functions.plane2normal(strike, dip)\n slip = geometric_functions.normal_slip(*normal)\n\n return strike, dip, normal, slip", "def estimate_noise_std(r_data):\n\n s_noise = 0.2 * np.max(abs(r_data))\n return s_noise", "def Noise(self, eps, size):\n return eps * (np.random.uniform(size=size) * 2 - 1)", "def synthetic_seismogram(green, wavelet):\n return np.real(ifft(fft(wavelet) * fft(green)))", "def estimate_noise(spec, ind_range=None):\n\n # -- set the index range (nb, ends at len(spec)-1 since the derivative has\n # one fewer points than the spectrum).\n ind_range = ind_range if ind_range else [0,spec.shape[0]-1]\n\n # -- compute the derivative and estimate the noise over the range\n noise = (spec[1:]-spec[:-1])[ind_range[0]:ind_range[1]].std(0)/np.sqrt(2.0)\n\n return noise", "def GSM_Denoise(Y, gsm_model, noise_std):\n X = np.empty(Y.shape)\n k = gsm_model.mix.shape[0]\n I = np.identity(gsm_model.cov[0, :].shape[0])\n for i in range(k):\n mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I)))\n upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i]))\n lower_arg = 0\n for j in range(k):\n inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I)))\n lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i]))\n c_i = upper_arg / lower_arg\n weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std)\n X += c_i * weiner_i\n return X", "def noise_data(self, x):\n return x + np.random.normal(size=x.shape)", "def get_silence(self, duration):\n nsamples = int(self.sample_rate * duration)\n return \"\".join([wave.struct.pack('h', 0) for i in range(0, nsamples)])", "def add_noise_at_snr(channel_in, snr):\n\n rms_channel = np.sqrt(np.mean(channel_in ** 2.0))\n noise_std = rms_channel / np.sqrt(10.0 ** (snr/10.0))\n\n return channel_in + np.random.normal(size=channel_in.shape, scale=noise_std)", "def generate_worley_noise(width, height, npoints, option, noise_background):\n\n points = [(random.randint(0, width), random.randint(0, height)) for _ in range(npoints)]\n image_worley = np.full((height, width), fill_value=noise_background, dtype=np.float64)\n\n for y in nb.prange(height):\n for x in nb.prange(width):\n distances = [np.sqrt((p[0] - x) ** 2 + (p[1] - y) ** 2) for p in points]\n image_worley[y, x] = sorted(distances)[option]\n return image_worley", "def get_noise_frames(samples, sampling_rate, window_width=2048, stepsize=512, verbose=False):\n\t\n\t# Separate the samples in frames according to the window_width and stepsize\n\tnr_of_frames, frames = get_frames(samples, window_width=window_width, stepsize=stepsize)\n\t\n\t# Use a window function (hamming works best) on all our frames\n\tframes = 
window_function_transform(frames)\n\t\n\t# Get the statistical features that we need. For now only 'energy' works.\n\tenergies, mean_energy = get_statistcal_features( frames )\n\t\n\t# Get the energy coefficient that we need for separating pure noise from non-pure noise.\n\tSNR, energy_coefficient = compute_energy_coefficient(samples, base_coefficient=2)\n\t\n\tif verbose:\n\t\tprint(\"Energy coefficient: \" + str(round(energy_coefficient, 3) ) )\n\t\tprint(\"Signal-to-Noise: \" + str(round(SNR, 3)))\n\t\n\t\"\"\" Separating pure noise from non-pure noise. \"\"\"\n\t\n\t# Initiate lists to store the separated frames in.\n\tnoisy_frames = []\n\tnon_noisy_frames = []\n\tnoisy_energy = []\n\tnon_noisy_energy = []\n\t\n\t# Go through all of the frame-energies. The ones below a certain threshold have a very high chance of being pure background noise.\n\tfor index, energy in enumerate(energies):\n\t\t\n\t\tif energy < energy_coefficient * mean_energy:\n\t\t\t\n\t\t\t# Add the pure noisy parts to the appropriate list\n\t\t\tnoisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])\n\t\t\tnoisy_energy.append(energy)\n\t\t\n\t\telse:\n\t\t\t# Add the non-noise frames to the appropriate list\n\t\t\tnon_noisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])\n\t\t\tnon_noisy_energy.append(energy)\n\t\n\tif verbose:\n\t\t\n\t\t# A measure for how well the noise is predictable (higher is better). The better predictable it is, the better a spectral noise gate will work\n\t\tprint(\"Noise predictability: \" + str(round(autocorr(noisy_frames)[0,1] / autocorr(non_noisy_frames)[0,1], 3) ) )\n\t\n\t\t\"\"\" Plotting \"\"\"\n\t\t\n\t\t# Initiate time domain axes for some different graphs\n\t\tt_soundwave = np.linspace(0, len(samples)/sampling_rate, len(samples))\n\t\tt_soundwave_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_frames))\n\t\tt_soundwave_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_frames))\n\t\t\n\t\tt_windowed_features = np.linspace(0, len(samples)/sampling_rate, nr_of_frames)\n\t\tt_windowed_features_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_energy))\n\t\tt_windowed_features_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_energy))\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy whole signal\")\n\t\tplt.plot(t_soundwave, preprocessing.normalize(samples), alpha=0.5)\n\t\tplt.plot(t_windowed_features, preprocessing.normalize(energies))\n\t\tplt.show()\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy pure noise signal\")\n\t\tplt.plot(t_soundwave_noisy, preprocessing.normalize(noisy_frames), alpha=0.5)\n\t\tplt.plot(t_windowed_features_noisy, preprocessing.normalize(noisy_energy) )\n\t\tplt.show()\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy non pure noise signal\")\n\t\tplt.plot(t_soundwave_non_noisy, preprocessing.normalize(non_noisy_frames), alpha=0.5)\n\t\tplt.plot(t_windowed_features_non_noisy, preprocessing.normalize(non_noisy_energy))\n\t\tplt.show()\n\t\n\treturn np.array(noisy_frames)", "def create_noise_data_from_original():\n # Get the noise values we'll choose from.\n noiseValues = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime >= '2017-06-14 07:26:24',\n AudioDataOriginal.datetime <= '2017-06-14 
07:27:54',\n ).all()\n # Add a noise value to each record with a `processedValue`, as these are the\n # only ones used in the model generation later on.\n audioSamples = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime > '2017-06-14 07:27:54',\n AudioDataOriginal.processedValue.isnot(None)\n ).all()\n for sample in audioSamples:\n noiseRecord = random.choice(noiseValues)\n sample.noiseValue = noiseRecord.audio\n db.session.commit()", "def _gauss_noise(self, shape):\n\n n = np.random.normal(0, 1, shape)\n return self.e*n", "def random_noise_levels():\n log_min_shot_noise = math.log(0.0001)\n log_max_shot_noise = math.log(0.012)\n log_shot_noise = random.uniform(log_min_shot_noise, log_max_shot_noise)\n shot_noise = math.exp(log_shot_noise)\n\n line = lambda x: 2.18 * x + 1.20\n log_read_noise = line(log_shot_noise) + random.gauss(mu=0.0, sigma=0.26)\n read_noise = math.exp(log_read_noise)\n return shot_noise, read_noise", "def sample_noise(batch_size, dim):\n temp = torch.rand(batch_size, dim) + torch.rand(batch_size, dim)*(-1)\n\n return temp", "def scale_noise(size: int) -> torch.Tensor:\n x = torch.FloatTensor(np.random.normal(loc=0.0, scale=1.0, size=size))\n\n return x.sign().mul(x.abs().sqrt())", "def test_heteroscedastic_noise_signal(self):\n ts = self.create_ts(length=100 * 24, signal_to_noise_ratio=0, freq=\"1h\")\n\n # add heteroscedastic noise to the data\n\n ts.value *= (\n (\n (ts.time - pd.to_datetime(\"2020-01-01\")) % timedelta(days=7)\n > timedelta(days=3.5)\n )\n * np.random.rand(100 * 24)\n * 0.5\n * 2\n + 1\n - 0.5\n )\n\n ts.value[93 * 24] += 100\n ts.value[96 * 24] += 100\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response = model.fit_predict(ts[90 * 24 :], ts[: 90 * 24])\n\n self.assertGreater(response.scores.value[3 * 24], response.scores.value[6 * 24])", "def _get_sample(self):\n mu = self._get_mean().reshape((1, self.out_dim))\n sigma = self.variables[\"s\"]\n sample = self.random.normal(mu, sigma)\n sample = sample.reshape(self.out_dim)\n return sample", "def mock_noise(scale, size):\n return np_repeat(scale, size)", "def make_noise(self, signal_only):\n\n #print >> sys.stdout, \"generating noise...\"\n\n if signal_only:\n\n # the noise is just a time series of zeros\n \n self.td_noise = pycbc.types.timeseries.TimeSeries(\n initial_array=np.zeros(self.duration/self.delta_t),\n delta_t=self.delta_t, epoch=self.epoch)\n\n else:\n # Generate noise \n self.assign_noise_curve()\n\n # Generate time-domain noise\n # XXX: minimum duration seems to be 1 second. 
I'll hack around this by\n # reducing the 1 second to the desired duration\n tmplen=max(self.duration,1.0)/self.delta_t\n self.td_noise = pycbc.noise.noise_from_psd(int(tmplen), self.delta_t,\n self.psd, seed=self.seed)\n\n self.td_noise = \\\n pycbc.types.TimeSeries(self.td_noise.data[:self.duration/self.delta_t],\n delta_t=self.delta_t)\n\n # XXX not sure if this is a good idea...\n self.td_noise.start_time = float(self.epoch)\n\n self.fd_noise = self.td_noise.to_frequencyseries()", "def get_sample(x, y):\n return noise[x][y]", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, 
metallicity, logg", "def get_noisy_output_of_system(self, y_without_noise):\n # There were some problems with copying the array data so I just wrote a copy command for every single line\n if self.bOutputNoise:\n if np.size(y_without_noise, 0) == 3:\n y_with_noise = np.zeros(3)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n elif np.size(y_without_noise, 0) == 5:\n y_with_noise = np.zeros(5)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n y_with_noise[3] = y_without_noise[3] + np.random.normal(0, np.sqrt(self.f_var), 1)[0]\n y_with_noise[4] = y_without_noise[4] + np.random.normal(0, np.sqrt(self.b_var), 1)[0]\n else:\n y_with_noise = y_without_noise\n return y_with_noise", "def noise_level(data):\n length=len(data) - 2\n dev=[]\n for i in range(1,length - 1):\n dev.append((abs(data[i] - data[i-1]) + abs(data[i] - data[i + 1]))/2)\n dev.sort()\n return dev[round(0.9*length)]", "def add_noise(self, words, lengths):\n words, lengths = self.word_shuffle(words, lengths)\n words, lengths = self.word_dropout(words, lengths)\n # words, lengths = self.word_blank(words, lengths)\n return words, lengths", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def get_noise(input_depth, method, spatial_size, noise_type='u', var=1./10):\n if isinstance(spatial_size, int):\n spatial_size = (spatial_size, spatial_size)\n if method == 'noise':\n shape = [1, input_depth, spatial_size[0], spatial_size[1]]\n net_input = torch.zeros(shape)\n \n fill_noise(net_input, noise_type)\n net_input *= var \n elif method == 'meshgrid': \n assert input_depth == 2\n X, Y = np.meshgrid(np.arange(0, spatial_size[1])/float(spatial_size[1]-1), np.arange(0, spatial_size[0])/float(spatial_size[0]-1))\n meshgrid = np.concatenate([X[None,:], Y[None,:]])\n net_input= np_to_torch(meshgrid)\n else:\n assert False\n \n return net_input", "def add_noise(image, type=\"s&p\"):\n # Get the width and height of the image\n w, h = image.size\n\n # Add salt and pepper noise\n if type == \"s&p\":\n # Choose a random amount of noise (lower number = more noise)\n salt = np.random.randint(100, 400)\n # Generate an array to determine location of noise\n noise = np.random.randint(salt+1, size=(h, w))\n\n # Find the index of the salt and pepper (respectively location with max/min random value)\n idx_salt = noise == salt\n idx_pepper = noise == 0\n\n # Create a numpy array from the initial image and add the salt and pepper\n np_img = np.array(image)\n np_img[idx_salt, :] = 255\n np_img[idx_pepper, :] = 0\n\n return Image.fromarray(np.uint8(np_img))\n\n # Add gaussian noise to image\n if type == \"gauss\":\n # Get the number of channels\n c = len(image.getbands())\n\n # Get a random value for the mean and the standard deviation of the noise\n mean = np.random.randint(-4, 5)\n std = np.random.randint(5)\n\n # Generate the noise array\n noise = np.random.normal(mean, std, (h, w, c))\n\n # Add noise to the image\n return Image.fromarray(np.uint8(np.array(image) + noise))\n\n else:\n # If the name of the given noise is not correct\n return image", "def 
add_noise(self):\n self.segments = deepcopy(self.segments)\n # Iterate through each of the first three Segments in the WordForm.\n for i in range(3):\n # Add noise to each Segment.\n self.segments[i].add_noise()", "def __mix_with_snr(self, sig_spk, sig_noise, need_snr):\n\n # Calc SNR\n pow_sp = np.sum((sig_spk) ** 2) / float(len(sig_spk))\n pow_noise = np.sum((sig_noise) ** 2) / float(len(sig_noise))\n actual_snr = 10 * np.log10(pow_sp / (pow_noise + self.eps))\n alfa = pow(10.0, (actual_snr - need_snr) / 20.0)\n sig_noise = sig_noise * alfa\n\n return sig_spk, sig_noise" ]
[ "0.6940478", "0.6876316", "0.67471814", "0.67298526", "0.6711006", "0.6499605", "0.641281", "0.63947856", "0.6361855", "0.624543", "0.6218755", "0.60979575", "0.6082138", "0.6052489", "0.6041792", "0.59897363", "0.59645706", "0.59612185", "0.59013486", "0.5857421", "0.58342046", "0.5811947", "0.58096886", "0.5803915", "0.58007145", "0.57960457", "0.5783553", "0.5753302", "0.5738884", "0.57278013", "0.57236505", "0.5720171", "0.57081395", "0.56873304", "0.56459963", "0.5636207", "0.56268984", "0.56145006", "0.55924815", "0.5589793", "0.5587017", "0.55843663", "0.55728436", "0.5572623", "0.55488986", "0.5547513", "0.55109954", "0.5501338", "0.54931307", "0.5479606", "0.5474977", "0.54724216", "0.5460117", "0.5442728", "0.544074", "0.5422647", "0.5418621", "0.5407494", "0.54020596", "0.5388083", "0.53773266", "0.5372282", "0.5351069", "0.53483427", "0.53469604", "0.534582", "0.53405124", "0.53341764", "0.5329352", "0.5321874", "0.5310836", "0.5302201", "0.5300253", "0.5293672", "0.5289065", "0.52889717", "0.5287972", "0.5285174", "0.5281956", "0.5271112", "0.5267471", "0.5267408", "0.5266974", "0.5264415", "0.5256357", "0.5255598", "0.5246581", "0.5239109", "0.5230852", "0.5230451", "0.5227281", "0.52202326", "0.52174526", "0.52167547", "0.52166414", "0.5202449", "0.5200737", "0.51965296", "0.5196027", "0.51932275", "0.5192635" ]
0.0
-1
Set level of a sentence, in dB.
def set_level(self, x, level): return x * 10 ** ((level - self.ref_level) / 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")", "def setLevel(self, level):\n self.lvl = level", "def __change_level(self, level):\n self.level = level", "def setLevel(self, level):\n self.level = level", "def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)", "def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L", "def set_level(self, level: LogLevel):\n pass", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "async def _sentence_setheist(self, ctx, seconds: int):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.config.guild(guild).Theme()\r\n t_jail = theme[\"Jail\"]\r\n t_sentence =theme[\"Sentence\"]\r\n\r\n if seconds > 0:\r\n config[\"Sentence\"] = seconds\r\n await self.thief.config.guild(guild).Config.set(config)\r\n time_fmt = self.thief.time_format(seconds)\r\n msg = \"Setting base {} {} to {}.\".format(t_jail, t_sentence, time_fmt)\r\n else:\r\n msg = \"Need a number higher than 0.\"\r\n await ctx.send(msg)", "def gaindb(self, value):\n self._logger.debug(\"setting gain: %7.2f\", value)\n self._gaindb = value\n self._update()", "def set_volume_level(self, volume):\n volume_percent = str(int(volume*100))\n self._lms.query(self._id, 'mixer', 'volume', volume_percent)\n self.update_ha_state()", "def setLevel( self, lvl ):\n if isinstance( lvl, str ):\n return super().setLevel( lvl.upper() )\n else:\n return super().setLevel( lvl )", "def setTriggerLevel(self, Level, stringOnly=0):\n\n msg = \"TRIGger:SEQuence:LEVel \" + str(Level)\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def set_level(self, level):\n if self._level_fixed:\n raise NameError(\"set_level() can be called only once !\")\n\n try:\n Level(level)\n except ValueError:\n raise ValueError(\"LEVEL parameter must be a Level\")\n\n self._level = Level(level)\n self._level_fixed = True", "async def set_volume(self, level: float) -> None:\n if 0.0 <= level <= 100.0:\n await self.relay(\"set_volume\")(level)\n else:\n raise exceptions.ProtocolError(f\"volume {level} is out of range\")", "async def _set_migration_level(self, table: str, level: int) -> None:\n await self.conn.execute('UPDATE tinymud_migrations SET level = $1 WHERE table_name = $2', level, table)", "def set_level(self, level_name):\n\n self.current_level = level_name", "def setLogLevel(self,value):\n self.PDFreactorConfiguration.in1[\"logLevel\"] = value", "async def volume(self, ctx, level:int):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n if 0 <= level <= 200:\n voice.source.volume = level / 100\n await ctx.send(f\"Adjusted volume to {level}%.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n 
text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)", "def setEventLevel(self, Level, stringOnly=0):\n\n msg = \"EVENt:LEVel \" + str(Level)\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def set_level(self, level: str):\n self._logger.setLevel(getattr(logging, level))", "def setlevel(self, lvl):\n self.logger.setLevel(lvl)", "def update_skill_level_info(self, skill_string):\r\n (self.__skills_ui_elem_ALL[skill_string][\"lvl_indicator\"].\r\n configure(\r\n text=str(self.__skills[skill_string].skill_level)+\"/5\"))", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def set_volume_level(self, volume):\n self._device.set_volume(mute=False, volume=int(volume * 100))\n self._volume = volume", "def level(self, L):\n assert isinstance(L, level)\n self.__level = L", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def set_volume_level(self, volume):\n # self._vlc.audio_set_volume(int(volume * 100))\n self._volume = volume", "def set_volume_level(self, level):\n self._remote.volume(int(level * 60))", "def level(self, level: int):\n if level is None:\n raise ValueError(\"Invalid value for `level`, must not be `None`\")\n\n self._level = level", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def set_power(self, power):\n print('Setting santec power to %.4f mW' % power)\n self.santec1.write(\"LP %.2f\" % power)\n self.santec2.write(\"LP %.2f\" % power)\n self.santec3.write(\"LP %.2f\" % power)\n self.santec4.write(\"LP %.2f\" % power)", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def set_volume_level(self, volume):\n self._volume = volume", "def set_volume_level(self, volume):\n self._volume = volume", "async def level(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n await ctx.send(f\"{ctx.author.mention}, your level is {level}. 
Use the `-info` command to learn more!\")", "async def set_level(user_id, level, command):\n async def update_level():\n \"\"\"Updates a user's level.\"\"\"\n await ex.conn.execute(f\"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2\", level, user_id)\n\n count = ex.first_result(await ex.conn.fetchrow(f\"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1\", user_id))\n if not count:\n await ex.conn.execute(\"INSERT INTO currency.Levels VALUES($1, NULL, NULL, NULL, NULL, 1)\", user_id)\n await update_level()\n else:\n await update_level()", "def set_food_level(self, amount):\n self.plant = amount", "def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL", "def set_volume_level(self, volume):\n targetVolume = volume * 100\n tempVolume = -1\n oldVolume = -2\n i = 0\n while int(targetVolume) != tempVolume:\n self.getVolume()\n tempVolume = self._volumeLevel\n i = i + 1\n if tempVolume != oldVolume or i >= 10:\n i = 0\n if tempVolume > targetVolume:\n self.volume_down()\n else:\n self.volume_up()\n oldVolume = tempVolume", "def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text", "def level(self, value):\n self._level = mdraid.RAID_levels.raidLevel(value) # pylint: disable=attribute-defined-outside-init", "def setVoiceVolume(self, volume):\n\n try:\n assert volume >= 0 and volume <= 1.0\n\n except AssertionError:\n self.logger.warning(\"Incorrect volume, 0.5 taken into account\")\n volume = 0.5\n\n self.tts.setVolume(volume)", "def setAmbientLevel(self, channel, levelInDb, unitCode=0):\n # Ensure compliance with level boundary conditions\n if levelInDb > 0:\n levelInDb = 0\n elif levelInDb < -70:\n levelInDb = -70\n resp = self.XAPCommand('AMBLVL', channel, levelInDb, unitCode=unitCode)\n return float(resp)", "def set_level(self, debug_level, verbose=False):\n self.debug_level = debug_level\n self.verbosity = verbose\n level = logging.INFO\n if debug_level > 4:\n level = logging.DEBUG - 3\n elif debug_level > 0:\n level = logging.DEBUG - debug_level + 1\n elif verbose:\n level = logging.INFO - 1\n self.mylog.setLevel(level)\n self.handler.setLevel(level)", "def set_gain(self, *args):\n return _uhd_swig.usrp_sink_set_gain(self, *args)", "def position_d_gain(self, value):\n self._write(MX_POSITION_D_GAIN, value)", "def set_logger_level(lgr, level):\n if isinstance(level, int):\n pass\n elif level.isnumeric():\n level = int(level)\n elif level.isalpha():\n level = getattr(logging, level)\n else:\n lgr.warning(\"Do not know how to treat loglevel %s\" % level)\n return\n lgr.setLevel(level)", "def level(self, log_level):\n self.logger.setLevel(log_level)", "def set_brightness(self, level):\n print(\"Got request to set brightness with level: %s\" % (level))\n # Home assistant sets brightness on a scale of 0 to 255\n if level > 0 and level < 255:\n new_level = level / 255\n print(\"Setting brightness to %s\" % (new_level))\n self.turn_on(r=self.r, g=self.g, b=self.b, brightness=new_level)\n self.client.publish(BRIGHTNESS_STATE_TOPIC, level) #publish", "def TL_power(self,power):\n self.write(self.headStr('TL')+'TPDB %d',power)", "def set_power_dbm(self, power=None):\n if power is None:\n power = self.def_power\n self.instr.write('L1 ' + 
str(power + ' DM'))\n time.sleep(self.sleep_time)", "def update_playback_gain(self, val):\n self.playbackGain = 10**(5.0*(val - self.speedDial.maximum()/2)/self.speedDial.maximum())", "def log_lvl(lvl):\n logs.set_level(logging.getLogger(\"plysp\"), lvl)", "def set_volume_level(self, volume: float) -> None:\n self._get_chromecast().set_volume(volume)", "def write_level(self, current_level):\n try:\n if isinstance(current_level, numbers.Number):\n if 1 <= current_level <= 3:\n current_level = str(current_level)\n self.store.put(LEVEL_STORE, level=current_level)\n except:\n print \"Error: cannot save game level!\"", "def level(self, level=ERROR):\n try:\n self._level = level_dict[level]\n except KeyError:\n raise ValueError(f\"Input level is invalid.\")\n self.cnsl_handler.setLevel(level=self._level)\n self.file_handler.setLevel(level=self._level)\n self.logger.setLevel(level=self._level)", "def dB(x, power=True):\r\n if not power:\r\n return 20 * np.log10(np.abs(x))\r\n return 10 * np.log10(np.abs(x))", "async def async_set_volume_level(self, volume: float) -> None:\n await self._volumio.set_volume_level(int(volume * 100))", "def change_level(level):\n if 'debug' in level: LOG.setLevel(logging.DEBUG)\n elif 'info' in level: LOG.setLevel(logging.INFO)\n elif 'warning' in level: LOG.setLevel(logging.WARNING)\n elif 'error' in level: LOG.setLevel(logging.ERROR)\n elif 'critical' in level: LOG.setLevel(logging.CRITICAL)\n Logger.log('info', 'This logger changed the messages priority level to ', level)", "def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE", "def setLevelReached(self, level):\n \n if(0 < level and level < 6 and self.__levelReached < level):\n self.__levelReached = level\n self.savePlayerInfo()\n return True\n else:\n return False\n print\"level reached: \" + self.__levelReached", "def set_volume_effects(self, value):\n\t\tif self._setting.get(FIFE_MODULE, \"PlaySounds\"):\n\t\t\tself.emitter['effects'].setGain(value)\n\t\t\tself.emitter['speech'].setGain(value)\n\t\t\tfor e in self.emitter['ambient']:\n\t\t\t\te.setGain(value*2)", "def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"SEVERE\", \"FINE\", \"FINER\", \"FINEST\"]\n if level not in allowed_values:\n raise ValueError(\n \"Invalid value for `level` ({0}), must be one of {1}\"\n .format(level, allowed_values)\n )\n\n self._level = level", "async def async_set_volume_level(self, volume: float) -> None:\n await self._table.set_speed(volume)", "def set_threshold_levels(self, event_name, val):\n if self.validate_supply_name(event_name, \"events/\") and val:\n self.console.runcmd(f\"echo {val} > events/{event_name}\")\n else:\n assert (\n False\n ), \"A valid event name or the value, is not given while setting levels\"", "def dB(x, power=False):\n if power:\n return 10 * np.log10(np.abs(x))\n else:\n return 20 * np.log10(np.abs(x))", "def incrementWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=level+1 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)", "async def update_level():\n await ex.conn.execute(f\"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2\", level, user_id)", "def set_log_level(self, level):\n if level == 'info':\n level = logging.INFO\n if level == 'debug':\n level = logging.DEBUG\n if level == 'error':\n level = logging.ERROR\n self._log.setLevel(level)", "def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, 
_arg)", "def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")", "def set_gain(self, *args):\n return _uhd_swig.usrp_source_set_gain(self, *args)", "def set_volume(self, val):\n self.sound.volume = val", "def score(self, sentence):\n s = 0;\n\n #for every word\n for i in xrange(len(sentence)):\n score = self.getBackOff(tuple(sentence[:i+1]));\n if(score != 0):\n s += math.log(score);\n\n return s", "def set_level(self, device_id, new_level):\n\t\treturn self.post(self.value_url % (ART_SERVER_HOST, device_id), {'value':new_level })", "def gain(self, value: int):\n self._gain = value", "def set_volume_level(self, volume: float) -> None:\n self._monoprice.set_volume(self._zone_id, round(volume * MAX_VOLUME))", "def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))", "def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)", "def addLevel(self, amount):\r\n debug.write(\"[SourceRPG] Handling addLevel\", 1)\r\n self.player['level'] += amount\r\n \r\n \"\"\" If turbo mode is on multipliy the credits received \"\"\"\r\n if currentTurboMode:\r\n self.player['credits'] += int( amount * int(creditsReceived) * float(turboCreditMultiplier))\r\n else:\r\n self.player['credits'] += amount * int(creditsReceived)\r\n \r\n \"\"\" Check if the level has reached the limit \"\"\"\r\n if int(maxLevel) and self.player['level'] > int(maxLevel):\r\n debug.write(\"Maximum level reached, ensure that resetSkills\", 1)\r\n \"\"\" If we want to reset the skills, reset them \"\"\"\r\n if int(maxLevelReset):\r\n self.resetSkills()\r\n tell(self.userid, 'maximum level reached')\r\n debug.write(\"Levels Reset\", 1)\r\n else:\r\n \"\"\" Othewise assign the level and XP to the maximum possible \"\"\"\r\n self.player['level'] = int(maxLevel)\r\n self.player['xp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp) - 1\r\n debug.write(\"Assigned XP to maximum value\", 1)\r\n else: \r\n \"\"\" The level is okay, check for bots and play the message etc \"\"\"\r\n if not self.player.isbot:\r\n debug.write(\"Player is not a bot\", 2)\r\n \"\"\" Only do the following for humans \"\"\"\r\n if not int(levelUp):\r\n tokens = {}\r\n tokens['level'] = self.player['level']\r\n tokens['xp'] = self.player['xp']\r\n tokens['nextxp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp) - self.player['xp']\r\n tell( self.userid, 'level gained private', tokens )\r\n \r\n if self.player['popup']:\r\n debug.write(\"Building skill menu\", 1)\r\n buildSkillMenu(self.userid)\r\n \r\n else:\r\n \"\"\" Player is a bot, check for the maximum possible level for a bot \"\"\"\r\n debug.write(\"Bot leveled up, choose a random skill\", 2)\r\n if int(botMaxLevel) and self.player['level'] > int(botMaxLevel):\r\n debug.write(\"Reset bot's skills, maximum level achieved\", 2)\r\n self.resetSkills()\r\n else:\r\n \"\"\" Upgrade a random skill if possible \"\"\"\r\n while True:\r\n \"\"\" Loop until we manually break \"\"\"\r\n possibleChoices = []\r\n credits = self.player['credits']\r\n for skill in skills:\r\n \"\"\" \r\n Iterate through all loaded skills and if the bot\r\n can afford the skill, append it to the possible choices\r\n \"\"\"\r\n if credits >= self.player[skill.name] * skill.creditIncrement + skill.startCredit:\r\n if self.player[skill.name] < skill.maxLevel:\r\n possibleChoices.append(skill.name)\r\n if not possibleChoices:\r\n \"\"\" \r\n The bot cannot afford any skills or has maxed out\r\n the skills, the manually 
break\r\n \"\"\"\r\n break\r\n \r\n \"\"\" \r\n Finally call the checkSkillForUpgrading function passing\r\n the arguments manually rather than letting a popup do it\r\n \"\"\"\r\n debug.write(\"Checking to update a skill\", 2)\r\n checkSkillForUpgrading(self.userid, random.choice(possibleChoices), None, False )\r\n \r\n if int(levelUp):\r\n tokens = {}\r\n tokens['name'] = self.player.name\r\n tokens['level'] = self.player['level']\r\n tokens['xp'] = self.player['xp']\r\n tokens['nextxp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp)\r\n \r\n for userid in filter( lambda x: not es.isbot(x), es.getUseridList()):\r\n tell(userid, 'level gained global', tokens)\r\n \r\n if str(levelupSound):\r\n es.emitsound('player', self.userid, str(levelupSound), 0.7, 0.5 )\r\n \r\n \"\"\" Create and fire the levelup event \"\"\"\r\n values = {}\r\n values[\"userid\"] = (\"setint\", self.userid)\r\n values[\"newlevel\"] = (\"setint\", self.player['level'])\r\n values[\"oldlevel\"] = (\"setint\", self.player['level'] - amount)\r\n values[\"amount\"] = (\"setint\", amount)\r\n values[\"xp\"] = (\"setint\", self.player['xp'])\r\n values[\"xpneeded\"] = (\"setint\", (self.player['level'] - 1) * int(xpIncrement) + int(startXp))\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_levelup\", values))\r\n debug.write(\"[SourceRPG] Handled addLevel\", 1)", "async def async_set_volume_level(self, volume: float) -> None:\n await self._client.set_volume(round(volume * 100))\n self.async_write_ha_state()", "def lineartodB(lin):\r\n return 20*np.log10(lin)", "async def set_volume(self, group_id: int, level: int) -> None:\n if not self.VOLUME_MIN <= level <= self.VOLUME_MAX:\n raise ValueError(f'Level must be between {self.VOLUME_MIN} and {self.VOLUME_MAX}')\n\n await self._api.call('group', 'set_volume', gid=group_id, level=level)", "def set_volume(self, value):\n utils.set_volume(self.config[\"alsa\"][\"card\"], value) # Sets the actual volume level\n\n if value == 0:\n mode = \"muted\"\n elif value <= 25:\n mode = \"low\"\n elif value <= 75:\n mode = \"medium\"\n else:\n mode = \"high\"\n \n icon = utils.get_volume_icon(mode)\n self.settings_window.volume_label.setPixmap(icon)", "def setLevel(level='info'):\n\n mapper = {\n 'critical' : logging.CRITICAL, \n 'error' : logging.ERROR,\n 'warning' : logging.WARNING,\n 'info' : logging.INFO,\n 'debug' : logging.DEBUG,\n }\n if level not in mapper:\n raise ValueError('level must be one of these: {}'.format(list(mapper.keys())))\n else:\n logger.setLevel(mapper[level])", "async def async_set_volume_level(self, volume: float) -> None:\n await self._group.set_volume(round(volume * 100))\n self.async_write_ha_state()", "def gain2dB(gain):\n dB = 20*math.log(gain)\n return dB", "def increment_level(self):\n self.level += 1\n styled_set_label_text(self.level_display, \"Level: \"+str(self.level))\n glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))", "def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"ERROR\"]\n if level.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for level -> \" + level)\n self._level = \"outdated_sdk_version\"\n else:\n self._level = level", "def _set_number_of_levels(self, number_of_levels):\n if not number_of_levels & 1:\n number_of_levels -= 1\n logging.warning('Set number of levels to an odd number %r',\n number_of_levels)\n\n self._number_of_levels = number_of_levels\n self._compute_quantization_factor()", "def setLevelAndVersion(self, level, version):\n\n if level == 2 and 
version == 1:\n self.check(\n self.document.checkL2v1Compatibility(), \"convert to level 2 version 1\"\n )\n elif level == 2 and version == 2:\n self.check(\n self.document.checkL2v2Compatibility(), \"convert to level 2 version 2\"\n )\n elif level == 2 and version == 3:\n self.check(\n self.document.checkL2v3Compatibility(), \"convert to level 2 version 3\"\n )\n elif level == 2 and version == 4:\n self.check(\n self.document.checkL2v4Compatibility(), \"convert to level 2 version 4\"\n )\n elif level == 3 and version == 1:\n self.check(\n self.document.checkL3v1Compatibility(), \"convert to level 3 version 1\"\n )\n else:\n raise SystemExit(\"Invalid level/version combination\")\n\n isSet = self.document.setLevelAndVersion(level, version)\n self.check(isSet, \"convert to level \" + str(level) + \" version \" + str(version))", "def normalize(self):\n self.command.append('gain')\n self.command.append('-n')\n return self", "async def tolevel(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if args[0].isdigit():\n level = int(args[0])\n skill = ' '.join(args[1:])\n else:\n level = None\n skill = ' '.join(args)\n out = users.calc_xp_to_level(ctx.user_object, skill, level)\n await ctx.send(out)", "def setInputSentence(self, sentence):\n self.inputSentence = sentence", "def setLevel(self, level):\n handlers = self.logger.handlers\n for handler in handlers:\n handler.setLevel(level)", "def saving_throw_bonus_on_level(self, level):\n raise NotImplementedError" ]
[ "0.636659", "0.62779075", "0.62480867", "0.62194335", "0.6102128", "0.59673667", "0.5937393", "0.59064263", "0.59064263", "0.59064263", "0.58078915", "0.5776728", "0.57651097", "0.5743857", "0.5737821", "0.5697885", "0.56706774", "0.5657614", "0.5645768", "0.56377774", "0.5626082", "0.56215674", "0.5587731", "0.5568245", "0.555641", "0.55419916", "0.55407333", "0.5494098", "0.5477296", "0.5472362", "0.54711175", "0.54665303", "0.54592985", "0.54542005", "0.54202473", "0.5412224", "0.5411648", "0.5411648", "0.5399013", "0.5362896", "0.53396297", "0.53381693", "0.5337484", "0.5327757", "0.5326258", "0.5309694", "0.530791", "0.53038186", "0.52971697", "0.52835333", "0.5280376", "0.5268839", "0.5260444", "0.52573013", "0.5253911", "0.5244841", "0.5233962", "0.5231497", "0.5223075", "0.52223414", "0.51825714", "0.5181606", "0.5173617", "0.51707214", "0.51666087", "0.51581866", "0.5157848", "0.51535106", "0.5148494", "0.5145211", "0.5141624", "0.5138199", "0.5135775", "0.51265085", "0.51213574", "0.51125413", "0.5107005", "0.51029557", "0.5095589", "0.50952196", "0.5081426", "0.507667", "0.5072306", "0.50709057", "0.5068659", "0.50593734", "0.5057675", "0.50558215", "0.50488913", "0.5044839", "0.50425124", "0.50315034", "0.5029184", "0.5026307", "0.50231713", "0.5020829", "0.5012858", "0.50122374", "0.5008003", "0.50008637" ]
0.5897673
10
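The set_level entry above applies a gain of (level - ref_level) dB, i.e. an amplitude factor of 10 ** (dB / 20). The following is a minimal, runnable sketch of that formula only; the LevelTool class name, the 100 dB reference value, and the test signal are illustrative assumptions, not part of the toolbox.

import numpy as np

class LevelTool:
    """Minimal sketch: scale a signal to a target level, in dB, relative to a reference level."""

    def __init__(self, ref_level=100.0):  # assumed reference level, in dB
        self.ref_level = ref_level

    def set_level(self, x, level):
        # A change of (level - ref_level) dB corresponds to a factor of 10**(dB / 20) in amplitude.
        return x * 10 ** ((level - self.ref_level) / 20)

x = np.ones(8)                      # dummy signal
y = LevelTool().set_level(x, 94.0)  # 6 dB below the assumed 100 dB reference
print(y[0])                         # ~0.501, i.e. roughly half the amplitude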
Calculate the average level across all sentences. The levels are calculated according to the toolbox's reference level. Returns
def average_level(self): spl = [utils.dbspl(x) for x in self.load_files()] return np.mean(spl), np.std(spl)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rouge_l_sentence_level(eval_sentences, ref_sentences):\n\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n m = float(len(ref_sentence))\n n = float(len(eval_sentence))\n lcs = _len_lcs(eval_sentence, ref_sentence)\n f1_scores.append(_f_lcs(lcs, m, n))\n return np.mean(f1_scores, dtype=np.float32)", "def _averageOfLevels(self, root):\n level = [ root ]\n averages = []\n while len(level) != 0:\n averages.append(float(sum(l.val for l in level)) / len(level))\n level = [kid for node in level for kid in (node.left, node.right) if kid]\n return averages", "def showAverageBetUsed(self) :\n averageBetUsed = 0\n for level in self.level_history :\n averageBetUsed += level.bet\n averageBetUsed = averageBetUsed/len(self.level_history)\n Scenario.messageGetAverageBetUsed(averageBetUsed)", "def showAverageGainWon(self) :\n averageGainWon = 0\n for level in self.level_history :\n averageGainWon += level.profit\n averageGainWon = averageGainWon/len(self.level_history)\n Scenario.messageGetAverageGainWon(averageGainWon)", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def get_avg_sentence_length(self):\n sentences = self.blob.sentences\n average_sentence_length = np.mean(np.array([len(sentence.words) for sentence in sentences]))\n return average_sentence_length", "def _eed_compute(sentence_level_scores: List[Tensor]) ->Tensor:\n if len(sentence_level_scores) == 0:\n return tensor(0.0)\n average = sum(sentence_level_scores) / tensor(len(sentence_level_scores))\n return average", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average", "def calculate_avg_cholesterol(self):\n total = 0\n no_of_valid_patients = 0\n for patient in self._patient_list:\n try:\n total += patient.get_cholesterol_data()[0]\n no_of_valid_patients += 1\n except AttributeError:\n continue\n except TypeError:\n continue\n if no_of_valid_patients == 0:\n return 0\n average = total/no_of_valid_patients\n self.average_cholesterol_level = average\n return average", "def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * ticks_per_attack\n return ticks", "def 
averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))", "def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def showAverageNbAttemptsByLevels(self) :\n level_current = 1\n while level_current <= len(self.list_level) :\n self.showAverageNbAttemptsByLevel(level_current)\n level_current += 1", "def rouge_l_sentence_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n reference_words = _split_into_words(reference_sentences)\n evaluated_words = _split_into_words(evaluated_sentences)\n m = len(reference_words)\n n = len(evaluated_words)\n lcs = _len_lcs(evaluated_words, reference_words)\n return _f_p_r_lcs(lcs, m, n)", "def accuracy_text_level(dataset, beta):\r\n X, gs, gl = dataset_as_arrays(dataset)\r\n \r\n #value of the sigmoid function for all embeddings in X, given beta \r\n sentence_predictions = ghf.sigmoid_over_array(X, beta)\r\n \r\n text_predictions = []\r\n for index in range(gs.shape[0]):\r\n frm = numpy.sum(gl[0:index])\r\n to = frm + gl[index]\r\n text_prediction = numpy.average(sentence_predictions[frm:to])\r\n text_predictions.append(0 if text_prediction < 0.5 else 1)\r\n \r\n prediction_right = 0\r\n for index, prediction in enumerate(text_predictions): \r\n if prediction == gs[index]:\r\n prediction_right += 1\r\n \r\n return prediction_right/gs.shape[0]", "def avg_text(mukey, layers):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n 
for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the depth-weighted fraction of each texture component\n sand_tot = 0\n silt_tot = 0\n clay_tot = 0\n for i in range(len(cont)):\n if i+1 <= layers:\n depth = float(cont[i][1]) - float(cont[i][0])\n sand = float(cont[i][7])\n clay = float(cont[i][8])\n silt = round(1-sand-clay, 2)\n sand_tot += sand * depth\n silt_tot += silt * depth\n clay_tot += clay * depth\n final_depth = float(cont[i][1])\n\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n # normalize by total depth\n sand_avg = sand_tot/final_depth\n silt_avg = silt_tot/final_depth\n clay_avg = clay_tot/final_depth\n\n return sand_avg, silt_avg, clay_avg, final_depth", "def rouge_l_sentence_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n reference_words = _split_into_words(reference_sentences)\n evaluated_words = _split_into_words(evaluated_sentences)\n m = len(reference_words)\n n = len(evaluated_words)\n lcs = _len_lcs(evaluated_words, reference_words)\n return _f_p_r_lcs(lcs, m, n)", "def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity", "def find_shrunken_averages(tuple_input):\n #The categorical level.\n level = tuple_input[0]\n # The labels list (y varaibale) from a map function.\n labels = tuple_input[1]\n # The total number of level occurances in the frame (ie count)\n level_n = len(labels)\n level_mean = sum(labels) / level_n\n\n # Determine if there enough occurances of a level. 
If NOT return overall_mean\n if level_n >= threshold:\n return(level,level_mean)\n else:\n return(level, ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean) )", "def getLevels():", "def accuracy_sentence_level(dataset, beta, d2v_model):\r\n \r\n prediction_right = 0\r\n \r\n for row in dataset:\r\n sentence_vec = numpy.reshape(d2v_model.infer_vector(row[0].split(), alpha=0.1, steps=20), newshape=(200,-1))\r\n prediction = (1 / (1 + numpy.exp(-numpy.dot(numpy.transpose(sentence_vec), beta))))\r\n prediction = 1 if prediction>0.5 else 0\r\n if int(row[1]) == prediction:\r\n prediction_right += 1\r\n \r\n return prediction_right / len(dataset)", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score", "def leafScore(self) :\n return 0", "def score(self, sentence):\n s = 0;\n\n #for every word\n for i in xrange(len(sentence)):\n score = self.getBackOff(tuple(sentence[:i+1]));\n if(score != 0):\n s += math.log(score);\n\n return s", "def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score", "def getLevel(self):\n return _libsbml.SBase_getLevel(self)", "def average_length(sentences, padding_word=\"PAD\"):\n global trainset_average_length\n number_of_all = 0\n sum = 0\n averaged_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n sum = sum + len(sentence)\n number_of_all = number_of_all + 1\n average = int(sum / number_of_all)\n average = 35572\n trainset_average_length = average\n for i in range(len(sentences)):\n sentence = sentences[i]\n if len(sentence) < average:\n num_padding = average - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n averaged_sentences.append(new_sentence)\n elif len(sentence) > average:\n new_sentence = sentence[:average]\n averaged_sentences.append(new_sentence)\n else:\n averaged_sentences.append(sentence)\n print('Average Length is: ' + str(average))\n return averaged_sentences", "def average_word_length(self):\n len_words_only = [len(s) if s.isalpha() else 0 for s in self.text]\n if (len_words_only == 0):\n print('Input file contains no words.')\n return 0, 0, 0\n else:\n return sum(len_words_only) / len(len_words_only), median(len_words_only), mode(len_words_only)", "def average_damage(self) -> float:\r\n number_of_dice = int(self.damage.split(\"d\")[0])\r\n damage_of_dice = int(self.damage.split(\"d\")[1])\r\n average_damage = (number_of_dice + number_of_dice * damage_of_dice) / 2\r\n return average_damage", "def walk_aggregate(word):\n walk_results = []\n\n cur_node = word\n\n for attempt in range(num_walks):\n # check positive\n pos = walk_attempt(cur_node, positive_words)\n \n # check negative\n neg = walk_attempt(cur_node, negative_words)\n\n # store result of walk attempt\n walk_results.append((pos, neg))\n\n # compute averages from attempts\n pos_total, neg_total = 0, 0\n for pos_val, neg_val in walk_results: \n 
pos_total += pos_val\n neg_total += neg_val\n\n pos_avg = pos_total / num_walks\n neg_avg = neg_total / num_walks\n\n\n # compare positive vs negative\n\n # label original word based on this\n if neg_avg < pos_avg:\n ratio = neg_avg / pos_avg\n if ratio < required_ratio:\n return \"Negative\"\n else:\n ratio = pos_avg / neg_avg\n if ratio < required_ratio:\n return \"Positive\"\n return \"Neutral\"", "def calc_average_depth(self):\n for idx in range(self.size):\n if self._depth_buffer[idx] != []:\n self._depth[idx] = np.mean(self._depth_buffer[idx])", "def average_precision(ranking, references, atk=None):\n total, num_correct = 0.0, 0.0\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n num_correct += 1\n total += num_correct / k\n return total / num_correct if total > 0 else 0.0", "def getScore(self, sentence):\r\n \r\n score = 0\r\n \r\n for word in sentence.words:\r\n score += len(word)\r\n \r\n return score", "def get_mean(self):\n try:\n return sum(self.speakers.values()) / len(self.speakers)\n except (ZeroDivisionError):\n return 0.0", "def score(self, sentence):\n score = 0.0\n prev_word = None\n for token in sentence:\n two_words_count = self.bigram_count[prev_word][token]\n prev_word_count = self.unigram_count[prev_word]\n if (two_words_count > 0):\n score += math.log(two_words_count)\n score -= math.log(prev_word_count)\n else:\n score += math.log(self.backoff_multiplier)\n score += math.log(self.unigram_count[token] + 1.0)\n score -= math.log(self.num_words + self.vocabulary_size)\n prev_word = token\n return score", "def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text", "def getLevel(self):\n return _libsbml.ASTBasePlugin_getLevel(self)", "def average(self):\n return self.summation() / self.count()", "def score(self, sentence):\n\n\n # TODO your code here\n score = 0.0 \n prevWord = \"\"\n prevPrevWord = \"\"\n newSentence = []\n for word in sentence:\n newSentence += word.split()\n for currentWord in sentence:\n currentWord = currentWord.strip(STRIP_CHARS)\n currentWord = currentWord.lower()\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n trigramCount = self.trigramCounts[trigram]\n if trigramCount > 0:\n score += math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])\n # Subtraction by 1 removes the add one count from the laplace\n # smoothing\n score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))\n elif self.bigramCounts[(prevWord, currentWord)] > 0:\n score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)\n score -= math.log(self.totalBigramCounts)\n else:\n count = self.unigramCounts[currentWord]\n score += math.log(count * UNI_BACKOFF_COEFFICIENT)\n score -= math.log(self.total)\n else:\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n return -score", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score", "def 
average_word_length(self, text):\n return np.mean([len(word) for word in text])", "def get_pertinence (cats):\n sorted_cats = sorted(cats, key=cats.__getitem__, reverse=True)\n score_to_test = cats[sorted_cats[0]]\n all_values = [cats[key] for key in sorted_cats]\n average = sum(all_values) / len(all_values)\n logged_rest = [log(abs(average - val) + 1) for val in all_values[1:]]\n \n rest_average = sum(logged_rest) / len(logged_rest)\n logged_main = log(abs(average - all_values[0])+1)\n \n importance = max(logged_main - rest_average, 0)\n \n return importance", "def getResultLevel(self):\n return _libsbml.DefaultTerm_getResultLevel(self)", "def lix(self, doc):\n num_words = _get_num_words(doc)\n num_sentences = _get_num_sentences(doc)\n num_long_words = _get_num_long_words(doc, min_characters=7)\n return num_words / num_sentences + 100 * num_long_words / num_words", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def _get_emb_avg(g, lang):\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0", "def update_english_stats(self):\n try:\n if self[EN_COUNT] != 0:\n self[EN_RATIO] = float(self[EN_COUNT]-self[EN_ERR_COUNT])/self[EN_COUNT]\n except Exception:\n log.debug(\"MyTranslation.update_english_stats : Erreur dans le calcul \\\n du ratio avec EN_COUNT={} et EN_ERR_COUNT={}\"\n .format(self.en_count, self.en_err_count))", "def getLevel(self, *args):\n return _libsbml.SBMLExtension_getLevel(self, *args)", "def record_em_score(record_examples: List[RecordNestedExample]):\n if not record_examples:\n return 0.\n em_scores = []\n for example in record_examples:\n example_ems = []\n for answer in example.answers:\n example_ems.append(string_f1_score(example.prediction, answer))\n if example_ems:\n em_scores.append(max(example_ems))\n return np.mean(em_scores) if em_scores else -1", "def score(self, sentence):\n\n score = 0.0\n i = 0\n temp = \"\"\n for token in sentence:\n count = self.unigramCounts[token]\n if (i == 0):\n i = i + 1\n temp = token\n continue\n\n key = temp + \",\" + token\n bicount = self.bigramCounts[key]\n unicount = self.unigramCounts[temp]\n temp = token\n if bicount > 0 :\n\n score += (math.log(bicount) - math.log(unicount))\n else:\n unicount = self.unigramCounts[token]\n score += math.log(unicount + 1) + math.log(0.4)\n score -= math.log(self.total + len(self.unigramCounts))\n\n return score", "def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score", "def _get_parse_tree_height(tokens):\n avg_parse_tree_height = 0.0\n \n for sentence in tokens.sents:\n avg_parse_tree_height += _parse_tree_height(sentence)\n \n n_sentences = len(list(tokens.sents))\n avg_parse_tree_height /= n_sentences\n \n return avg_parse_tree_height, n_sentences", "def averageOfLevels(self, root: TreeNode) -> List[float]:\n # Solution 1 - 52 ms\n # Solution 2 - 32 ms\n queue = [root]\n answer = []\n\n while queue:\n temp = []\n answer.append(self.Average(queue))\n\n for el in queue:\n if el.left != None:\n temp.append(el.left)\n if el.right != None:\n temp.append(el.right)\n queue = temp\n\n return answer", 
"def get_avg_word_length(self):\n words = self.blob.words\n average_word_length = np.mean(np.array([len(word) for word in words]))\n return average_word_length", "def avg(self):\n if not self.committed_together:\n return 0\n\n return round(statistics.mean(self.committed_together))", "def get_average_repro(self):\n return np.mean([agent.get_fledge_probability() for agent in self.agents])", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls", "def getLevel(self, *args):\n return _libsbml.MultiExtension_getLevel(self, *args)", "def baseline_mle(self):\n tag_word_tapples = self.data_to_taples(self.training_set)\n e = np.zeros((self.pos_size, self.words_size))\n for tag, word in tag_word_tapples:\n e[self.pos2i[tag], self.word2i[word]] += 1\n sum = e.sum(axis=1)[..., np.newaxis]\n e = e / sum\n return e", "def print_avg():", "def _get_emb_wavg(g, lang, a=0.001):\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0", "def get_avg_len(self, index, window=4):\n if index < 4:\n words = [len(self.get_prev_word(i, orignal=True)) for i in range(1, index)]\n else:\n words = [\n len(self.get_prev_word(index - i, orignal=True)) for i in range(window)\n ]\n try:\n return sum(words) / len(words)\n except ZeroDivisionError:\n return 0", "def mean_STD(self,counter):\n \n \n pass", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def energies(self, num_levels=-1):\n if not self.solved: self.solve()\n return self.en[:num_levels]", "def compute_acc_on_selection(arts, forms_set):\n correct=0\n total=0\n for article in arts:\n for entity in article.entity_mentions:\n if entity.mention in forms_set:\n total+=1\n if entity.gold_link==entity.sys_link:\n correct+=1\n print(correct, total)\n return correct/total", "def average(self):\n return (self.current + self.last) / 2.0", "def getCurrentAverage(examList, projectList, labList, adjPoints=0):\n \n totalPoints = 1000 if not adjPoints else adjPoints\n grades = examList + projectList + labList # concat into one list to calc the average\n return sum(grades) / totalPoints", "def calculate_mean_stdev(self):\n sentences = [self.tokens_from_string(x) + ['.']\n for x in self.testing_set.split(\".\")]\n probabilities = []\n for sentence in sentences:\n # skip short sentences\n if len(sentence) <= self.order:\n continue\n\n prob = self.prob_calculate(sentence)\n probabilities.append(prob / (len(sentence) - self.order))\n\n self.mean = statistics.mean(probabilities)\n self.stdev = statistics.stdev(probabilities)", "def lcs_rouge(self, RTSummary , SystemSummary):\n return (len(RTSummary)+len(SystemSummary) - (self.lcs(RTSummary,SystemSummary)))/2", "def get_avg_loss(self):\n if self.n_batches > 0:\n avg_loss = self.loss / self.n_batches\n self.loss = 0\n self.n_batches = 0\n return avg_loss\n else:\n return 0", "def get_leg_average():\n animals = [json.loads(rd.get(key)) for key in rd.keys(\"*\")]\n legs = [animal[\"legs\"] for animal in animals]\n return jsonify(sum(legs) / len(legs))", "def avg_added(self):\n avg = {}\n for path, lines in self.lines_added.items():\n avg[path] = round(statistics.mean(lines))\n\n return avg", "def ams_estimate(self):\n return 
int(_mean([x ** 2 for x in self.ams_estimates]))", "def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def get_importance(self, key, value, depth):\n multiplier = 0.8 ** depth if depth > 1 else 1.0\n base = 0.0\n if key in ['condition', 'symptom', 'disease', 'treatment']:\n base += 5\n elif key in ['gender', 'age'] or 'location' in key:\n base += 4\n elif 'condition' in key or 'symptom' in key or 'disease' in key or 'treatment' in key:\n base += 3\n else:\n base += 2\n return multiplier * base", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def compute_level_offset(self, root: Position) -> int:\n pattern = self.adoc_title_pat if self.kind == 'adoc' else self.pandoc_title_pat\n for line in g.splitLines(root.b):\n if pattern.match(line):\n return 1\n return 0", "def average_timeout_depth(self):\n if self.timeout_depths:\n return sum(self.timeout_depths) / len(self.timeout_depths)\n else:\n return -1", "def get_average_len(self, index):\n prev_word = self.get_prev_word(index, orignal=True)\n next_word = self.get_next_word(index, orignal=True)\n return (len(prev_word) + len(next_word)) / 2", "def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def global_mean(self):\n return self.interaction_data.label.mean()", "def compute_readability(text):\n total_words = 0\n total_sentences = 0\n total_syllables = 0\n score = 0\n\n words = text.split()\n total_words = len(text.split()) \n total_sentences = count_sentences(text)\n total_syllables = count_syllables(words)\n \n score = 206.835 - 1.015 * ( total_words / total_sentences) - 84.6 * (total_syllables / total_words)\n if score > 90.00:\n answer = 'Texto de nível do 5º ano do Ensino Fundamental, facilmente compreendido por um aluno de 11 anos.'\n elif score <= 90.00 and score > 80.00:\n answer = 'Texto de nível do 6º ano do Ensino Fundamental, inglês coloquial para consumidores.'\n elif score <= 80.00 and score > 70.00:\n answer = 'Texto de nível do 7º ano do Ensino Fundamental, razoavelmente fácil de ler.'\n elif score <= 70.00 and score > 60.00:\n answer = 'Texto de nível do 9º ano do Ensino Fundamental, Inglês simples compreendido por adolescentes de 13 - 15 anos.'\n elif score <= 60.00 and score > 50.00:\n answer = 'Texto de 1º a 3º ano do Ensino Médio, razoavelmente difícil de ler.'\n elif score <= 50.00 and score > 30.00:\n answer = 'Texto de nível Universitário, difícil de ler.'\n else:\n answer = 'Texto de nível de Graduação, muito difícil de ler e mais bem-compreendido por universitários graduados.'\n \n print('Pontuação Total:', score, answer)", "def sentiment_analyzer_scores(sentence):\n score = get_sentiment_analyzer().polarity_scores(sentence)\n return 'Negative Score:', score['neg'], 'Neutral Score:', score['neu'], 'Positive Score:', score['pos'], 'Compound Score:', score['compound']", "def check_overall_energy(self):\n energy = 0\n for student in self.students:\n energy += int(student.energy_level)\n for mentor in self.mentors:\n energy += int(mentor.energy_level)\n print(\"Overall energy equals \", energy)", "def getBaselineScoreForSiblings(self, topNode):\n base = 100000\n numberOfParagraphs = 0\n scoreOfParagraphs = 0\n nodesToCheck = Parser.getElementsByTag(topNode, tag='p')\n \n for node in nodesToCheck:\n nodeText = Parser.getText(node)\n wordStats = StopWords().getStopWordCount(nodeText)\n highLinkDensity = self.isHighLinkDensity(node)\n if wordStats.getStopWordCount() > 2 and not highLinkDensity:\n numberOfParagraphs += 1\n scoreOfParagraphs += 
wordStats.getStopWordCount()\n \n if numberOfParagraphs > 0:\n base = scoreOfParagraphs / numberOfParagraphs\n \n return base", "def readability(target_text):\n\n nb1 = total_words(target_text)\n nb2 = total_phrases(target_text)\n nb3 = total_syllables(target_text)\n k1 = 206.835\n k2 = 1.015\n k3 = 84.6\n score = round((k1 - k2 * (nb1 / nb2) - k3 * (nb3 / nb1)), 2)\n\n print_text = \"Reading level of\"\n if score > 90:\n level = '5th Grade'\n elif score > 80:\n level = '6th Grade'\n elif score > 70:\n level = '7th Grade'\n elif score > 60:\n level = '8-9th Grade'\n elif score > 50:\n level = '10-12th Grade'\n elif score > 30:\n level = 'College student'\n else:\n level = 'Gollege Graduate'\n\n print('Total words:', nb1)\n print('Total phrases:', nb2)\n print('Total syllables:', nb3)\n print('')\n print('Readability score:', score)\n print(print_text, level)", "def mean_average_position():\n pass", "def getLevel(self, *args):\n return _libsbml.GroupsExtension_getLevel(self, *args)", "def get_average_utterance_length(self, speakers, levels):\n if speakers == [] or levels == []:\n return None\n\n average_utterance_length = dict()\n for speaker in speakers:\n average_utterance_length[speaker] = dict()\n speaker_data = self.data.data[self.data.data['speaker'] == speaker]\n for level in levels:\n level_data = speaker_data[speaker_data['level'] == level]\n utterance_texts = level_data['text'].values\n\n # Computing the utterance lengths without the punctuation\n utterance_lengths = [len(str(utterance).split()) - 1 for utterance in utterance_texts]\n level_name = 'level ' + str(level)\n average_utterance_length[speaker][level_name] = float(round(sum(utterance_lengths) /\n len(utterance_lengths), 2))\n table = pd.DataFrame(average_utterance_length)\n table.to_csv('analyses/average_utterance_length.csv')\n return table", "def _information_gain(self, y, subsets):\n n = y.shape[0]\n child_entropy = 0\n\n for y_i in subsets:\n child_entropy += self._entropy(y_i) * y_i.shape[0] / float(n)\n\n return self._entropy(y) - child_entropy", "def showAverageStats(self) :\n Scenario.messageAverageStats()\n self.showAverageGainWon()\n self.showAverageBetUsed()\n self.showAverageNbAttemptsByLevels()", "def getJudgeAverage(self):\n\n try:\n judgeNotesLogger.info(\"getJudgeAverage: Retrieving Judge Average from '%s'\", self.notesFile)\n ratingSum = self.getRatingSum()\n self.average = ratingSum / self.numJudgedFiles\n judgeNotesLogger.debug(\"getJudgeAverage: '%s' / '%s' = '%s'\", str(ratingSum),\n str(self.numJudgedFiles), str(self.average))\n except:\n judgeNotesLogger.warning(\"getJudgeAverage: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))" ]
[ "0.69772524", "0.61759144", "0.61583227", "0.61080706", "0.6031079", "0.59640235", "0.57921195", "0.5750104", "0.57479537", "0.5734042", "0.5672754", "0.5670753", "0.566068", "0.5644736", "0.5632639", "0.5631451", "0.5594872", "0.559045", "0.5566701", "0.55663264", "0.5546832", "0.5533839", "0.55271715", "0.54641473", "0.5463559", "0.54608595", "0.538545", "0.5376117", "0.5370085", "0.53462446", "0.53412426", "0.53394145", "0.5332961", "0.5324108", "0.5323944", "0.5310437", "0.53087175", "0.53063977", "0.5296904", "0.5289894", "0.5287526", "0.52841204", "0.52759486", "0.52411306", "0.5226489", "0.52201295", "0.5216337", "0.5214893", "0.5211315", "0.52024734", "0.51797116", "0.5174991", "0.51694804", "0.51663786", "0.5163661", "0.51634645", "0.5161498", "0.5154492", "0.51486456", "0.51459175", "0.51402193", "0.5126927", "0.51262003", "0.5117622", "0.5112779", "0.5110949", "0.5106915", "0.5104708", "0.51031905", "0.51023525", "0.51006097", "0.509849", "0.50974804", "0.509407", "0.5087692", "0.5086933", "0.50840694", "0.50828594", "0.50813305", "0.5075785", "0.50731105", "0.50663537", "0.50645", "0.50602764", "0.5058618", "0.5050576", "0.5049285", "0.50490713", "0.5045248", "0.504181", "0.50381774", "0.5036424", "0.5034584", "0.5033014", "0.5030106", "0.50285727", "0.50264895", "0.50240034", "0.5022495", "0.50131744" ]
0.6812981
1
Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0.
def isfixline(number):
    if number[0] == '(':
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def area_code(self):\n return self.number[:3]", "def clean_open_close_brace(self):\n # Loop over all lines, check for braces and replace them with \\n{ and \\n}\n brack_num = False\n code_on = False\n\n for line_num, line in enumerate(self.file_ltxt[:-1]):\n self.line_num = line_num\n\n # First check if we are in an inline code section\n breaker = False\n for s_type in VALID_SCRIPT_TYPES:\n if re.findall(f\"^ *{s_type} *{{\", line) or (re.findall(f\"^ *{s_type}\", line) and re.findall(\"^ *{\", self.file_ltxt[line_num+1])):\n if code_on is not False:\n self.print_error(f\"Inline {s_type} code is not supported inside {code_on} code.\")\n\n code_on = s_type\n brack_num = 0\n\n if '{' in line:\n s = line.split(\"{\")\n line = s[0] + \"\\n{\\n\" + '{'.join(s[1:])\n brack_num = 1\n if '}' in line:\n s = line.split(\"}\")\n line = s[0] + \"\\n}\\n\" + '}'.join(s[1:])\n code_on = False\n brack_num = 0\n\n\n self.file_ltxt[line_num] = line\n breaker = True\n if breaker:\n continue\n\n # If we are in an inline code section don't edit it\n if code_on is not False:\n if '}' in line: brack_num -= 1\n if '{' in line: brack_num += 1\n\n if brack_num == 0:\n code_on = False\n\n # If not then we can edit the brace opening and closings\n else:\n str_part, non_str = gen_parse.get_str_between_delims(line)\n non_str = non_str.replace(\"{\", \"\\n{\\n\").replace(\"}\", \"\\n}\\n\")\n line = non_str.replace(r\"??!%s!?\", str_part)\n\n self.file_ltxt[line_num] = line\n # print(self.file_ltxt)\n # raise SystemExit(\"BREAK\")\n # Re-split by line-end and remove blank lines\n self.file_ltxt = [i for i in '\\n'.join(self.file_ltxt).split('\\n')\n if not i.isspace() and i]", "def clean_code(code, lengte):\n return code.zfill(lengte)", "def _check_brackets(line_index, input_line):\n global _total_lines_of_code\n if input_line.endswith('{') or input_line.endswith('}'):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def formatCode(self, code):\r\n m = self.codeRe.match(code)\r\n if m:\r\n return code.replace('\\n%s' % m.groups()[0], '\\n')\r\n return code", "def __init__(self, area: str) -> None:\n self.area = area.split(\"\\n\")[:-1] # Trailing newline leaves an annoying empty string\n self.max_h = len(self.area)\n self.max_w = len(self.area[0])", "def code_area(self, on, code_id, code_type='code', show=0, start=-1, step=-1, msg=None):\n _ = self.request.getText\n res = []\n if on:\n code_id = self.sanitize_to_id('CA-%s' % code_id)\n ci = self.qualify_id(self.make_id_unique(code_id))\n\n # Open a code area\n self._in_code_area = 1\n self._in_code_line = 0\n # id in here no longer used\n self._code_area_state = [None, show, start, step, start, ci]\n\n if msg:\n attr = {'class': 'codemsg'}\n res.append(self._open('div', attr={'class': 'codemsg'}))\n res.append(msg)\n res.append(self._close('div'))\n\n # Open the code div - using left to right always!\n attr = {'class': 'codearea', 'lang': 'en', 'dir': 'ltr'}\n res.append(self._open('div', attr=attr))\n\n # Add the script only in the first code area on the page\n if self._code_area_js == 0 and self._code_area_state[1] >= 0:\n res.append(self._toggleLineNumbersScript)\n self._code_area_js = 1\n\n # Add line number link, but only for JavaScript enabled browsers.\n if self._code_area_state[1] >= 0:\n toggleLineNumbersLink = r'''\n<script type=\"text/javascript\">\ndocument.write('<a href=\"#\" onclick=\"return togglenumber(\\'%s\\', %d, 
%d);\" \\\n class=\"codenumbers\">%s<\\/a>');\n</script>\n''' % (ci, self._code_area_state[2], self._code_area_state[3],\n _(\"Toggle line numbers\"))\n res.append(toggleLineNumbersLink)\n\n # Open pre - using left to right always!\n attr = {'id': ci, 'lang': 'en', 'dir': 'ltr'}\n res.append(self._open('pre', newline=True, attr=attr, is_unique=True))\n else:\n # Close code area\n res = []\n if self._in_code_line:\n res.append(self.code_line(0))\n res.append(self._close('pre'))\n res.append(self._close('div'))\n\n # Update state\n self._in_code_area = 0\n\n return ''.join(res)", "def tabing_tool(code):\n for i, line in enumerate(code):\n code[i] = ' '*4 + line\n return code", "def CheckBraces(fn, filename, clean_lines, linenum, error):\n line = clean_lines.elided[linenum]\n if Match(r'^(.*){(.*)}.*$', line):\n # Special case when both braces are on the same line together, as is the\n # case for one-line getters and setters, for example, or rows of a multi-\n # dimenstional array initializer.\n pass\n else:\n # Line does not contain both an opening and closing brace.\n m = Match(r'^(.*){(.*)$', line)\n if m and not (IsBlankLine(m.group(1))):\n # Line contains a starting brace and is not empty, uh oh.\n if \"=\" in line and Match(r'\\)( *){$', line):\n # Opening brace is permissable in case of an initializer.\n pass\n else:\n error(filename, linenum, 'whitespace/braces', 4,\n 'when starting a new scope, { should be on a line by itself')\n m = Match(r'^(.*)}(.*)$', line)\n if m and (not IsBlankLine(m.group(1)) or not IsBlankLine(m.group(2))):\n if m.group(2) != \";\":\n error(filename, linenum, 'whitespace/braces', 4,\n '} should be on a line by itself')\n pass", "def flag_regional_indicator(code: List[str]) -> str:\r\n\r\n return \"\".join([chr(ord(c.upper()) + OFFSET) for c in code])", "def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"", "def current_area(self, value=None):\n my_area = self.my_text.index(INSERT)\n str(my_area)\n for x in range(0, len(my_area)):\n if my_area[x] == \".\":\n my_y = my_area[0:x]\n my_x = my_area[x + 1:]\n my_new_area = \"Ln: \" + my_y + \" | Col: \" + my_x\n self.my_location.config(text=my_new_area)", "def GetLinePostProcess(self):\r\n retline = None\r\n outline = None\r\n try:\r\n retline= str(self.file.readline())\r\n except IOError:\r\n self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, \"cannot read a line from\" )\r\n finally: \r\n #outline1 = retline.replace(\"/\",\"\")\r\n if( (retline !=\"\") and (retline !=\"\\n\")) :\r\n outline = str(\"\")\r\n az_range=range(97,123)\r\n AZ_range = range (65, 91)\r\n val_range = range (48,58)\r\n space_range = range (32, 33)\r\n for i in range(len(retline)):\r\n value = ord(retline[i] )\r\n if ( (value in az_range) or (value in AZ_range) or (value in val_range) or (value in space_range) ):\r\n 
outline = \"\".join([outline,retline[i]])\r\n else:\r\n outline = \"\".join([outline,\"_\"])\r\n '''\r\n if( (retline[i] != \"/\") and (retline[i] != \"&\") and (retline[i] != \"\\\\\") and (retline[i] != \"%\") and (retline[i] != \"#\") and (retline[i] != \"_\") and (retline[i] != '\"') and (retline[i] != \"@\") and (retline[i] != \":\") and (retline[i] != \"\\n\")):\r\n #charac = str(retline[i].encode('ascii','ignore'))\r\n if(ord(retline[i]) < 128):\r\n outline = \"\".join([outline,retline[i]])\r\n ''' \r\n return outline\r\n #return unicodedata.normalize('NFKD', outline).encode('ascii','ignore')\r", "def test_reformat_paragraph_new_code_8_of_8(self):\n before_b = \"\"\"\\\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n after_b = \"\"\"\\\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def format(code):\n newcode = \"\"\n for line in code.splitlines():\n stripped = line.strip()\n # indent everything that is not a label\n if not (stripped.startswith(\".\") or stripped.endswith(\":\")):\n line = \" \"+line\n newcode += line + \"\\n\"\n return newcode", "def explainAreaSmall(self):\n \n #EXPLANATION NO. 1\n #fadeout the non-required areas\n self.play(FadeOut(area_ABC_copy), FadeOut(area_ABD_copy),\n FadeOut(geq_2), FadeOut(geq_1),\n FadeOut(area_ABC), FadeOut(area_ABD))\n \n #expand the required area\n self.play(area_ABE_copy.animate.scale(2).move_to(RIGHT*2))\n\n #surrounding text\n abe_text_1 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\text{Area of } \\\\triangle ABE\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #half base height\n abe_text_2 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\text{base}\", \"\\\\times\", \"\\\\text{height}\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #write texts\n self.play(Write(abe_text_1))\n self.wait()\n self.play(ReplacementTransform(abe_text_1[0], abe_text_2[0]),\n ReplacementTransform(abe_text_1[1:], abe_text_2[1:]))\n self.wait()\n\n #defining braces\n abe_base_brace = always_redraw(\n lambda : Brace(radius_ang, DOWN)\n )\n abe_base_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\cos\\\\theta\").scale(0.6).next_to(abe_base_brace, DOWN)\n )\n abe_height_brace = always_redraw(\n lambda : Brace(radius_ang, LEFT)\n )\n abe_height_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\sin\\\\theta\").scale(0.6).next_to(abe_height_brace, LEFT)\n )\n\n self.play(Write(abe_base_brace), Write(abe_height_brace))\n self.play(Write(abe_base_brace_label), Write(abe_height_brace_label))\n self.wait()\n\n \n #back to editing the equation\n abe_text_3 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"R\\\\cos\\\\theta\", \"\\\\times\", \"R\\\\sin\\\\theta\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n self.play(ReplacementTransform(abe_text_2[0:], abe_text_3[0:]))\n self.wait(0.5)\n self.play(FadeOut(abe_base_brace), FadeOut(abe_height_brace),\n FadeOut(abe_base_brace_label), FadeOut(abe_height_brace_label))\n \n abe_text_4 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\cos x\", \"\\\\times\", \"\\\\sin x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_3[0:], abe_text_4[0:]))\n\n abe_text_5 = always_redraw(\n lambda : MathTex(\"=\", 
\"\\\\dfrac{1}{2}\", \"\\\\sin x\", \"\\\\cos x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_4[0:2], abe_text_5[0:2]),\n ReplacementTransform(abe_text_4[2:], abe_text_5[2:]))\n\n #vgroup for drawing box\n abe_group = VGroup(abe_text_5, area_ABE_copy)\n abe_formula_box = SurroundingRectangle(abe_group, color=PINK)\n\n self.play(Write(abe_formula_box))\n self.wait()\n\n #remove all elements\n self.play(FadeOut(abe_formula_box), FadeOut(abe_text_5), FadeOut(area_ABE_copy), FadeOut(area_ABE))", "def isAMANDATrig(string, pos):\n return string == 0 and pos == 92", "def disp_sec_str(aa_seq):\n return re.sub(\"(.{80})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def _getPblockArea(self, pblock_def):\n assert re.search(r'CLOCKREGION_X\\d+Y\\d+:CLOCKREGION_X\\d+Y\\d+', pblock_def), f'unexpected format of the slot name {pblock_def}'\n DL_x, DL_y, UR_x, UR_y = [int(val) for val in re.findall(r'[XY](\\d+)', pblock_def)] # DownLeft & UpRight\n\n # treat the pseudo SLR with 0 area\n UR_y = min(self.CR_NUM_VERTICAL-1, UR_y) \n\n area = {\n 'BRAM' : 0,\n 'DSP' : 0,\n 'FF' : 0,\n 'LUT' : 0,\n 'URAM' : 0\n }\n \n if DL_y > self.CR_NUM_VERTICAL-1:\n return area \n\n for item in ['BRAM', 'DSP', 'FF', 'LUT', 'URAM']:\n # the total area of one row\n area[item] = sum(self.CR_AREA[i][j][item] for i in range(DL_x, UR_x + 1) for j in range(DL_y, UR_y+1))\n\n return area", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def no_blank_line_before_section(): # noqa: D416", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def align_code(self, boundary):\n word_align = boundary / 4\n\n while len(self._code) % word_align:\n if len(self._code) % 2 == 0:\n self.add(spu.nop(0), True)\n else:\n self.add(spu.lnop(0), True)\n\n return", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def test_parse_bar_code_field(self):\n fields = {'Bar code': {'offset': 438,\n 'length': 15}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Bar code': '4156778061'}\n msg = 'Bar code field parse incorrect'\n self.assertEqual(received, expected, msg)", "def generate_aa_sequence_for_disp(aa_seq):\n return re.sub(\"(.{50})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)", "def test_reformat_paragraph_new_code_5_of_8(self):\n before_b = \"\"\"\\\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 22.\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 22.\n 1. Point 3. 
xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"2.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_polygon_indentations():\n lines = inspect.getsource(polygon)\n spaces = re.findall('\\n +.', lines)\n for space in spaces:\n assert len(space) % 4 == 2, \"Your script contains misplaced indentations\"\n assert len(re.sub(r'[^ ]', '', space)) % 4 == 0, \"Your code indentation does not follow PEP8 guidelines\"", "def retab(code):\n tabs, tabbed_code = 0, \"\"\n for line in code.split(\"\\n\"):\n if line.strip() == \"}\":\n tabs -= 1\n\n tabbed_code += tabs * \"\\t\" + line + \"\\n\"\n if line.strip().endswith(\"{\"):\n tabs+=1\n\n return tabbed_code", "def area(lado):\n\treturn \"El area de un cuadrado es \"+ str(lado*lado)", "def check_message_for_code(in_lines):\n global _is_good_code\n # Loop through every line, and track its index\n for index, line in enumerate(in_lines):\n # Remove all tabs and newlines and bad stuff\n line = re.sub('\\s+', '', line)\n # Check if this is formatted code.\n if line.find('```') >= 0:\n print(line.find('```'))\n print(\"This code is fine, probably\")\n _is_good_code = True\n return\n # Check for code-like stuffs :D\n else:\n _check_last_character(index, line, ';')\n _check_last_character(index, line, '{')\n _check_last_character(index, line, '}')\n _check_last_character(index, line, ')')", "def test_reformat_paragraph_new_code_6_of_8(self):\n before_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"4.0\", \"4.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def Synopsis(self, line):\n nest = 0 # [...] nesting level.\n no_split = 0 # buf[no_split:i] should not be split across lines.\n # String append on buf used below because of no_split lookbehind.\n buf = ' ' * self._indent[0]\n n = len(buf) + 1\n i = 0\n while i < len(line):\n c = line[i]\n if c == self._csi_char:\n control_len = self._attr.GetControlSequenceLen(line[i:])\n if control_len:\n j = i\n i += control_len\n buf += line[j:i]\n continue\n if c == '[':\n # [...] nesting.\n nest += 1\n if nest == 1:\n # A new [...] group - don't split until the end of the group.\n no_split = len(buf)\n elif c in [']', ' ']:\n if c == ']':\n nest -= 1\n if not nest:\n # Outside [...]. OK to split at this point if needed.\n if n >= self._width:\n # Split the line up to no_split, eliminate trailing space and write\n # the line up to no_split.\n n = no_split\n while n > 0 and buf[n - 1] == ' ':\n n -= 1\n self._out.write(buf[:n] + '\\n')\n # Reset indentation for the next line which will start at no_split.\n buf = ' ' * self._indent[0] * 2 + buf[no_split:]\n n = len(buf) + 1\n elif c == ' ':\n # Space outside [...]. Set a new split point.\n no_split = len(buf)\n if c == ' ' and buf and buf[-1] == ' ':\n # Collapse adjacent spaces to one space.\n i += 1\n continue\n buf += c\n n += 1\n i += 1\n self._out.write(buf + '\\n\\n')", "def test_reformat_paragraph_new_code_4_of_8(self):\n before_b = \"\"\"\\\n - Point 1. xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 11.\n A. Point 2. 
xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n - Point 1. xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 11.\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def reformat_code(code):\n if code.startswith('\\n'):\n code = code[1:]\n return dedent(code)", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def Example(self, line):\n self._fill = self._indent[self._level] + self._INDENT\n self._out.write(' ' * self._fill + line + '\\n')\n self._blank = False\n self._fill = 0", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def change_code(code):\n if len(str(code)) == 9:\n code = str(0) + str(code)\n else:\n code = str(code)\n return code", "def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)", "def test_match_multiple_brackets_right_context():\r\n runmatch(lcode)", "def code_format(self):\n return \"^\\\\d{%s}$\" % self._digits", "def basic_char_setup( self ):\n\t\tsmall_bar = 3 # number of points per bar\n\t\twide_bar = round(small_bar * 2.25,0) # 2.25 x small_bar\n\t\tdpl = 50 # dots per line 300dpi/6lpi = 50dpl\n\t\tself._nb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ( '*c%02ia%ib0P' % (small_bar, self.bc_height*dpl) ) + \n\t\t\t\t\t\t self.owner.PRINTER_ESC + \n\t\t\t\t\t\t (\"*p+%02iX\" % small_bar) )\n\t\tself._wb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*c%02ia%ib0P' % (wide_bar, self.bc_height*dpl) )+\n\t\t\t\t\t\t self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*p+%02iX' % wide_bar ) )\n\t\tself._ns = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % small_bar ) )\n\t\tself._ws = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % wide_bar ) )\n \n\t\t# DONE nb = bc39_esc+\"*c\"+TRANSFORM(small_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE wb = bc39_esc+\"*c\"+TRANSFORM(wide_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n\t\t# DONE ns = bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE ws = bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n \n\t\t# adjust cusor position to start at top of line and return to bottom of line\n\t\tself._bc_start = bytes( self.owner.PRINTER_ESC + '*p-50Y' )\n\t\tself._bc_end = bytes( self.owner.PRINTER_ESC + '*p+50Y' )\n\t\t# DONE bc39_start = bc39_esc+\"*p-50Y\"\n\t\t# DONE bc39_END = bc39_esc+\"*p+50Y\"\n\n\t\t# setup the structure allowing to print the code codebar section for various LETTERS\n\t\tself._char39 = { u'1' : 'wb+ns+nb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'2' : 'nb+ns+wb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'3' : 'wb+ns+wb+ws+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'4' : 'nb+ns+nb+ws+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'5' : 'wb+ns+nb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'6' : 'nb+ns+wb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'7' : 'nb+ns+nb+ws+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'8' 
: 'wb+ns+nb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'9' : 'nb+ns+wb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'0' : 'nb+ns+nb+ws+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'A' : 'wb+ns+nb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'B' : 'nb+ns+wb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'C' : 'wb+ns+wb+ns+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'D' : 'nb+ns+nb+ns+wb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'E' : 'wb+ns+nb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'F' : 'nb+ns+wb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'G' : 'nb+ns+nb+ns+nb+ws+wb+ns+wb' , \n\t\t\t\t\t\t u'H' : 'wb+ns+nb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'I' : 'nb+ns+wb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'J' : 'nb+ns+nb+ns+wb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'K' : 'wb+ns+nb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'L' : 'nb+ns+wb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'M' : 'wb+ns+wb+ns+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'N' : 'nb+ns+nb+ns+wb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'O' : 'wb+ns+nb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'P' : 'nb+ns+wb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'Q' : 'nb+ns+nb+ns+nb+ns+wb+ws+wb' , \n\t\t\t\t\t\t u'R' : 'wb+ns+nb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'S' : 'nb+ns+wb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'T' : 'nb+ns+nb+ns+wb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'U' : 'wb+ws+nb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'V' : 'nb+ws+wb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'W' : 'wb+ws+wb+ns+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'X' : 'nb+ws+nb+ns+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'Y' : 'wb+ws+nb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'Z' : 'nb+ws+wb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'-' : 'nb+ws+nb+ns+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'.' : 'wb+ws+nb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u' ' : 'nb+ws+wb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'*' : 'nb+ws+nb+ns+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'$' : 'nb+ws+nb+ws+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'/' : 'nb+ws+nb+ws+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'+' : 'nb+ws+nb+ns+nb+ws+nb+ws+nb' , \n\t\t\t\t\t\t u'%' : 'nb+ns+nb+ws+nb+ws+nb+ws+nb' }", "def fix_line(line):\n line = line.strip()\n if line.startswith('#LOC'):\n return \"#LOC\\tSVTYPE\\tDIFF\\tFIXED\"\n loc, svtype, instr_orig = line.split('\\t')\n instructions = parse_instr_set(instr_orig, loc)\n\n if upper(svtype) == 'ALUJ':\n dna = ALUJ\n elif upper(svtype) == 'ALUS':\n dna = ALUS\n elif upper(svtype) == 'ALUY':\n dna = ALUY\n elif upper(svtype) == 'LINE1':\n dna = LINE1\n else:\n raise ValueError(\"Invalid svtyp:%s\" % svtype)\n\n for instr in instructions:\n dna = apply_instr(instr, dna, \"%s %s\" % (loc, svtype))\n\n line = \"%s\\t%s\" % (line, dna)\n return line", "def highlight_source(linenumber, index, lines, offset=None):\n # The following if statements are left-over diagnostic\n # from the hack to integrate into Idle.\n # they are harmless tests which could potentially be useful.\n if lines is None:\n return \"\", \"\"\n if index is None:\n print(\"problem in highlight_source(): index is None\")\n index = 0\n\n # The weird index arithmetic below is based on the information returned\n # by Python's inspect.getinnerframes()\n\n new_lines = []\n problem_line = \"\"\n nb_digits = len(str(linenumber + index))\n no_mark = \" {:%d}: \" % nb_digits\n with_mark = \" -->{:%d}: \" % nb_digits\n if offset is not None:\n offset_mark = \" \" * (8 + nb_digits + offset) + \"^\"\n i = linenumber - index\n\n for line in lines:\n if i == linenumber:\n num = with_mark.format(i)\n problem_line = line\n new_lines.append(num + line.rstrip())\n if offset is not None:\n new_lines.append(offset_mark)\n break\n else:\n num = no_mark.format(i)\n new_lines.append(num + line.rstrip())\n 
i += 1\n return \"\\n\".join(new_lines), problem_line", "def parse_postalCA(self):\n \n index = self.index\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal = self.words[index]['word']\n index += 1\n if index == self.length:\n return None, 0\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal += self.words[index]['word']\n \n return postal, 2", "def highlight_lines(self) -> str:\n\n info = self._info\n buffer = info.buffer\n\n startl, endl = info.line, info.endline\n startp, endp = self.get_text_positions()\n\n above_lines = strip_newlines(buffer.get_lines(max(startl - 5, 0), startl - 1))\n below_lines = strip_newlines(buffer.get_lines(endl + 1, endl + 5))\n\n source = list(strip_newlines(self._info.text_lines()))\n\n red = colorama.Fore.RED\n white = colorama.Fore.WHITE\n normal = colorama.Style.NORMAL\n reset = colorama.Style.RESET_ALL + colorama.Fore.RESET\n dim = colorama.Style.DIM\n bright = colorama.Style.BRIGHT\n\n def make_red(s):\n return reset + red + s + white\n\n def make_dim(s):\n return reset + dim + s + normal\n\n def make_bright(s):\n return reset + bright + s + normal\n\n line_pad = \" \" * 5 # 5 chars are used by the linecount that need to be padded on the arrows\n\n def fmtr(counter):\n if len(source) == 1:\n # start and end on same line, only need simple fmt\n yield add_line_once(source[0], counter)\n if startp == endp: # only emit single carat when the error is a single character\n yield make_red(line_pad + f\"{'^':>{startp}}\")\n else:\n width = (endp - startp) - 1 # leave space for carats + off by one\n separator = '-' * width\n yield make_red(line_pad + f\"{'^':>{startp}}{separator}^\")\n else:\n width = (len(source[0]) - startp)\n separator = '-' * width\n yield add_line_once(source[0], counter)\n yield make_red(line_pad + f\"{'^':>{startp}}{separator}\")\n for i in source[1:-1]:\n yield add_line_once(i, counter)\n yield make_red(line_pad + '-' * len(i))\n width = endp - 1 # - len(source[endl])\n separator = '-' * width\n yield add_line_once(source[-1], counter)\n yield make_red(line_pad + f\"{separator}^\")\n\n line_counter = count(max(startl - 5, 1))\n\n above_lines = \"\\n\".join(add_line_count(above_lines, line_counter))\n if above_lines:\n above_lines += \"\\n\"\n error_lines = \"\\n\".join(fmtr(line_counter))\n\n below_lines = \"\\n\".join(add_line_count(below_lines, line_counter))\n if below_lines:\n below_lines = \"\\n\" + below_lines\n\n return make_dim(above_lines) + make_bright(error_lines) + make_dim(below_lines)", "def get_keymap(lines: str):\n parsed = [line for line in lines.split(\"\\n\") if line]\n\n start = end = 0\n for i, line in enumerate(parsed):\n if \"qmkformat start\" in line:\n start = i + 1\n if \"qmkformat end\" in line:\n end = i\n break\n\n layout = \"\".join(parsed[start:end])\n return layout[layout.find(\"{\") + 1 : layout.find(\"}\")]", "def _clean_code(self) -> str:\n if len(self.code) >= self.BARCODE_LENGTH:\n code = self.code[:self.BARCODE_LENGTH]\n\n # Calculate the checksum digit\n check_digit = self.calculate_checksum(code)\n return code + str(check_digit)", "def test_reformat_paragraph_new_code_7_of_8(self):\n before_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n after_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. 
Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.11\", \"2.11\"),\n after_sel=(\"3.1\", \"3.1\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def dummy_junction13():\n return 'junction:chr1:176-299:+'", "def _do_code_blocks(self, text):\r\n code_block_re = re.compile(r'''\r\n (?:\\n\\n|\\A\\n?)\r\n ( # $1 = the code block -- one or more lines, starting with a space/tab\r\n (?:\r\n (?:[ ]{%d} | \\t) # Lines must start with a tab or a tab-width of spaces\r\n .*\\n+\r\n )+\r\n )\r\n ((?=^[ ]{0,%d}\\S)|\\Z) # Lookahead for non-space at line-start, or end of doc\r\n ''' % (self.tab_width, self.tab_width),\r\n re.M | re.X)\r\n return code_block_re.sub(self._code_block_sub, text)", "def flag_tag_sequence(code: List[str]) -> str:\r\n\r\n tags = \"\".join([chr(ord(c.lower()) + OFFSET_TAG) for c in code])\r\n return BLACKFLAG + tags + CANCELTAG", "def fixIndentation(code, newIndent, governingLine=0):\n\tcodeLines = [line for line in code.split(\"\\n\")]\n\treserved, codeLines = codeLines[:governingLine], codeLines[governingLine:]\n\twhile codeLines:\n\t\tif codeLines[0].strip():\n\t\t\tfirstIndent = re.match(\"^\\s*\", codeLines[0]).group()\n\t\t\tbreak\n\t\telse:\n\t\t\treserved.append(codeLines.pop(0))\n\tif codeLines:\n\t\tfixedLines = []\n\t\tfor line in codeLines:\n\t\t\tif not line.strip():\n\t\t\t\tfixedLines.append(newIndent)\n\t\t\telse:\n\t\t\t\tif line[:len(firstIndent)]!=firstIndent:\n\t\t\t\t\traise Error(\"Bad indent in line %s\"%repr(line))\n\t\t\t\tfixedLines.append(newIndent+line[len(firstIndent):])\n\telse:\n\t\tfixedLines = codeLines\n\treserved = [newIndent+l.lstrip() for l in reserved]\n\treturn \"\\n\".join(reserved+fixedLines)", "def area(self):", "def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')", "def test_reformat_paragraph_new_code_1_of_8(self):\n before_b = \"\"\"\\\n #@@pagewidth 40\n '''\n docstring.\n '''\n \"\"\"\n after_b = \"\"\"\\\n #@@pagewidth 40\n '''\n docstring.\n '''\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def __init__(self, position, left_cont, right_cont, line):\r\n self.line = line\r\n self.position = position\r\n self.left_cont = left_cont\r\n self.right_cont = right_cont\r\n self.end_pattern = re.compile(r'[.?!]\\s[A-ZА-Я]?')\r\n self.start_pattern = re.compile(r'[A-ZА-Я]*[.?!]')", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def _filler(self, char, amount):\n if not isinstance(char, ANSIString):\n line = char * amount\n return ANSIString(\n char * amount,\n code_indexes=[],\n char_indexes=list(range(0, len(line))),\n clean_string=char,\n )\n try:\n start = char._code_indexes[0]\n except IndexError:\n start = None\n end = char._char_indexes[0]\n prefix = char._raw_string[start:end]\n postfix = char._raw_string[end + 1 :]\n line = char._clean_string * amount\n code_indexes = [i for i in range(0, len(prefix))]\n length = len(prefix) + len(line)\n code_indexes.extend([i for i in range(length, length + len(postfix))])\n char_indexes = self._shifter(list(range(0, len(line))), len(prefix))\n raw_string = prefix + line + postfix\n return ANSIString(\n raw_string, clean_string=line, char_indexes=char_indexes, code_indexes=code_indexes\n )", "def __init__(self):\n 
super(LineStart, self).__init__(r\"^\", regex.MULTILINE)", "def bad_underline_length(): # noqa: D416", "def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \" .\"", "def solve(data):\n # Starting position\n x = 0\n y = 2\n code = ''\n for line in data:\n for char in line:\n if char == 'R':\n x = 4 if x + 1 > 4 else x + 1\n if KEYPAD[x][y] == 0:\n x -= 1\n elif char == 'L':\n x = 0 if x - 1 < 0 else x - 1\n if KEYPAD[x][y] == 0:\n x += 1\n elif char == 'D':\n y = 0 if y - 1 < 0 else y - 1\n if KEYPAD[x][y] == 0:\n y += 1\n elif char == 'U':\n y = 4 if y + 1 > 4 else y + 1\n if KEYPAD[x][y] == 0:\n y -= 1\n code += str(KEYPAD[x][y])\n return (code)", "def area(self):\n\n return 'Hexagon area'", "def build_initial_line(self):\n # self.init = SimEngine.gui_get('init')\n # if self.init == 'Random':\n # # Set the initial row to random 1/0.\n # # You complete this line.\n # line = \"\".join(random.choice('10') for i in range(self.ca_display_size))\n # else:\n # line = [0] * self.ca_display_size\n # col = 0 if self.init == 'Left' else \\\n # CA_World.ca_display_size // 2 if self.init == 'Center' else \\\n # CA_World.ca_display_size - 1 # self.init == 'Right'\n # line[col] = 1\n # return line\n\n\n #if justification is used to display it is unnecessary to make the initial line as wide as the gui except\n #when random\n self.init = SimEngine.gui_get('init')\n if self.init == 'Random':\n # Set the initial row to random 1/0.\n # You complete this line.\n line = \"\".join(random.choice('10') for i in range(self.ca_display_size))\n\n # check to see if a zero needs to be added to either end\n # useful for the rule 001 or 100\n if line[:2] == [0, 1]:\n line.insert(0, 0)\n\n if line[-2:] == [1, 0]:\n line.append(0)\n\n return line\n\n else:\n return [0,0,1,0,0]", "def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:\n with open(file_path) as text_file:\n # Highlight code\n code = text_file.read()\n code_lines = code.splitlines()\n # Prepend line number\n code_lines = [\n f\">{lno:3} | {line}\" if line_no == lno else f\"{lno:4} | {line}\"\n for lno, line in enumerate(code_lines, 1)\n ]\n # # Cut out the snippet\n start_line_no = max(0, line_no - context_lines_count - 1)\n end_line_no = line_no + context_lines_count\n code_lines = code_lines[start_line_no:end_line_no]\n # Join lines\n code = \"\\n\".join(code_lines)\n return code", "def __wrapAreas(self, areas):\n areas_list = areas.strip().split('\\n')\n areas_html = ''\n for value in areas_list:\n if len(value)>0:\n areas_html = areas_html + '<span class=\"area-list-item\">' + value +'</span>'\n \n return areas_html", "def print_pos(pos):\n # TO DO: EXCLUDE FIRST LINE\n s = \"%BLOCK POSITIONS_FRAC\\n\" + str(pos) + \"\\n%ENDBLOCK POSITIONS_FRAC\"\n return s", "def num_48():\n frmt = \"\"\"\n :Input formatting option ({}) ...\n :{}\\n\n :Subtitle...\n :{}\\n\n :An array, double indent..\n {}\\n\n :Final line\n \"\"\"\n pad = \":...\"\n pad2 = \" \"\n a = \"Section title...\"\n b = \"{}Text indented by 4 spaces\".format(pad2)\n c = np.arange(4*5).reshape(4, 5)\n # f = \"\\n\".join([i.strip() for i in frmt.split(\":\")])\n # print(f.format(1, a, b, c))\n # print(f.format(2, a, b, indent(str(c), pad2)))\n f = dedent(frmt).format(3, a, b, indent(str(c), pad2*2))\n print(f)\n print(indent(f, pad))\n return frmt, c", "def area(self):\n ...", "def black_format(code_path: Path, line_length=90) -> None:\n 
check = False\n diff = False\n\n write_back = WriteBack.from_configuration(check=check, diff=diff, color=True)\n report = Report(check=check, diff=diff, quiet=False, verbose=True)\n mode = Mode(\n target_versions={TargetVersion.PY37},\n line_length=line_length,\n is_pyi=False,\n is_ipynb=False,\n string_normalization=True,\n magic_trailing_comma=False,\n experimental_string_processing=False,\n )\n\n reformat_one(\n src=code_path, fast=True, write_back=write_back, mode=mode, report=report\n )\n # TODO: explore how to format a string and return the formatted string\n # (i.e. with reformat_code)", "def uCSIsBraillePatterns(code):\n ret = libxml2mod.xmlUCSIsBraillePatterns(code)\n return ret", "def parse_code(code):\n result = []\n lines = code.splitlines()\n end = len(lines)\n for index, line in enumerate(lines):\n next_index = index + 1\n if line.startswith('>>>') and next_index != end:\n block = [line[4:]]\n while next_index != end and lines[next_index].startswith('...'):\n block.append(lines[next_index][4:])\n next_index += 1\n while (next_index != end and not\n any(lines[next_index].startswith(s)\n for s in ('>>>', 'Trace', ' File'))):\n next_index += 1\n\n if next_index != end and lines[next_index].startswith('>>>'):\n result.append('\\n'.join(block))\n\n if lines[-1].startswith('>>>'):\n result.append(lines[-1][4:])\n return '\\n'.join(result)", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def format_stub_annotation(frag):\n stack = []\n base = ['Hex', 'HexNAc']\n for k in sorted(frag.glycosylation, key=lambda x: x.mass(), reverse=True):\n if k not in base:\n base.append(k)\n for k in base:\n v = frag.glycosylation[k]\n if not v:\n continue\n stack.append(f\" {monosaccharide_to_symbol[k]}{v}\")\n stack.append(\"Pep\")\n return '\\n'.join(stack)", "def clean_code(code):\n return code", "def bracketed (self,phrase):\r\n\r\n level = 0\r\n left_point = None\r\n right_point = None\r\n \r\n\r\n for count,char in enumerate(phrase):\r\n\r\n if char == '(':\r\n if level == 0:\r\n left_point = count\r\n level+=1\r\n if char == ')':\r\n level-=1\r\n if level == 0:\r\n right_point = count\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False", "def fix_spaces(text):\n # Please print out which line of the above program contains an error. E.g. 
if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"19\")\n # END OF SOLUTION", "def block_code(self, code, lang=None):\n code = code.rstrip('\\n')\n return [\"<code>\"] + code", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def _format_line( self, side, flag, linenum, text ):\n\t\ttry:\n\t\t\tline_id = '%d' % linenum\n\t\texcept TypeError:\n\t\t\t# handle blank lines where linenum is '>' or ''\n\t\t\tline_id = ''\n\n\t\ttext = text.rstrip()\n\n\t\tif not self.line_numbers:\n\t\t\treturn text\n\t\treturn '%s %s' % ( self._rpad( line_id, 8 ), text )", "def filter_code_block(inp: str) -> str:\n\n if inp.startswith(\"```\") and inp.endswith(\"```\"):\n inp = inp[3:][:-3]\n elif inp.startswith(\"`\") and inp.endswith(\"`\"):\n inp = inp[1:][:-1]\n\n return inp", "def replace_and_print(line_list):\n line_list = line_list.replace('0', ' ')\n line_list = line_list.replace('1', u\"\\u2588\")\n print(line_list)", "def defineBLOCKSECTION(f,layernamelist):\r\n feilinname_lineheight=2.5\r\n #note_lineheigh=4\r\n layercount=0\r\n feilin_name_pos=[70.0+globalconfig.CUTLINE_X_OFFSET,185.0+globalconfig.CUTLINE_Y_OFFSET]\r\n #note_pos=[190.0+globalconfig.CUTLINE_X_OFFSET,80.0+globalconfig.CUTLINE_Y_OFFSET]\r\n f.write(\"0\\nSECTION\\n2\\nBLOCKS\\n\") #绘制块定义\r\n f.write(\"0\\nBLOCK\\n8\\n0\\n2\\nROUND_1\\n70\\n0\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\")\r\n f.write(\"0\\nPOLYLINE\\n8\\n0\\n5\\n3F\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n70\\n1\\n\")\r\n f.write(\"40\\n0.04\\n41\\n0.04\")\r\n f.write(\"\\n0\\nVERTEX\\n5\\n406\\n8\\n0\\n10\\n-0.02\\n20\\n0.0\\n30\\n0.0\\n42\\n1.0\")\r\n f.write(\"\\n0\\nVERTEX\\n5\\n407\\n8\\n0\\n10\\n0.02\\n20\\n0.0\\n30\\n0.0\\n42\\n1.0\\n0\\nSEQEND\\n5\\n408\\n8\\n0\\n\")\r\n f.write(\"0\\nENDBLK\\n5\\n43\\n8\\n0\\n\") \r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n f.write(\"0\\nBLOCK\\n8\\n0\\n2\\n*U\"+str(layercount)+\"\\n\") \r\n f.write(\"70\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\") \r\n f.write(\"0\\nTEXT\\n5\\n46\\n8\\n\"+layername+\"\\n6\\nCONTINUOUS\\n10\\n\"+str(feilin_name_pos[0])+\"\\n20\\n\"+str(feilin_name_pos[1])+\"\\n30\\n0.0\\n\")\r\n f.write(\"40\\n\"+str(feilinname_lineheight)+\"\\n1\\n\"+globalconfig.NAME_OF_FEILIN+\"-\"+layername+\"\\n0\\nENDBLK\\n5\\n47\\n8\\n\"+layername+\"\\n\")\r\n \r\n# layercount=layercount+1\r\n# f.write(\"0\\nBLOCK\\n8\\n0\\n2\\n*U\"+str(layercount)+\"\\n\") \r\n# f.write(\"70\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n\") \r\n# f.write(\"0\\nTEXT\\n5\\n46\\n8\\n\"+layername+\"\\n6\\nCONTINUOUS\\n10\\n\"+str(note_pos[0])+\"\\n20\\n\"+str(note_pos[1])+\"\\n30\\n0.0\\n\")\r\n# f.write(\"40\\n\"+str(note_lineheigh)+\"\\n1\\n\")\r\n# f.write(\"\")\r\n# f.write(\"\\n0\\nENDBLK\\n5\\n47\\n8\\n0\\n\") \r\n \r\n f.write(\"0\\nENDSEC\\n\")", "def remove_line_continuations(code):\n # pat = r\"('.*)(\\.\\.\\.)(.*')\"\n # code = re.sub(pat, r\"\\g<1>\\g<3>\", code, flags=re.MULTILINE)\n\n pat = r\"^([^%'\\\"\\n]*)(\\.\\.\\..*\\n)\"\n code = re.sub(pat, r\"\\g<1>\", code, flags=re.MULTILINE)\n return code", "def f77linebreaks(instr):\n outstr = ''\n for l in instr.splitlines():\n if(len(l.strip())==0): # empty line\n outstr += l+'\\n'\n elif(l[0]!=' ' or l.lstrip()[0]=='!'): # comment line, never touch those\n outstr += l+'\\n'\n else:\n if(len(l) > 7 and l[0:7].strip().isnumeric()): # workaround for parser bug: numeric line labels are printed with an incorrect blank space in column 1. 
Remove this.\n l = l[0:7].strip().ljust(7) + l[7:]\n while(len(l) > 72):\n outstr += l[0:71]+'\\n'\n l = ' *'+l[71:]\n outstr += l+'\\n'\n return outstr", "def start_with_the_beggining(rna: str):\n return 0", "def considerAreasText(self):\n global consider_area_text\n consider_area_text = MathTex(\"\\\\text{Consider the following areas:}\").scale(0.8).shift(RIGHT*3.55)\n\n self.play(Write(consider_area_text))\n self.play(consider_area_text.animate.to_edge(UP))", "def codespan(self, text):\n if isinstance(text, str):\n text = [' {} '.format(text)]\n return [MdStyleInstructionText('codespan')] + text", "def dummy_junction24():\n return 'junction:chr1:251-399:+'", "def print_asmline(self,adr,mode,op_bytes, ins, op_str):\r\n MODE = \"T\" if mode else \"A\"\r\n line = (\r\n highlight(f\"{ins:<6} {op_str:<20}\", self.asm_hl, self.asm_fmt)\r\n .decode()\r\n .strip(\"\\n\")\r\n )\r\n if len(op_bytes) == 4:\r\n op_bytes = f\" {int(op_bytes, 16):04X} \"\r\n else:\r\n op_bytes = f\" {int(op_bytes, 16):08X} \"\r\n\r\n if self.baseAddr:\r\n print(\"\\n\" + MODE + self.color(\"YELLOW\", f\" {adr - self.baseAddr:08X} {adr:08X}\") + self.color(\"RED\", op_bytes) + line, end=\";\")\r\n else:\r\n print(\"\\n\" + MODE + self.color(\"YELLOW\",f\" {adr:08X}\") + self.color(\"RED\", op_bytes) + line, end=\";\")", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def test_reformat_paragraph_new_code_2_of_8(self):\n before_b = \"\"\"\\\n #@@pagewidth 40\n '''\n docstring.\n '''\n \"\"\"\n after_b = \"\"\"\\\n #@@pagewidth 40\n '''\n docstring.\n '''\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"", "def highlightBlock(self, text):\n\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = expression.cap(nth).length()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n self.setCurrentBlockState(0)", "def at_least(length:int) -> str:\n return f\"{{{length},}}\"", "def end_marker(data):\n if ord(data[-1]) == 10 and data[-2] == '}':\n return True" ]
[ "0.59898686", "0.59150344", "0.56030583", "0.55835414", "0.5426462", "0.54129606", "0.53990805", "0.5327246", "0.52900195", "0.52449477", "0.51972073", "0.51676416", "0.5162044", "0.515133", "0.51285326", "0.5108765", "0.5095171", "0.50828093", "0.5079937", "0.50754184", "0.50461435", "0.50208545", "0.50154287", "0.50128955", "0.49771467", "0.4967152", "0.49436456", "0.4937863", "0.49339172", "0.4926151", "0.4914177", "0.49089313", "0.4905142", "0.48814473", "0.48714778", "0.48624307", "0.4857231", "0.48479268", "0.48459697", "0.48458177", "0.48166344", "0.48163792", "0.48118064", "0.48096693", "0.48052964", "0.47826815", "0.47795582", "0.47681382", "0.4764747", "0.47643307", "0.47638905", "0.47624126", "0.4758503", "0.47526073", "0.47479612", "0.47319126", "0.47300085", "0.47256902", "0.47244522", "0.4712547", "0.47031388", "0.46949458", "0.4693732", "0.4689522", "0.46828917", "0.46776286", "0.4676025", "0.46743605", "0.46721587", "0.46701404", "0.4668848", "0.46636954", "0.46623388", "0.46507794", "0.46454918", "0.46434814", "0.4642866", "0.4638105", "0.46374726", "0.46328485", "0.4624666", "0.4623095", "0.4621445", "0.46195987", "0.4617885", "0.46173114", "0.46135902", "0.46133807", "0.46129793", "0.4608395", "0.46052733", "0.4601109", "0.45962706", "0.45883194", "0.45839164", "0.45836893", "0.45836613", "0.4582513", "0.45800698", "0.45797476" ]
0.5302146
8
Telemarketers' numbers have no parentheses or space, but they start with the area code 140.
def ismobile(number):
    if number[0] in ['7', '8', '9']:
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area_code(self):\n return self.number[:3]", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def area(lado):\n\treturn \"El area de un cuadrado es \"+ str(lado*lado)", "def plurals(num):\r\n if num != 1:\r\n return ('s')\r\n return ('')", "def test_tax_age_bracket_above_65(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertNotEqual(97, net_pay_age)", "def _is_num(w):\n symbols = list(w)\n for s in symbols:\n if s in string.digits:\n return '<NUM>'\n return w", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def getKnownTelemarketers():\n telemarketers = set()\n # Get known telemarketer numbers. They start with \"140\"\n for tm in calls:\n if tm[0][:3] == \"140\":\n telemarketers.add(tm[0])\n\n # See if telemarketer numbers receive any calls. If they\n # do remove them from the list.\n for tm in calls:\n if tm[1][:3] == \"140\":\n telemarketers.remove(tm[1])\n\n return telemarketers", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def preberi_pot(ukazi):\r\n x_k = 0\r\n y_k = 0\r\n\r\n p = 1\r\n pot = []\r\n pot += (0, 0),\r\n\r\n ukazi = ukazi.split('\\n')\r\n\r\n for m in ukazi:\r\n\r\n if m == \"DESNO\":\r\n if p == 1:\r\n p = 4\r\n else:\r\n p -= 1\r\n elif m == \"LEVO\":\r\n if p == 4:\r\n p = 1\r\n else:\r\n p += 1\r\n elif m.isalnum():\r\n if p == 3:\r\n y_k += int(m)\r\n elif p == 4:\r\n x_k += int(m)\r\n elif p == 1:\r\n y_k -= int(m)\r\n elif p == 2:\r\n x_k -= int(m)\r\n pot += (x_k, y_k),\r\n\r\n return pot", "def test_150612_ptsdy1_3(self):\n spc = parser(get_file('PTSDY1_3.txt'))\n outlook = spc.get_outlook('CATEGORICAL', 'SLGT')\n self.assertAlmostEqual(outlook.geometry.area, 53.94, 2)", "def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' 
% phone_number)", "def pentakis(self):\n return self.nlegomena(5)", "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def coursecode(el):\n txt = text(el)\n return re.sub(r\"\\s*\\[\\d+\\]$\", \"\", txt, re.UNICODE)", "def spell_triade(triade):\n spelled_triade = ''\n\n if triade[1] == 1:\n\n spelled_triade += numerals_dictionary[triade[1] * 10 + triade[2]]\n\n elif triade[1] == 0:\n\n spelled_triade += numerals_dictionary[triade[2]]\n\n else:\n\n if triade[2] == 0:\n\n spelled_triade = numerals_dictionary[triade[1] * 10]\n\n else:\n\n spelled_triade = '-'.join((numerals_dictionary[triade[1] * 10], numerals_dictionary[triade[2]]))\n\n if triade[0] in range(1, 10):\n\n hundreds = ' '.join((numerals_dictionary[triade[0]], 'hundred'))\n\n if spelled_triade != 'zero':\n\n spelled_triade = ' and '.join((hundreds, spelled_triade))\n\n else:\n\n spelled_triade = hundreds\n\n return spelled_triade", "def fix_teen(n):\n if 13<=n<=14 or 17<=n<=19:\n return 0\n else:\n return n", "def phoneDisplay(number):\n return number[0:3] + \"&nbsp;&middot;&nbsp;\" + number[3:6] + \"&nbsp;&middot;&nbsp;\" + number[6:10]", "def test_trapezoid_area(self):\n self.assertEqual(16, trapezoid_area(\n self.values['base_minor'], self.values['base_major'], self.values['height']))", "def _format_details_of_charges_71A(self, val):\n return val", "def test_parens_tall(self):\r\n self._each_parens(r'\\left(x^y\\right)', 'x^y', '(', tall=True)", "def minimum(self):\n return self.cleaning('Mínimo')", "def test_number_simple(self):\r\n self.assertEquals(preview.latex_preview('3.1415'), '3.1415')", "def test_tax_age_bracket_65(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertNotEqual(95, net_pay_age)", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def test_hasta_el_numeral(self):\n fwa = FakeWikiArchivo('abcd <a href=\"/wiki/foobar#xy\">FooBar</a> dcba')\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [(u'foobar', SCORE_PEISHRANC)])", "def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new", "def is_19_pandigital(n):\n return len(str(n)) == 9 and set(list(str(n))) == pan", "def identifyCounty(line):\n matches = re.findall('[a-zA-Z]', line)\n if len(matches) > 0 and ''.join(matches) != \"Total\":\n return True", "def koordinatpunkt( wktstring ):\r\n\r\n wkt_type = wktstring.split()[0]\r\n if 'Z' in wktstring[0:30].upper():\r\n wkt_type += ' Z'\r\n\r\n wktstring = re.sub( r'[a-zA-Z]+', '', wktstring)\r\n wktstring = re.sub( r'\\(+', '', wktstring) \r\n wktstring = re.sub( r'\\)+', '', wktstring)\r\n antall_koordinatpunkt = len( wktstring.split() )\r\n\r\n return (wkt_type, antall_koordinatpunkt)", "def tetrakis(self):\n return self.nlegomena(4)", "def code_format(self):\n return \"^\\\\d{%s}$\" % self._digits", "def test_number_sci_notation(self):\r\n 
self.assertEquals(\r\n preview.latex_preview('6.0221413E+23'),\r\n r'6.0221413\\!\\times\\!10^{+23}'\r\n )\r\n self.assertEquals(\r\n preview.latex_preview('-6.0221413E+23'),\r\n r'-6.0221413\\!\\times\\!10^{+23}'\r\n )", "def test_080731_invalid(self):\n spc = parser(get_file('PTSDY1_biggeom.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('WIND', 'SIGN', 1)\n self.assertAlmostEquals(outlook.geometry.area, 15.82, 2)\n self.assertEquals(len(spc.warnings), 1)", "def parse_precision(p):\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max", "def clean_num(quote):\n for char in ROMAN:\n quote = quote.replace(*char)\n return quote", "def area(self):\n\n return 'Hexagon area'", "def test_greek(self):\r\n self.assertEquals(preview.latex_preview('pi'), r'\\pi')", "def areaTriangulo(base,altura):\n\n\treturn \"El area del triangulo es: \"+str((base*altura)/2)", "def test_unicode_parenthization():\n alpha = symbols('alpha')\n printer = SympyUnicodePrinter()\n printer.parenthesize(alpha, 0) == 'α'", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def nm_152263_exons():\n return [(0, 234), (234, 360), (360, 494), (494, 612), (612, 683), (683, 759),\n (759, 822), (822, 892), (892, 971), (971, 7099)]", "def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def 
parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1", "def test_tax_court_docket_number_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n \n \n \n UNITED STATES TAX COURT\n \n \n \n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n \n \n \n Docket No. 5393-04. Filed May 31, 2006.\n \n \n \n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"5393-04\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n \n Docket No. 5706-12. Filed May 6, 2013.\n \n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n \n 200\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f (200) SHENK v. COMMISSIONER 201\n \n \n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 
152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 24.\n \n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n \n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n \n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f 202 140 UNITED STATES TAX COURT REPORTS (200)\n \n \n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. 
Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"5706-12\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n docket_number = results[\"Docket\"][\"docket_number\"]\n self.assertEqual(docket_number, a)\n print \"✓\", docket_number", "def get_no(data):\n no = re.search(\"[^a-zA-Z](n\\s*o\\s*\\.?\\s*[^a-zA-Z\\n,]+)\",\n data, re.IGNORECASE)\n # Check if the search result contains at least a digit\n if has_num(no.group()):\n return ''.join([num for num in no.group() if num.isdigit()])\n return '(nf)'", "def test_number_suffix(self):\r\n self.assertEquals(preview.latex_preview('1.618k'), r'1.618\\text{k}')", "def isfixline(number):\n if number[0] == '(':\n return True\n return False", "def test_power_parens(self):\r\n self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')", "def fix_greek_in_mathml(self, xml):\r\n def gettag(expr):\r\n return re.sub('{http://[^}]+}', '', expr.tag)\r\n\r\n for k in xml:\r\n tag = gettag(k)\r\n if tag == 'mi' or tag == 'ci':\r\n usym = unicode(k.text)\r\n try:\r\n udata = unicodedata.name(usym)\r\n except Exception:\r\n udata = None\r\n # print \"usym = %s, udata=%s\" % (usym,udata)\r\n if udata:\t\t\t# eg \"GREEK SMALL LETTER BETA\"\r\n if 'GREEK' in udata:\r\n usym = udata.split(' ')[-1]\r\n if 'SMALL' in udata:\r\n usym = usym.lower()\r\n #print \"greek: \",usym\r\n k.text = usym\r\n self.fix_greek_in_mathml(k)\r\n return xml", "def sanitize_ean13(ean13):\n if not ean13:\n return \"0000000000000\"\n ean13 = re.sub(\"[A-Za-z]\",\"0\",ean13);\n ean13 = re.sub(\"[^0-9]\",\"\",ean13);\n ean13 = ean13[:13]\n if len(ean13) < 13:\n ean13 = ean13 + '0' * (13-len(ean13))\n return ean13[:-1] + str(ean_checksum(ean13))", "def checkdia(tarea_mensual):\n\n if tarea_mensual == 1:\n return 1\n else:\n return 0", "def com_google_fonts_check_048(ttFont):\n from fontbakery.utils import get_glyph_name\n\n def getGlyphEncodings(font, names):\n result = set()\n for subtable in font['cmap'].tables:\n if subtable.isUnicode():\n for codepoint, name in subtable.cmap.items():\n if name in names:\n result.add(codepoint)\n return result\n\n if ttFont['post'].formatType == 3.0:\n yield SKIP, \"Font has version 3 post table.\"\n else:\n failed = False\n space_enc = getGlyphEncodings(ttFont, [\"uni0020\", \"space\"])\n nbsp_enc = getGlyphEncodings(\n ttFont, [\"uni00A0\", \"nonbreakingspace\", \"nbspace\", \"nbsp\"])\n space = get_glyph_name(ttFont, 0x0020)\n if 0x0020 not in space_enc:\n failed = True\n yield FAIL, Message(\"bad20\", (\"Glyph 0x0020 is called \\\"{}\\\":\"\n \" Change to \\\"space\\\"\"\n \" or \\\"uni0020\\\"\").format(space))\n\n nbsp = get_glyph_name(ttFont, 0x00A0)\n if 0x00A0 not in nbsp_enc:\n if 0x00A0 in space_enc:\n # This is OK.\n # Some fonts use the same glyph for both space and nbsp.\n pass\n else:\n failed = True\n yield FAIL, Message(\"badA0\", (\"Glyph 0x00A0 is called \\\"{}\\\":\"\n \" Change to \\\"nbsp\\\"\"\n \" or \\\"uni00A0\\\"\").format(nbsp))\n\n if failed is False:\n yield PASS, \"Font has **proper** whitespace glyph names.\"", "def com_google_fonts_check_048(ttFont):\n from fontbakery.utils import get_glyph_name\n\n def getGlyphEncodings(font, names):\n result = set()\n for subtable in font['cmap'].tables:\n if subtable.isUnicode():\n for codepoint, name in subtable.cmap.items():\n if name in names:\n result.add(codepoint)\n return result\n\n if ttFont['post'].formatType == 3.0:\n yield SKIP, \"Font has version 3 post table.\"\n 
else:\n failed = False\n space_enc = getGlyphEncodings(ttFont, [\"uni0020\", \"space\"])\n nbsp_enc = getGlyphEncodings(\n ttFont, [\"uni00A0\", \"nonbreakingspace\", \"nbspace\", \"nbsp\"])\n space = get_glyph_name(ttFont, 0x0020)\n if 0x0020 not in space_enc:\n failed = True\n yield FAIL, Message(\"bad20\", (\"Glyph 0x0020 is called \\\"{}\\\":\"\n \" Change to \\\"space\\\"\"\n \" or \\\"uni0020\\\"\").format(space))\n\n nbsp = get_glyph_name(ttFont, 0x00A0)\n if 0x00A0 not in nbsp_enc:\n if 0x00A0 in space_enc:\n # This is OK.\n # Some fonts use the same glyph for both space and nbsp.\n pass\n else:\n failed = True\n yield FAIL, Message(\"badA0\", (\"Glyph 0x00A0 is called \\\"{}\\\":\"\n \" Change to \\\"nbsp\\\"\"\n \" or \\\"uni00A0\\\"\").format(nbsp))\n\n if failed is False:\n yield PASS, \"Font has **proper** whitespace glyph names.\"", "def unitsDetector(self, num):\n try:\n num = int(num)\n except:\n sys.exit('Invalid input! Method only takes ints or floats.')\n \n digits = 0\n while num > 1:\n num /= 10\n digits += 1\n \n digits -= 1\n ind = digits // 3\n units = {3: 'B', 2: 'M', 1: 'K', 0: ''}[ind]\n \n return 10 ** (ind * 3), units", "def test_convertsent(self):\n convert6 = cnv()\n\n convert6.setnum(\"also haben wir hundertunddrei nein hundert 4 tausend\")\n self.assertEqual(convert6.getnum(), 104000)\n\n convert6.setnum(\"also ein haben wir hundertunddrei nein tausend\")\n self.assertEqual(convert6.getnum(), 1000)\n\n convert6.setnum(\" \")\n self.assertEqual(convert6.getnum(), 0)\n\n convert6.setnum(\"fünfundzwanzig\")\n self.assertEqual(convert6.getnum(), 25)\n\n convert6.setnum(\"albert ein\")\n self.assertEqual(convert6.getnum(), 1)", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def trapezoid_area(lower, leg , upper):\n area = (((upper+lower)/2)*leg)\n return area", "def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)", "def _validate_details_of_charges_71A(self, val):\n return val", "def test_sum_tall(self):\r\n self.assertEquals(\r\n preview.latex_preview('(2+3^2)'),\r\n r'\\left(2+3^{2}\\right)'\r\n )", "def classify(cls, i):\r\n # chars \r\n if i[4] == None:\r\n return 1\r\n elif (float(i[4])) <= 141.5:\r\n return 1\r\n else:\r\n return 1", "def is_numberish(G):\n return True", "def test_bad_parens(self):\r\n with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):\r\n preview.LatexRendered('x^2', parens='not parens')", "def _parse_othersymbol(line):\n return None", "def test_number(self):\n\n tokens = list(Lexer(\"123 123.456 .456 .123 .\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 123),\n Token(TokenType.NUMBER, 123.456),\n Token(TokenType.NUMBER, 0.456),\n Token(TokenType.NUMBER, 0.123),\n Token(TokenType.NUMBER, 0.0)]\n self.assertEqual(tokens, answer)", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def test_phred_to_ascii33(self):\r\n self.assertEqual(phred_to_ascii33(0), '!')\r\n self.assertEqual(phred_to_ascii33(30), '?')", "def 
ConsistentTrees_ASCII_099():\n dt= [('a' , np.float32), #scale(0)\n ('haloid_CT' , np.int64), #id(1)\n ('a_desc' , np.float32), #desc_scale(2)\n ('descID' , np.int64), #desc_id(3)\n ('n_prog' , np.int32), #num_prog(4)\n ('hostid_LM' , np.int64), #pid(5) \n ('hostid_MM' , np.int64), #upid(6)\n ('desc_hostid_MM' , np.int64), #desc_pid(7)\n ('is_phantom' , np.int32), #phantom(8) \n ('sam_mvir' , np.float32), #sam_mvir(9)\n ('mhalo' , np.float32), #mvir(10) \n ('rvir' , np.float32), #rvir(11)\n ('rscale' , np.float32), #rs(12)\n ('vrms' , np.float32), #vrms(13) \n ('is_mmp' , np.int32), #mmp?(14)\n ('a_lastMM' , np.float32), #scale_of_last_MM(15)\n ('vmax' , np.float32), #vmax(16)\n ('x_pos' , np.float32), #x(17)\n ('y_pos' , np.float32), #y(18)\n ('z_pos' , np.float32), #z(19)\n ('x_vel' , np.float32), #vx(20)\n ('y_vel' , np.float32), #vy(21)\n ('z_vel' , np.float32), #vz(22)\n ('x_ang' , np.float32), #Jx(23)\n ('y_ang' , np.float32), #Jy(24)\n ('z_ang' , np.float32), #Jz(25)\n ('spinParameter' , np.float32), #Spin(26)\n ('BFirstID' , np.int64), #Breadth_first_ID(27)\n ('DFirstID' , np.int64), #Depth_first_ID(28) \n ('rootIndex' , np.int64), #Tree_root_ID(29) \n ('haloid' , np.int64), #Orig_halo_ID(30)\n ('snapid' , np.int32), #Snap_num(31) \n ('NextCoDFirstID' , np.int64), #Next_coprogenitor_depthfirst_ID(32)\n ('LastDFirstID' , np.int64), #Last_progenitor_depthfirst_ID(33) \n ('rscale_Klypin' , np.float32), #rs_Klypin\n ('mhalo+unbound' , np.float32), #mvir_all \n ('mhalo_200b' , np.float32), #m200b \n ('mhalo_200c' , np.float32), #m200c\n ('mhalo_500c' , np.float32), #m500c\n ('mhalo_2500c' , np.float32), #m2500c\n ('x_off' , np.float32), #Xoff\n ('v_off' , np.float32), #Yoff\n ('spin_Bullock' , np.float32), #spin_bullock\n ('b_to_a' , np.float32), #b_to_a \n ('c_to_a' , np.float32), #c_to_a\n ('x_a' , np.float32), #A[x]\n ('y_a' , np.float32), #A[y]\n ('z_a' , np.float32), #A[z] \n ('b_to_a_500c' , np.float32), #b_to_a(500c)\n ('c_to_a_500c' , np.float32), #c_to_a(500c) \n ('x_a_500c' , np.float32), #A[x](500c) \n ('y_a_500c' , np.float32), #A[y](500c) \n ('z_a_500c' , np.float32), #A[z](500c)\n ('T_U' , np.float32), #T/|U|\n ('Mpseudo_Behroozi', np.float32), #M_pe_Behroozi\n ('Mpseudo_Diemer' , np.float32), #M_pe_Diemer\n ('rhalf_mass' , np.float32), #Halfmass_Radius \n ] \n return dt", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def onlynumbers(name):\n return re.sub(\"[a-zA-Z:]\\(\\)\\:\",\"\",name)", "def is_dementia(code):\n assert isinstance(code, str)\n code_set = ('294.10', '294.11', '294.20', '294.21', '2941', '29411', '2942', '29421')\n code_set += ('290',)\n code_set += ('F01', 'F02', 'F03')\n return code.startswith(code_set)", "def unphred_string(phred):\n arr = [(ord(c) - 33) / 30. 
for c in phred]\n return arr", "def get_nummeric_only(text):\n\n nummeric_string =\"\"\n \n for character in text:\n if character.isnumeric():\n \n nummeric_string+=character\n \n return nummeric_string", "def numerify_iso_label(lab):\n from sage.databases.cremona import class_to_int\n if 'CM' in lab:\n return -1 - class_to_int(lab[2:])\n else:\n return class_to_int(lab.lower())", "def make_g_number():\n return fake.bothify(text='??#########')", "def shorten_rtept(rtept):\n return rtept.upper()[:6].strip()", "def test_30_phonenumbers_empty(self):\n number_phone = self.samples[3]\n res = self.pn._symbol_set_char(number_phone)\n self.assertEqual(res, None, 'e164 phone formatting failed')\n res = self.pn._symbol_get(number_phone)\n self.assertEqual(res, None, 'International phone formatting failed')", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def classify(cls, i):\r\n # chars \r\n if i[4] == None:\r\n return 0\r\n elif (float(i[4])) <= 141.5:\r\n return 0\r\n else:\r\n return 1", "def _tenth(self, number):\n if 70 <= number < 80:\n return self._tenth(60) + self.separator \\\n + self._tenth(number - 60) if number - 60 > 0 else ''\n elif 80 <= number < 90:\n return self._units(4) + '-' \\\n + self._tenth(20) + self.separator \\\n + self._units(int(str(number)[1:]))\n elif 90 <= number < 100:\n return self._units(4) + self.separator \\\n + self._tenth(20) + self.separator \\\n + self._tenth(number - 80) if number - 80 > 0 else ''\n\n elif number > 11 and number % 10 == 1:\n return sppasNumEuropeanType._get_lang_dict(self)[int(str(number)[0])*10] \\\n + '-et-' + self._units(int(str(number)[1:]))\n\n else:\n return sppasNumEuropeanType._tenth(self, number)", "def geminates_checker(self, s):\n s = re.sub(r'([йцкгшщзхфвпрлджчсмтб])\\1+', r'\\1', s)\n s = re.sub(r'н{2}([йцкгшщзхфвпрлджчсмтб ])', r'н\\1', s) \n return s", "def com_google_fonts_check_047(ttFont, missing_whitespace_chars):\n if missing_whitespace_chars != []:\n yield FAIL, (\"Whitespace glyphs missing for\"\n \" the following codepoints:\"\n \" {}.\").format(\", \".join(missing_whitespace_chars))\n else:\n yield PASS, \"Font contains glyphs for whitespace characters.\"", "def com_google_fonts_check_047(ttFont, missing_whitespace_chars):\n if missing_whitespace_chars != []:\n yield FAIL, (\"Whitespace glyphs missing for\"\n \" the following codepoints:\"\n \" {}.\").format(\", \".join(missing_whitespace_chars))\n else:\n yield PASS, \"Font contains glyphs for whitespace characters.\"", "def test_20_phonenumbers_UnicodeDecodeError(self):\n number_phone = self.samples[2]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def area_report(\n file=None\n):\n for entry in file:\n elems = entry.strip().split(' ')\n elems = prune(elems)\n if len(elems) >= 3:\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"cell\" and str(elems[2]) == \"area:\":\n area = float(elems[3])\n\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"area:\":\n if str(elems[2]) != \"undefined\":\n if area < float(elems[2]):\n area = float(elems[2])\n \n area /= 1000000.0\n return area", "def phone(raw_phone):\n\n phone = raw_phone.replace('+33', '0')\n phone = '{} {} {} {} {}'.format(\n phone[0:2],\n phone[2:4],\n phone[4:6],\n phone[6:8],\n phone[8:10])\n return phone", "def mccarthy_ninety_one(number):\n if number > 100:\n print('M(%d) since %d is greater than 100' % (number - 10, number))\n return number - 10\n else:\n print('M(M(%d)) since %d is 
less than or equal to 100' % (number + 11, number))\n return mccarthy_ninety_one(mccarthy_ninety_one(number + 11))", "def main():\r\n lst = list(map(int, list(str(NUMBER))))\r\n product = 0\r\n\r\n for i in range(len(lst)):\r\n\r\n if i + 13 >= len(lst):\r\n break\r\n\r\n thirteen = lst[i:i + 13]\r\n\r\n if prod(thirteen) > product:\r\n product = prod(thirteen)\r\n\r\n print(f'{\" × \".join(list(map(str, thirteen)))} = {product}')", "def get_number_in_portuguese(self, number, result=\"\"):\n number_as_str = str(number)\n\n # Check if the first char is a \"-\" sign\n first_position = number_as_str[0]\n if \"-\" == first_position:\n result = \"menos\"\n # Removes the negative sign from number\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n number_len = len(number_as_str)\n\n if number_len > 1 and self._is_zero_sequence(number_as_str):\n # the rest of the number ends in a zero sequence\n return result.strip()\n\n if first_position == '0':\n if number_len > 1:\n # Cut off the leading zero\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n if not result or result == '-':\n # The number is zero\n return self.ZERO\n\n if number_len > 5:\n # Out of range\n raise NotAcceptable(detail=self.MAX_LIMIT_ERROR)\n\n if number_len == 5:\n # Extract the dozen-thounsands\n first_two_positions = number_as_str[0] + number_as_str[1]\n result = ' '.join([result, self._get_two_digits_number_in_extension(first_two_positions), 'mil'])\n\n if self._is_zero_sequence(number_as_str[2::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[2::], result=result)\n\n if number_len == 4:\n result = ' '.join([result, self.THOUSANDS[first_position]])\n\n if self._is_zero_sequence(number_as_str[1::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 3:\n is_following_zeros = self._is_zero_sequence(number_as_str[1::])\n\n if first_position == '1':\n # Number ends in 1xx\n if is_following_zeros:\n # Number is 100\n result = ' '.join([result, self.CEM])\n return result.strip()\n result = ' '.join([result, 'cento e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n result = ' '.join([result, self.HUNDREDS[first_position]])\n if is_following_zeros:\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 2:\n result = ' '.join([result, self._get_two_digits_number_in_extension(number_as_str)])\n return result.strip()\n\n if number_len == 1:\n result = ' '.join([result, self.UNITS[number_as_str]])\n\n return result.strip()", "def apartment_number():\r\n\r\n type = _random.choice(['Apt.', 'Apartment', 'Suite', 'Ste.'])\r\n letter = _random.choice(string.ascii_letters).capitalize()\r\n directions = ['E', 'W', 'N', 'S']\r\n short = '{} {}'.format(type, _random.randint(1, 999))\r\n long = '{} {}{}'.format(type, _random.randint(1, 999), letter)\r\n alt = '{} {}-{}{}'.format(type, _random.choice(directions),\r\n _random.randint(1, 999), letter)\r\n return _random.choice([short, long, alt])", "def clean_ean(ean):\n return [int(x) for x in ean.translate(None, \" -\")]", "def test_geography_area(self):\n # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE 
code='77002';\n z = Zipcode.objects.annotate(area=Area(\"poly\")).get(code=\"77002\")\n # Round to the nearest thousand as possible values (depending on\n # the database and geolib) include 5439084, 5439100, 5439101.\n rounded_value = z.area.sq_m\n rounded_value -= z.area.sq_m % 1000\n self.assertEqual(rounded_value, 5439000)" ]
[ "0.62425196", "0.57021415", "0.56712484", "0.54320395", "0.54288685", "0.5344969", "0.52775437", "0.52460563", "0.5242392", "0.5193709", "0.5157009", "0.51362306", "0.51212054", "0.50575703", "0.50537056", "0.50524485", "0.5049207", "0.5046117", "0.5036556", "0.5035202", "0.49796414", "0.49796098", "0.4979024", "0.49708945", "0.4951231", "0.49336678", "0.4931934", "0.49067298", "0.48934844", "0.48864698", "0.48815748", "0.48783374", "0.48774844", "0.4876532", "0.48604652", "0.48551565", "0.4851784", "0.4834655", "0.4831831", "0.48280713", "0.48272294", "0.48210913", "0.48121136", "0.4805683", "0.47954825", "0.4790976", "0.47737393", "0.47707322", "0.47707322", "0.47707322", "0.47696826", "0.47670624", "0.47665557", "0.47659454", "0.47651666", "0.4763987", "0.47627836", "0.4761071", "0.47610086", "0.4760697", "0.4760697", "0.47423035", "0.47368038", "0.47357586", "0.47296596", "0.4727258", "0.47238258", "0.47228137", "0.47219408", "0.47202206", "0.4711143", "0.47079548", "0.46969444", "0.46949822", "0.46949822", "0.46918246", "0.4677645", "0.46756375", "0.46741983", "0.4671697", "0.46702787", "0.46699214", "0.46659318", "0.46641192", "0.46619162", "0.46613058", "0.4661289", "0.4660857", "0.46556458", "0.4648599", "0.46469864", "0.46469864", "0.46399656", "0.46341872", "0.46336374", "0.4632298", "0.46265104", "0.4625861", "0.46204597", "0.46182764", "0.46152055" ]
0.0
-1
Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9.
def istele(number):
    if number[:3] == '140':
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def phone(raw_phone):\n\n phone = raw_phone.replace('+33', '0')\n phone = '{} {} {} {} {}'.format(\n phone[0:2],\n phone[2:4],\n phone[4:6],\n phone[6:8],\n phone[8:10])\n return phone", "def phoneDisplay(number):\n return number[0:3] + \"&nbsp;&middot;&nbsp;\" + number[3:6] + \"&nbsp;&middot;&nbsp;\" + number[6:10]", "def normalize(phone):\n d = re.sub('\\D', '', phone)\n return '+7 (%s) %s-%s-%s' % (d[1:4], d[4:7], d[7:9], d[9:11])", "def clean_phone(self):\n phone = self.cleaned_data['phone']\n if phone.startswith('8') and len(phone) > 7:\n return phone.replace('8', '+7', 1)\n\n return phone", "def format_and_validate_phonenumber(number):\n \n if number.startswith('+'):\n number = number.replace('+', '00', 1)\n \n regex = re.compile('(\\/|\\+|-| )')\n number = regex.sub('', number)\n \n if number.startswith(COUNTRY_CODE_PHONE):\n number = number.replace(COUNTRY_CODE_PHONE, '0', 1)\n\n # if the conversion to int does not fail then\n # there are only numbers included in the string\n try:\n int(number)\n except ValueError:\n raise ValidationError(_('Please enter numbers only.'))\n \n if number.startswith(START_MOBILE_PHONE):\n return number\n else:\n raise ValidationError(_('Please enter a cell phone number.'))", "def clean_phone(self):\n data = self.cleaned_data['phone']\n data = data.strip(' +').replace('-', '')\n if len(data) == 12:\n data = data[3:]\n\n return data", "def phone_number():\r\n\r\n x = ''.join(str(_random.randrange(0, 10)) for i in xrange(10))\r\n y = '%s-%s-%s' % (x[0:3], x[3:6], x[6:])\r\n return y", "def ismobile(number):\n if number[0] in ['7', '8', '9']:\n return True\n return False", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def strip_phone_prefix(self, phone_num):\n # FIXME more accurate check\n if phone_num.startswith('+86'):\n return phone_num.replace('+86', '')\n if len(phone_num) != 11:\n return None\n return phone_num", "def mobile_validator(mobile):\n if mobile[0:4] != '+989':\n raise ValidationError('Invalid mobile')", "def test_address__normalize_phone_number__2():\n assert '+491234567890' == normalize_phone_number(\n '+49 (1234) 5678 - 90X', '+49')", "def test_address__normalize_phone_number__4():\n assert '+491234507090' == normalize_phone_number('01234/5070-90', '+49')", "def test_address__normalize_phone_number__7():\n assert '+421234007891' == normalize_phone_number(\n '0042-1234/0078-91', '+49')", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def test_address__normalize_phone_number__6():\n assert '+421234567891' == normalize_phone_number(\n '0042-1234/5678-91', '+49')", "def test_formatted_number(self):\n node = self.create_xml_patient({'Mobile_Number': '(33)-0001112222'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertEqual(patient.contact.phone, '+330001112222')", "def test_address__normalize_phone_number__3():\n assert '+491234567891' == normalize_phone_number('01234/5678-91', '+49')", "def validate_mobile(self, mobile):\n self.mobile = mobile.strip()\n example = \"mobile number (ex. 
+2346787646)\"\n if re.match(r'(^[+0-9]{1,3})*([0-9]{10,11}$)',\n self.mobile) is None:\n raise GraphQLError(\n ERROR_RESPONSES[\"invalid_field_error\"].format(example))\n return self.mobile", "def parse_phone(phone):\n if isinstance(phone, int):\n return str(phone)\n else:\n phone = re.sub(r'[+()\\s-]', '', str(phone))\n if phone.isdigit():\n return phone", "def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix", "def test_address__normalize_phone_number__5():\n assert '01234567891' == normalize_phone_number('01234/5678-91', '')", "def test_format_phone_raw(self):\n number = '8095551234'\n self.assertEqual(format_phone(number), '(809) 555-1234')", "def test_address__normalize_phone_number__1():\n assert '+491234567890' == normalize_phone_number('+491234567890', '+49')", "def clean_phone(number_str):\n number_str = number_str or ''\n number_str = number_str.replace('(', '').replace(')', '')\n number_str = number_str.replace('ext. ', 'x').replace('ext ', 'x')\n number_str = number_str.split(',')[0].strip()\n\n if number_str:\n return number_str", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def telephone(value, arg=None):\n \n # Normalise a number\n value = value.replace(\" \", \"\").replace(\"-\", \"\")\n if value.startswith(\"0\"):\n value = \"+44\" + value[1:]\n normalised = value\n \n # Check if it's a number which is formatted in a special way\n if normalised in UNUSUAL_NUMBERS:\n value = UNUSUAL_NUMBERS[normalised]\n else:\n # Figure out how to format that number\n \n # Convert UK numbers into national format\n if value.startswith(\"+44\"):\n value = \"0\" + value[3:]\n \n # Now apply rules on how to split up area codes\n if value[:8] in ('01332050', '01382006'):\n # Direct dial only\n value = value[:5] + \" \" + value[5:]\n elif value[:7] in ('0141005', '0117101') or value[:6] in ('011800',):\n # Direct dial only\n value = value[:4] + \" \" + value[4:7] + \" \" + value[7:]\n elif value[:7] in ('0200003',):\n # Direct dial only\n value = value[:3] + \" \" + value[3:7] + \" \" + value[7:]\n elif value.startswith('01'):\n if value[2] == '1' or value[3] == '1':\n # 4 digit area codes\n area_code = value[:4]\n local_part = value[4:7] + \" \" + value[7:]\n elif value[:6] in (\n '013873', # Langholm\n '015242', # Hornby\n '015394', # Hawkshead\n '015395', # Grange-over-Sands\n '015396', # Sedbergh\n '016973', # Wigton\n '016974', # Raughton Head\n '016977', # Brampton\n '017683', # Appleby\n '017684', # Pooley Bridge\n '017687', # Keswick\n '019467', # Gosforth\n ):\n # 6 digit area codes\n area_code = value[:4] + \" \" + value[4:6]\n local_part = value[6:]\n else:\n # 5 digit\n area_code = value[:5]\n local_part = value[5:]\n \n value = \"(%s) %s\" % (area_code, local_part)\n \n elif value.startswith('02'):\n # 3 digit area codes\n value = \"(%s) %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('0500') or value.startswith('0800'):\n # direct dial - 4 digit prefix, short following\n value = \"%s %s\" % (value[:4], value[4:])\n 
\n elif value.startswith('03') or value.startswith('08') or value.startswith('09'):\n # direct dial - 4 digit prefix\n value = \"%s %s %s\" % (value[:4], value[4:7], value[7:])\n \n elif value.startswith('05') or value.startswith('070'):\n # direct dial - 3 digit prefix\n value = \"%s %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('07'):\n # direct dial - 5 digit prefix, short following\n value = \"%s %s\" % (value[:5], value[5:])\n\n # Now apply University rules:\n if value[:10] in ('(01865) 27', '(01865) 28', '(01865) 43', '(01865) 61'):\n # Oxford - list of internal number prefixes here:\n # http://www.oucs.ox.ac.uk/telecom/directories/intdiraccess.xml\n value = \"(01865 \" + value[8] + \")\" + value[9:]\n\n if arg == 'nolink':\n return value\n else:\n return mark_safe('<a href=\"tel:%s\">%s</a>' % (normalised, value))", "def number(self):\n return str(self._phone)", "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n telephone = telephone.replace('%', '')\n if telephone in junk:\n return ''\n else:\n return telephone", "def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' % phone_number)", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def fix_crappy_phone_number_formatting(phone_number):\n m = re.match(r'(\\d)?.?(\\d{3})\\D*(\\d{3})\\D*(\\d{4})\\D*(\\d*)$', phone_number)\n if m:\n fixed_number = f'+{m.group(1) or \"1\"}({m.group(2)}){m.group(3)}-{m.group(4)} {\"x\"+m.group(5) if m.group(5) else \"\"}'\n return fixed_number", "def normalize_prefix(string, logger_=_LOGGER):\n string = str(string)\n\n if not re.match(\"[0-9]\", string):\n if not re.match(\"^[lrmnLRMN]_\", string):\n new_string = string[0].upper() + \"_\" + string[1:]\n return new_string\n return string\n logger.log(level=\"warning\", message=\"Prefix has a number\", logger=logger_)\n return string", "def validate_phone(self, value):\n pattern = re.compile(r'(^[+0-9]{1,3})*([0-9]{8,15}$)', re.IGNORECASE)\n value = value.replace(\" \", \"\")\n if pattern.match(value) is None:\n raise ValidationError(_('Please insert correct phone number.'))\n return value", "def test_format_phone_formatted(self):\n number1 = '809.555.1234'\n self.assertEqual(format_phone(number1), '(809) 555-1234')\n number2 = '(888) 555-3456'\n self.assertEqual(format_phone(number2), '(888) 555-3456')", "def phone(self) -> str:\n return pulumi.get(self, \"phone\")", "def compact(number):\n number = clean(number).strip().replace(' ', '-').split('-')\n if len(number) == 4:\n # zero pad the different sections if they are found\n lengths = (2, 4, 7, 3)\n return ''.join(n.zfill(l) for n, l in zip(number, lengths))\n else:\n # otherwise zero pad the account type\n number = ''.join(number)\n return number[:13] + number[13:].zfill(3)", "def 
clean(number):\n digits = [c for c in number if c.isdigit()]\n if len(digits) == 11 and digits[0] == \"1\":\n return ''.join(digits[1:])\n elif len(digits) != 10:\n return \"0000000000\"\n else:\n return ''.join(digits)", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def safe_number(self):\n mask = '*' * (len(self.account_number) - 4)\n return '{0}{1}'.format(mask, self.account_number[-4:])", "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def ad_rep_lead_phone(obj):\n if obj.phone_number is None:\n phone_number = ''\n else:\n phone_number = format_phone(obj.phone_number)\n return \"%s\" % phone_number", "def __str__(self):\n phone_string = \"(\"\n first_three_digits = \"\"\n next_three_digits = \"\"\n last_four_digits = \"\"\n for i in range(self.NUM_OF_DIGITS):\n if i <= 2:\n first_three_digits += self._phone[i]\n elif 3 <= i <= 5:\n next_three_digits += self._phone[i]\n else:\n last_four_digits += self._phone[i]\n phone_string += first_three_digits + \") \" + \\\n next_three_digits + \"-\" + last_four_digits\n return phone_string", "def phone_number_organizer(self, key):\n\t\ttry:\n\t\t\tphone_number = key[u'phone']\n\t\t\tformat_number = '(' + phone_number[0:3] + ') ' + phone_number[3:6] + '-' + phone_number[6:]\n\t\t\treturn format_number\n\t\texcept KeyError:\n\t\t\tprint [u'name'], \"requires manual phone number verification.\"\n\t\t\treturn \"Manual Input\"", "def create_phone_number(n):", "def leading_number(val):\n n=\"\"\n for c in val:\n if c not in \"0123456789.\":\n break\n n = n + c\n return n", "def compact(number):\n return clean(number, ' -').strip()", "def reg_phone(str_phone:str) -> object:\r\n\r\n [ind, nph]=str_phone.strip(\"+\").split(\" \")\r\n #Cut off the local 0\r\n #Create regexes for 3 cases : with IND and without 0, without IND and with 0, without IND and 0\r\n formats=[\\\r\n \"(?P<ind>{})? 
?0?(?P<num>{})\".format(ind, ' ?'.join(list(nph.rstrip('0'))))\r\n ]\r\n return re.compile(f'({\"|\".join(formats)})')", "def getMangledNum(self):\n return (\"X\" * (len(self.num)-4)) + self.num[-4:]", "def Space(num):\n return String(num, \" \")", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def check_format_user_phone(phone):\n match = re.match(r'^\\+[0-9]{10,}$', phone)\n if not match:\n raise exceptions.ValidationError('phone is not valid!')\n return phone", "def generate_random_phone():\n first = str(random.randint(100, 999))\n second = str(random.randint(1, 888)).zfill(3)\n last = (str(random.randint(1, 9998)).zfill(4))\n while last in ['1111', '2222', '3333', '4444', '5555', '6666', '7777', '8888']:\n last = (str(random.randint(1, 9998)).zfill(4))\n return '{}-{}-{}'.format(first, second, last)", "def validate_phone(phone:str) -> bool:\r\n phone = phone.replace(\"-\", \"\").replace(\"(\", \"\").replace(\")\", \"\")\r\n return phone.isdigit() and len(phone) == 10", "def test_00_phonenumbers_formatting_en_US(self):\n number_phone = self.samples[0]\n res = self.pn._symbol_set_char(number_phone)\n self.assertEqual(res, '+19545551234', 'e164 phone formatting failed')\n res = self.pn._symbol_get(number_phone)\n self.assertEqual(res, '+1 954-555-1234', 'International phone formatting failed')", "def compact(number):\n return clean(number, ' -./,').strip()", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def regular_telephone(telephone_list):\n return_tel_list = []\n for telephone in telephone_list:\n if len(telephone) == 4:\n telephone = '{}{}{}'.format('<b>', telephone, '</b>')\n elif len(telephone) == 5:\n telephone = '{}-{}-{}'.format(telephone[0], telephone[1:3], telephone[3:5])\n elif len(telephone) == 6:\n if telephone[0] == '8':\n telephone = '{}({})-{}{}'.format('<b>', telephone[0:3], telephone[3:6], '</b>')\n else:\n telephone = '{}-{}-{}'.format(telephone[0:2], telephone[2:4], telephone[4:6])\n elif len(telephone) == 7:\n telephone = '{}-{}-{}'.format(telephone[0:3], telephone[3:5], telephone[5:7])\n elif len(telephone) == 10:\n telephone = '{}-{}-{}-{}'.format(telephone[0:3], telephone[3:6], telephone[6:8], telephone[8:10])\n return_tel_list.append(telephone)\n return_tel_list.sort(key=lambda x: len(x), reverse=True)\n return return_tel_list", "def linkify_phone(text: str) -> str:\n\n def strip_whitespace(number: str) -> str:\n return re.sub(r'\\s', '', number)\n\n def is_valid_length(number: str) -> bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') -> str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n\n return match.group(0)\n\n return _phone_ch_html_safe.sub(handle_match, text)", "def update_phone(phone, phone_mapping):\n results = []\n for iphone in 
re.split(',|;',phone):\n patterns = phone_pattern_re.search(iphone)\n if patterns:\n numbers = patterns.groups()\n if numbers[0] == \"852\":\n results.append(re.compile(r'\\D?(\\d{0,4}?)\\D{0,2}(\\d{4})\\D?(\\d{4})$', iphone))\n elif numbers[0] in phone_mapping:\n results.append (\"+852\"+ \" \" + numbers[1] + numbers[2])\n return ';'.join(results)", "def get_phone(self, list_item):\n phone = list_item.find('span', {'class': 'biz-phone'})\n return phone.get_text().strip()", "def onlynumbers(name):\n return re.sub(\"[a-zA-Z:]\\(\\)\\:\",\"\",name)", "def compact(number):\n return clean(number, ' -.').upper().strip()", "def validate_number(number):\n modified = False\n number = number.replace(\"(\", \"\").replace(\")\", \"\").replace(\"-\", \"\").replace(\" \", \"\").replace(\"+\", \"\")\n if len(number) == 11 and number.isdigit() and not number[1] in \"01\":\n number = \"+\" + number\n modified = True\n elif len(number) == 10 and number.isdigit() and not number[0] in \"01\":\n number = \"+1\" + number\n modified = True\n return number, modified", "def clean_phone_number(field_name):\n\n @check_field_is_empty(field_name)\n def wrapper(self):\n \"\"\"Decorator wrapped method.\n \"\"\"\n\n value = self.cleaned_data.get(field_name)\n\n # allow for a '+' prefix which means '00'\n if value[0] == '+':\n value = '00' + value[1:]\n\n if not value.isdigit():\n raise forms.ValidationError(\"Only numerical characters are allowed\")\n\n return value\n return wrapper", "def extract_digits(cls, phone_number):\n extracted_num = \"\"\n for ch in phone_number:\n if ch in cls.INTEGER_STRING:\n extracted_num += ch\n return extracted_num", "def phone_valid(number):\n model = '[0-9]{2} [0-9]{5}-[0-9]{4}'\n\n return re.findall(model, number)", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def cleanInteger(number):\n \n number = str(number).replace(' ', '')\n \n test = number\n for i in range(10):\n test = test.replace(str(i), '')\n \n if test:\n return None\n \n return number", "def phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"phone_number\")", "def sanitize_phone_number(self, phone_number, message_type=\"sms\", add_controls=False):\n if add_controls:\n phone_number = self.add_caracteres_controle(phone_number)\n\n if message_type == \"whatsapp\":\n return \"{}:{}\".format(message_type, phone_number)\n\n elif message_type == \"sms\":\n return \"{}\".format(phone_number)\n\n return phone_number", "def intspace(value):\n # http://softwaremaniacs.org/forum/django/19392/\n if value is None:\n return None\n orig = force_str(value)\n new = re.sub(r\"^(-?\\d+)(\\d{3})\", r\"\\g<1> \\g<2>\", orig)\n return new if orig == new else intspace(new)", "def mask_acct_no(column):\n return column.str.replace(r'\\d*\\*{3,}\\d*|\\d+(\\-\\d+){2,}', ' $ACCT_NO ')", "def validate_phone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(phone=value).exists():\n raise serializers.ValidationError('phone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def format(number):\n 
number = compact(number)\n return ' '.join((number[:2], number[2:5], number[5:8], number[8:]))", "def validate_phone_number(phone_number):\n\n check = re.fullmatch(r\"^07\\d{8}\", phone_number)\n\n if check:\n return True\n else:\n return False", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def format_number(number):\n return f'{number:8,}'", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def intspace(value):\n orig = force_unicode(value)\n new = re.sub(\"^(-?\\d+)(\\d{3})\", '\\g<1> \\g<2>', orig)\n if orig == new:\n return new\n else:\n return intspace(new)", "def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)", "def validate_phone_number(val):\n if not val.isdigit() or len(val) < 3:\n raise argparse.ArgumentTypeError(\"Invalid phone number\")\n return val", "def test_valid_phone_valid():\n assert valid_phone(\"000-000-0000\")\n assert valid_phone(\"0000000000\")", "def validate_telephone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(telephone=value).exists():\n raise serializers.ValidationError('telephone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def ni_number_check(number):\n ni_nuber = re.match(r\"^\\s*[a-zA-Z]{2}(?:\\s*\\d\\s*){6}[a-zA-Z]?\\s*$\", number)\n if ni_nuber:\n return True\n return False", "def test_30_phonenumbers_empty(self):\n number_phone = self.samples[3]\n res = self.pn._symbol_set_char(number_phone)\n self.assertEqual(res, None, 'e164 phone formatting failed')\n res = self.pn._symbol_get(number_phone)\n self.assertEqual(res, None, 'International phone formatting failed')", "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def __init__(self, phone_number):\n self.number = self.clean(phone_number)", "def __init__(self, number: str) -> object:\n number = re.sub(\" +\", \" \", number).strip()\n nr = re.match((r\"^(?:\\+?1)?\\W?\"\n r\"\\(?([2-9][0-9]{2})\\)?\\W?\"\n r\"([2-9][0-9]{2})\\W?\"\n r\"([0-9]{4})$\"), number)\n if nr is None:\n raise ValueError(\"Not a phonenumber\")\n self.number = ''.join(nr.groups())\n self.area_code = nr.group(1)", "def clean_num(quote):\n for char in ROMAN:\n quote = 
quote.replace(*char)\n return quote", "def ldap_get_number(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n number = result.get(\"mobile-phone\")[0]\n return number\n\n return None", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def numbers():\n return '<pre>' + '\\n'.join(phone_numbers) + '</pre>'" ]
[ "0.7378809", "0.728158", "0.7243709", "0.71238893", "0.6969404", "0.6930221", "0.6905321", "0.6822984", "0.670466", "0.6703273", "0.6692987", "0.6657773", "0.66574764", "0.66568744", "0.6644845", "0.65901685", "0.6581246", "0.656387", "0.6562163", "0.64722866", "0.64605606", "0.6456879", "0.64397925", "0.64148957", "0.6371591", "0.6366875", "0.63655597", "0.6358356", "0.6264549", "0.62473154", "0.6237998", "0.62138987", "0.6198085", "0.61107254", "0.6086876", "0.6066039", "0.6055655", "0.60540164", "0.60276353", "0.6016293", "0.5991055", "0.5948566", "0.59414196", "0.5929889", "0.58901113", "0.58557343", "0.58289456", "0.5827473", "0.5826941", "0.5812898", "0.5804458", "0.5797472", "0.57683855", "0.5764621", "0.5762099", "0.57523084", "0.5728222", "0.56333685", "0.5598918", "0.55918396", "0.5580842", "0.5547486", "0.5544471", "0.55420923", "0.55392087", "0.55346406", "0.55268526", "0.55080473", "0.5502691", "0.5495822", "0.5495822", "0.5490207", "0.5490207", "0.5489624", "0.54857457", "0.5478639", "0.54752654", "0.54738885", "0.5458845", "0.5455552", "0.54533124", "0.54370195", "0.54370195", "0.54358125", "0.5428129", "0.54262435", "0.54071677", "0.5400218", "0.5394383", "0.5373567", "0.5370347", "0.5365644", "0.5352516", "0.53450507", "0.5332966", "0.53297937", "0.5329267", "0.5324638", "0.53143835", "0.5313836", "0.5309221" ]
0.0
-1
Instantiate a new OperationArgument
def __init__(self, name: str, arg_type_name: str, is_required=False):
    self.key = name
    self.value = arg_type_name
    self.required = is_required
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n self.operation = eval(self._get_op_str())\n self.args = {'operation': self.operation.__name__, 'constargs': constargs, 'randomargs': randomargs}", "def __init__(self, op, op_param_list, op_reg_list):\n self. operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }", "def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand", "def _create_operation(self,\n identifier,\n arguments=None,\n return_type=None,\n extended_attributes=None,\n node=None):\n if not return_type:\n return_type = self._create_type('void')\n elif isinstance(return_type, str):\n return_type = self._create_type(return_type)\n if isinstance(extended_attributes, dict):\n extended_attributes = self._create_extended_attributes(\n extended_attributes)\n debug_info = self._build_debug_info(node) if node else None\n\n return Operation.IR(\n identifier,\n arguments=(arguments or []),\n return_type=return_type,\n extended_attributes=extended_attributes,\n component=self._component,\n debug_info=debug_info)", "def __init__(self, **kwargs):\n\n super(RefactoringOperation, self).__init__(**kwargs)", "def __init__(self, name, flags, attr=None):\n Arg.__init__(self, name, attr)\n self.flags = flags", "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def __init__(self, op, value):\n self.op = op\n self.value = value", "def add_argument(*args, **kwargs):\n return _Argument(args, frozenset(kwargs.items()))", "def create_argument_list(self):\n raise NotImplementedError", "def local_operation_with_args(operation, kwargs, node, environment):\n kwargs = json.loads(kwargs)\n run_operation(operation, node, environment, args=kwargs)", "def __init__(self, op_name, attr_key, attr_value):\n self.op = relay.op.get(op_name)\n self.attr_key = attr_key\n self.attr_value = attr_value", "def __init__(self, op1, op2, transfer_tags=True):\r\n self.op1 = op1\r\n self.op2 = op2\r\n self.transfer_tags = transfer_tags", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def __init__(self, name=\"info\", attr=None):\n Arg.__init__(self, name, attr)", "def __call__(cls, arg: 'OpNode', **kwargs: Any):\n cls._check_arg(arg)\n cls._check_kwargs(kwargs)\n return OpNode(\n op_type=cls,\n arg=arg,\n output_data_type=cls._return_data_type,\n kwargs=kwargs)", "def __init__(\n self,\n name: Optional[str] = None,\n aliases: Iterable[str] = (),\n args: Iterable[Argument] = (),\n ) -> None:\n self.args = Lexicon()\n self.positional_args: List[Argument] = []\n self.flags = Lexicon()\n self.inverse_flags: Dict[str, str] = {} # No need for Lexicon 
here\n self.name = name\n self.aliases = aliases\n for arg in args:\n self.add_arg(arg)", "def __init__(self):\n self.inputs = []\n self.op = None\n self.const_attr = None\n self.name = \"\"", "def argument(*args, **kwargs):\n def deco(fct):\n if isinstance(fct, Command):\n cmd = fct\n cmd.add_argument(*args, **kwargs)\n else:\n if not hasattr(fct, '_acmdlib_arguments'):\n fct._acmdlib_arguments = []\n fct._acmdlib_arguments.append((args, kwargs))\n #print \"===\",args,kwargs,type(fct),fct\n return fct\n return deco", "def __init__(self):\n BuiltinFunction.__init__(self, \"arg\",\n conversions=dict(maxima='carg',\n mathematica='Arg',\n sympy='arg'))", "def AddOperationResourceArg(parser, verb):\n concept_parsers.ConceptParser.ForResource(\n 'operation_id',\n GetOperationResourceSpec(),\n 'operation {}.'.format(verb),\n required=True,\n ).AddToParser(parser)", "def make_arg(arg):\n return arg.result if isinstance(arg, (Op, Block)) else arg", "def create_argument(parameter, input_dict):\n arg = []\n position = 0\n\n if 'id' not in parameter:\n exit_perm_fail(\"Error: input parameter given without an id\")\n par_id = parameter['id']\n\n # get parameter type properties\n par_type = parameter.get('type')\n\n is_optional = False\n if par_type.endswith('?'):\n is_optional = True\n par_type = par_type[0:-1]\n\n is_array = False\n if par_type.endswith('[]'):\n is_array = True\n par_type = par_type[0:-2]\n\n # get input value\n value = parameter.get('default')\n if par_id in input_dict:\n value = input_dict[par_id]\n\n # check type a bit\n if not is_optional and value is None:\n exit_perm_fail(\"Error: no input provided for required parameter {}\".format(str(par_id)))\n if is_array and not isinstance(value, list):\n exit_perm_fail(\"Error: expected an array input value for parameter {}\".format(str(par_id)))\n\n if 'inputBinding' in parameter and value is not None:\n binding = parameter['inputBinding']\n\n # get argument creation settings\n separate = 'separate' not in binding or binding['separate']\n item_separator = binding.get('itemSeparator')\n prefix = binding.get('prefix')\n\n # produce argument\n if is_array:\n if par_type == 'File':\n value = list(map(lambda x: x['path'], value))\n else:\n value = list(map(str, value))\n\n if item_separator:\n value = [item_separator.join(value)]\n else:\n if par_type == 'File':\n value = [value['path']]\n else:\n value = [value]\n\n for val in value:\n if prefix:\n if separate:\n arg.append(prefix)\n arg.append(str(val))\n else:\n arg.append(prefix + str(val))\n else:\n arg.append(str(val))\n\n # put it in the right place\n if 'position' in binding:\n position = int(binding['position'])\n\n return position, arg", "def __init__(self, namespace, listOfArgumentNames):\n self.namespace = namespace\n self.listOfArgumentNames = listOfArgumentNames", "def __init__(self, type, _command_input):\n self._command_input = _command_input\n \"\"\"\n Represents the command\n \"\"\"\n self._command = type", "def __init__(self, command, target: str):\n self.command = command\n self.target = target", "def _wrap_FunctionDefArgument(self, expr):\n var = expr.var\n name = var.name\n self.scope.insert_symbol(name)\n collisionless_name = self.scope.get_expected_name(var.name)\n if var.is_ndarray or var.is_optional:\n new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'),\n is_argument = True, is_optional = False, memory_handling='alias')\n arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False,\n memory_handling = 'alias', 
allows_negative_indexes=False)\n self.scope.insert_variable(arg_var)\n else:\n new_var = var.clone(collisionless_name)\n self.scope.insert_variable(new_var)\n\n return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var,\n kwonly = expr.is_kwonly, annotation = expr.annotation, scope=self.scope)", "def __init__(self, command: str):\n\n # Assign attributes\n self.command_str: str = command\n self.name: str = \"\"\n self.arg: str = \"\"\n\n # Parse the command\n self.parse_command()", "def __init__(self, *terms, op=None):\n\n if not op:\n assert len(terms) == 1\n\n # assign parameters\n self.op = op\n self.terms = terms", "def from_param(cls, arg):\n return cls(arg)", "def arg(\n default=MISSING,\n /,\n *,\n flag=None,\n factory=MISSING,\n init=True,\n repr=True, # noqa: A002\n hash=None, # noqa: A002\n help=None, # noqa: A002\n compare=True,\n metadata=None,\n):\n metadata = metadata or {}\n for k, v in {'flag': flag, 'help': help}.items():\n if v:\n metadata = metadata | {k: v}\n return field( # type: ignore[call-overload]\n default=default,\n default_factory=factory,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata)", "def add_argument(self, *args, **kwargs):\n self.arguments[args[0]] = self._Argument(*args, **kwargs)", "def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)", "def __init__(self):\n self._OPERATION = None", "def new(self, gen_op_or_value, context=None):\n return self.CoroutineOperation.new(gen_op_or_value, self.queue, context)", "def __init__(self, *args):\n this = _ida_hexrays.new_operand_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, operations = []):\n self.operations = operations", "def __init__(__self__, *,\n operation: Optional[pulumi.Input[str]] = None,\n property: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None):\n if operation is not None:\n pulumi.set(__self__, \"operation\", operation)\n if property is not None:\n pulumi.set(__self__, \"property\", property)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def addop(name, fields, args=None, alias=False):\n\n namespace = {\"fields\": fields, \"alias\": alias}\n\n if args is not None:\n namespace[\"args\"] = args\n\n # Dynamically create the \"name\" object\n type(name, (mn_pinky,), namespace)", "def arg(\n cls,\n *flags: str,\n default: Any = EMPTY,\n choices: Sequence[Any] = None,\n help: str = None, # pylint: disable=redefined-builtin\n metavar: str = None,\n ) -> \"Argument\":\n return cls(\n *flags, default=default, choices=choices, help_text=help, metavar=metavar\n )", "def add(self, **kwargs) -> None:\n self.append(Operation(**kwargs))", "def __init__(self):\n\n self.arg = None\n self.output = None", "def __init__(self, variable, reference, operator):\n self._variable = variable\n self._reference = reference\n self._operator = operator", "def __init__(self, a=1.0, name='Id'):\n super(IdentityExpression, self).__init__(name=name)\n ## Factor to multiply the argument with.\n self.a = a", "def _gen_def(self):\n attributes = self.attributes()\n self._def = proto_util.make_operator_def_cpp(\n name=attributes.get('name', 'Op'),\n cache_key=self._cache_key,\n op_type=attributes['op_type'],\n device_option=proto_util.get_device_option(\n self._device.type,\n self._device.index,\n self._seed,\n ),\n **attributes['arguments']\n )", "def __init__(self, op, 
symbolicExpression1, symbolicExpression2):\n\n SymbolicExpression.__init__(self)\n \n self.op = op\n self.symbolicExpression1 = symbolicExpression1\n self.symbolicExpression2 = symbolicExpression2", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def __init__(self, *args):\n this = _libsbml.new_Parameter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n self.inputs = []\r\n self.op = None\r\n self.const_attr = None\r\n self.name = \"\"\r\n self.dtype = None", "def __init__(self, *args, **kwargs):\n layer_kwargs = lbann.Layer.__init__.__kwdefaults__.copy()\n op_kwargs = {}\n for key, value in kwargs.items():\n if key in layer_kwargs:\n layer_kwargs[key] = value\n else:\n op_kwargs[key] = value\n layer_kwargs['ops'] = [ operator_class(**op_kwargs) ]\n OperatorLayer.__init__(self, *args, **layer_kwargs)", "def __init__(self, operand_string):\n\n # String to hold the operand literal\n self.op_string = operand_string\n\n # Integer value of the operand\n self.op_value = int(operand_string)", "def __init__(self, arg_list: List[_Argument]):\n self.arg_list: List[_Argument] = arg_list", "def __init__(self,\n time: Timestamp,\n duration: Duration,\n operation: ops.Operation) -> None:\n self.time = time\n self.duration = duration\n self.operation = operation", "def __init__(self, name, args):\n self._proc = None\n self._args = [f\"/{name}\"]\n self._args.extend(args)", "def __init__(self, arg: ast3.arg) -> None:\n self.typed = False\n self.line = arg.lineno\n self.column = arg.col_offset\n self.name = arg.arg\n self.type = None\n self.type_line = -1\n self.type_column = -1\n if arg.annotation:\n anno = arg.annotation\n self.typed = True\n self.type = Annotation(anno) # type: ignore\n self.type_line = anno.lineno\n self.type_column = anno.col_offset", "def _create_fake_operation(resource_link, verb, name):\n return {\n 'targetLink': resource_link,\n 'operationType': verb,\n 'name': name,\n 'status': 'DONE',\n 'progress': 100,\n }", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or list of Operators')\n \n # build domain and range\n self.n = len(self.ops)\n op_range = []\n op_domain = []\n for idx in range(self.n):\n op_domain += [self.ops[idx].domain]\n op_range += [self.ops[idx].range]\n \n super(Dstack, self).__init__(domain=superVector(op_domain), range=superVector(op_range))", "def register_operation(self, name, result, args, kwargs):\r\n if not isinstance(result, autodiff.tensor.Tensor):\r\n result = autodiff.tensor.Tensor(result, graph=self)\r\n args = [x if isinstance(x, autodiff.tensor.Tensor) \r\n else autodiff.tensor.Tensor(x, graph=self) for x in args]\r\n self.operation_map[result.id] = Operation(name, result, args, kwargs)", "def __init__(self, name, c_arg):\n super().__init__(name)\n self._c_arg = c_arg", "def __init__(self, operation_inputs):\n\n full_operation_name = ctx.operation.name\n self.operation_name = full_operation_name.split('.').pop()\n\n # These should not make their way into the Operation inputs.\n os.environ['_PAGINATION_OFFSET'] = 
\\\n text_type(operation_inputs.pop('pagination_offset', 0))\n os.environ['_PAGINATION_SIZE'] = \\\n text_type(operation_inputs.pop('pagination_size', 1000))\n\n # cloudify client\n self.client_config = get_desired_value(\n 'client', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n if self.client_config:\n self.client = CloudifyClient(**self.client_config)\n else:\n self.client = manager.get_rest_client()\n\n # plugins\n self.plugins = get_desired_value(\n 'plugins', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # secrets\n self.secrets = get_desired_value(\n 'secrets', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # resource_config\n self.config = get_desired_value(\n 'resource_config', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties)\n\n # Blueprint-related properties\n self.blueprint = self.config.get('blueprint', {})\n self.blueprint_id = self.blueprint.get('id') or ctx.instance.id\n self.blueprint_file_name = self.blueprint.get('main_file_name')\n self.blueprint_archive = self.blueprint.get('blueprint_archive')\n\n # Deployment-related properties\n self.deployment = self.config.get('deployment', {})\n self.deployment_id = self.deployment.get('id') or ctx.instance.id\n self.deployment_inputs = self.deployment.get('inputs', {})\n self.deployment_outputs = self.deployment.get('outputs')\n self.deployment_all_outputs = self.deployment.get('all_outputs', True)\n self.deployment_logs = self.deployment.get('logs', {})\n\n # Node-instance-related properties\n self.node_instance_proxy = self.config.get('node_instance')\n\n # Execution-related properties\n self.workflow_id = \\\n operation_inputs.get('workflow_id',\n 'create_deployment_environment')\n self.workflow_state = \\\n operation_inputs.get(\n 'workflow_state',\n 'terminated')\n self.reexecute = \\\n self.config.get('reexecute') \\\n or ctx.instance.runtime_properties.get('reexecute') \\\n or False\n\n # Polling-related properties\n self.interval = operation_inputs.get('interval', POLLING_INTERVAL)\n self.state = operation_inputs.get('state', 'terminated')\n self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)\n\n # This ``execution_id`` will be set once execute workflow done\n # successfully\n self.execution_id = None", "def __init__(self, operations=None):\n\n if operations is None:\n operations = self.default_operations\n\n if None in operations:\n operations.update(self.default_operations)\n\n self.operations = operations\n self.special = [\"(\", \")\", \",\"]", "def _register_operation(self, **operation):\n name = operation[\"name\"]\n if name in self.operations:\n raise ValueError(\"operation name already registered: {}\".format(name))\n self.operations[name] = _Operation({**operation, \"resource\": self})", "def __init__(self, operation_string):\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper", "def add_argument(self, *args, **kwds):\n # no argument to add to stack\n if not args:\n return self\n\n # consume Command objects if exists\n if isinstance(args[0], Command):\n self._arg_stack.extend(args[0]._arg_stack)\n target = args[0]\n return self.add_argument(*args[1:], **kwds)\n\n # stack args, kwds to pass to parser.add_argument\n self._arg_stack.append(('normal', args, 
kwds))\n return self", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n action: Optional[pulumi.Input[str]] = None,\n layer_name: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n principal: Optional[pulumi.Input[str]] = None,\n statement_id: Optional[pulumi.Input[str]] = None,\n version_number: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)", "def create_from_arg_string(cls, arg_string):\n return cls()", "def __init__(__self__,\n resource_name: str,\n args: LayerVersionPermissionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def from_parameter(cls, name: str, parameter: inspect.Parameter) -> \"Argument\":\n # pylint: disable=too-many-branches,too-many-statements\n positional = parameter.kind is not parameter.KEYWORD_ONLY\n type_ = parameter.annotation\n default = parameter.default\n flag = name.upper() if positional else f\"--{name.replace('_', '-')}\"\n\n # If field is assigned an Argument use that as the starting point\n if isinstance(default, Argument):\n instance = default\n default = EMPTY\n if flag not in instance.name_or_flags:\n instance.name_or_flags = (flag,) + instance.name_or_flags\n else:\n instance = cls(flag)\n\n # Start updating kwargs\n kwargs = instance.kwargs\n if default is not EMPTY:\n kwargs.setdefault(\"default\", default)\n\n # Handle type variances\n origin = getattr(type_, \"__origin__\", None)\n if origin is not None:\n type_ = cls._handle_generics(origin, type_, positional, kwargs)\n elif isinstance(type_, type):\n type_ = cls._handle_types(type_, positional, kwargs)\n elif isinstance(type_, argparse.FileType):\n pass # Just pass as this is an `argparse` builtin\n else:\n raise TypeError(f\"Unsupported type: {type_!r}\")\n\n if type_:\n kwargs[\"type\"] = type_\n\n return instance", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def argument(self, *name_or_flags, **kwargs):\n return self.parser.add_argument(*name_or_flags, **kwargs)", "def __init__(self, namespace, command, path=\"\"):\n super().__init__(namespace, command, path)\n\n # dfuse options\n self.mount_dir = BasicParameter(None, position=0)\n self.pool = BasicParameter(None, position=1)\n self.cont = BasicParameter(None, position=2)\n self.sys_name = FormattedParameter(\"--sys-name {}\")\n self.thread_count = FormattedParameter(\"--thread-count {}\")\n self.eq_count = FormattedParameter(\"--eq-count {}\")\n self.singlethreaded = FormattedParameter(\"--singlethread\", False)\n self.foreground = FormattedParameter(\"--foreground\", False)\n self.enable_caching = FormattedParameter(\"--enable-caching\", False)\n self.enable_wb_cache = FormattedParameter(\"--enable-wb-cache\", False)\n self.disable_caching = FormattedParameter(\"--disable-caching\", False)\n self.disable_wb_cache = FormattedParameter(\"--disable-wb-cache\", False)\n self.multi_user = FormattedParameter(\"--multi-user\", False)", "def __init__(__self__, *,\n action: pulumi.Input[str],\n layer_name: pulumi.Input[str],\n principal: pulumi.Input[str],\n statement_id: pulumi.Input[str],\n version_number: pulumi.Input[int],\n organization_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"action\", action)\n pulumi.set(__self__, \"layer_name\", layer_name)\n pulumi.set(__self__, \"principal\", principal)\n pulumi.set(__self__, \"statement_id\", statement_id)\n 
pulumi.set(__self__, \"version_number\", version_number)\n if organization_id is not None:\n pulumi.set(__self__, \"organization_id\", organization_id)", "def __add_arguments__(cls, parser):", "def remote_createParameter(self, name, value):\r\n return Parameter(self, name, value)", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def __init__(self, command: Optional[List[str]] = None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.docs_command = DocsCommandContext()\n self.command = command or self.docs_command.sub_commands\n self.command_string = self.docs_command.sub_command_string\n self.command_callback = self.docs_command.command_callback", "def __init__(self, raw_directive: Dict):\n self.name: str = raw_directive.get(\"name\")\n self.description: str = raw_directive.get(\"description\")\n self.locations: List[str] = raw_directive.get(\"locations\", [])\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_directive.get(\"args\", []))", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Vstack')\n \n # check range\n self.n = len(self.ops)\n op_range = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].domain.checkSame(self.ops[idx + 1].domain):\n raise ValueError('Domain incompatibility between Op %d and Op %d' % (idx, idx + 1))\n op_range += [self.ops[idx].range]\n \n super(Vstack, self).__init__(domain=self.ops[0].domain, range=superVector(op_range))", "def __init__(self, raw_arg: Dict):\n self.name = raw_arg.get(\"name\")\n self.description = raw_arg.get(\"description\")\n self.type = TypeDefer(raw_arg.get(\"type\")) if raw_arg.get(\"type\") is not None else None\n self.default_value = raw_arg.get(\"defaultValue\")", "def _init_instruction(cls, instruction):\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an identity superoperator of the correct size\n # of the circuit\n op = SuperOp(np.eye(4**instruction.num_qubits))\n op._append_instruction(instruction)\n return op", "def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)", "def from_operator(operation=debug):\r\n\r\n def C(*things):\r\n return Container(freezed(operation), list(things), [], [], [], [])\r\n return C", "def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right", "def __init__(self, name, result, args, kwargs):\r\n self.name = name\r\n self.result = result\r\n self.args = args\r\n self.kwargs = kwargs", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Hstack')\n \n # check domain\n self.n = len(self.ops)\n domain = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].range.checkSame(self.ops[idx + 1].range):\n raise 
ValueError('Range incompatibility between Op %d and Op %d' % (idx, idx + 1))\n domain += [self.ops[0].domain]\n super(Hstack, self).__init__(domain=superVector(domain), range=self.ops[0].range)", "def __init__(self, blip_data, context):\n super(OpBasedDocument, self).__init__(blip_data)\n self.__context = context", "def __init__(self, *args, name=''):\n from collections import Iterable\n if len(args) == 1:\n if isinstance(args[0], Point):\n self.data = args[0].data.copy()\n elif isinstance(args[0], Iterable):\n self.data = list(args[0])\n else:\n self.data = list(args)\n self.name = name if not name.isspace() else ''", "def __init__(self, args, kwargs):\n self._args_dec = list(args)\n self._kwargs_dec = dict(kwargs)", "def __init__(self, args):\n self.args = args", "def to_op(self):\n raise NotImplementedError", "def test_operation_kwarg(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"Coherent(alpha=-0.3+2j) | 0\\n\")\n assert bb.operations == [\n {\"modes\": [0], \"op\": \"Coherent\", \"args\": [], \"kwargs\": {\"alpha\": -0.3 + 2j}}\n ]", "def build(self, python_type, op):\n collection = self.fold_args(op.arg, op)\n if collection:\n typename = python_type.__name__\n typ = (typename, collection.types)\n try:\n value = python_type(collection.values)\n except TypeError as e:\n raise ConstantError(f'TypeError: {e.args[0]}', op) from e\n elements = collection.elements\n self.push(_Constant(typ, value, elements, op))", "def __init__(self, node_def, op, message, error_code):\n ..." ]
[ "0.6846367", "0.6258833", "0.6123263", "0.6116206", "0.6112215", "0.6097888", "0.60650367", "0.6055484", "0.6029025", "0.5997257", "0.5876527", "0.58544487", "0.5838185", "0.5809771", "0.5809771", "0.5809771", "0.5809771", "0.5809771", "0.57846487", "0.57816875", "0.5754001", "0.57504725", "0.57318413", "0.5720162", "0.5719573", "0.571905", "0.5718642", "0.57031274", "0.56843585", "0.56638104", "0.56481886", "0.5625258", "0.56244814", "0.5608897", "0.55910236", "0.55887145", "0.55811596", "0.55689365", "0.5555134", "0.5552227", "0.55473983", "0.55435157", "0.5535876", "0.55059177", "0.54836017", "0.5483035", "0.5476765", "0.5469377", "0.5462126", "0.5453183", "0.54452884", "0.5434539", "0.5429375", "0.5427742", "0.5418356", "0.54077226", "0.5386582", "0.53571844", "0.5355017", "0.53521353", "0.5337008", "0.5328642", "0.53278196", "0.5318915", "0.5300338", "0.5295216", "0.5293073", "0.52906966", "0.528147", "0.5277674", "0.5277109", "0.52706677", "0.5266806", "0.52587265", "0.52524006", "0.52516425", "0.5247403", "0.5247339", "0.52465075", "0.5244823", "0.5235799", "0.52349097", "0.523444", "0.5227283", "0.52180195", "0.52075046", "0.5207387", "0.52035016", "0.5187354", "0.51847947", "0.5181874", "0.51817095", "0.5179038", "0.51712835", "0.5158707", "0.51487756", "0.51429754", "0.5135153", "0.513243", "0.51286846" ]
0.5423515
54
Instantiate a new Operation.
def __init__(self, field: "SchemaTypeField", settings: Settings): from qlient import helpers self.settings = settings self.name = field.name self.description = field.description self.arguments = helpers.adapt_arguments(field.args) self.return_type = field.type self._return_fields: Union[Tuple[SelectedField], None] = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n self.operation = eval(self._get_op_str())\n self.args = {'operation': self.operation.__name__, 'constargs': constargs, 'randomargs': randomargs}", "def _create_operation(self,\n identifier,\n arguments=None,\n return_type=None,\n extended_attributes=None,\n node=None):\n if not return_type:\n return_type = self._create_type('void')\n elif isinstance(return_type, str):\n return_type = self._create_type(return_type)\n if isinstance(extended_attributes, dict):\n extended_attributes = self._create_extended_attributes(\n extended_attributes)\n debug_info = self._build_debug_info(node) if node else None\n\n return Operation.IR(\n identifier,\n arguments=(arguments or []),\n return_type=return_type,\n extended_attributes=extended_attributes,\n component=self._component,\n debug_info=debug_info)", "def __init__(self, operations = []):\n self.operations = operations", "def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand", "def __init__(self, op, op_param_list, op_reg_list):\n self. operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }", "async def instantiate(\n self,\n operation: Operation,\n config: BaseConfig,\n *,\n opimp: OperationImplementation = None,\n ) -> bool:\n if opimp is None:\n if await self.instantiable(operation):\n opimp = OperationImplementation.load(operation.name)\n else:\n raise OperationImplementationNotInstantiable(operation.name)\n # Set the correct instance_name\n opimp = copy.deepcopy(opimp)\n opimp.op = operation\n self.operations[\n operation.instance_name\n ] = await self._stack.enter_async_context(opimp(config))", "def __init__(self):\n self._OPERATION = None", "def __init__(self, op, value):\n self.op = op\n self.value = value", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def new(self, gen_op_or_value, context=None):\n return self.CoroutineOperation.new(gen_op_or_value, self.queue, context)", "def from_operator(operation=debug):\r\n\r\n def C(*things):\r\n return Container(freezed(operation), list(things), [], [], [], [])\r\n return C", "def __init__(self, operations=None):\n\n if operations is None:\n operations = self.default_operations\n\n if None in operations:\n operations.update(self.default_operations)\n\n self.operations = operations\n self.special = [\"(\", \")\", \",\"]", "def __init__(self, operation_string):\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()", "def __init__(self, op1, op2, transfer_tags=True):\r\n self.op1 = op1\r\n self.op2 = op2\r\n self.transfer_tags = transfer_tags", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = 
leftOper\n self.rightOperand = rightOper", "def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right", "def __init__(self):\n\n self.operations = {}", "def make(self):\n return make_operation_space()", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def from_dict(cls, dikt) -> 'Operations':\n return util.deserialize_model(dikt, cls)", "def __init__(self,\n time: Timestamp,\n duration: Duration,\n operation: ops.Operation) -> None:\n self.time = time\n self.duration = duration\n self.operation = operation", "def create_ops(self):\n return self._create_ops", "def __init__(self, op, symbolicExpression1, symbolicExpression2):\n\n SymbolicExpression.__init__(self)\n \n self.op = op\n self.symbolicExpression1 = symbolicExpression1\n self.symbolicExpression2 = symbolicExpression2", "def __init__(self, *terms, op=None):\n\n if not op:\n assert len(terms) == 1\n\n # assign parameters\n self.op = op\n self.terms = terms", "def _init_instruction(cls, instruction):\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an identity superoperator of the correct size\n # of the circuit\n op = SuperOp(np.eye(4**instruction.num_qubits))\n op._append_instruction(instruction)\n return op", "async def instantiable(\n self, operation: Operation, *, opimp: OperationImplementation = None\n ) -> bool:\n # This is pure Python, so if we're given an operation implementation we\n # will be able to instantiate it\n if opimp is not None:\n return True\n try:\n opimp = OperationImplementation.load(operation.name)\n except FailedToLoadOperationImplementation as error:\n self.logger.debug(\n \"OperationImplementation %r is not instantiable: %s\",\n operation.name,\n error,\n )\n return False\n return True", "def CreatePersistentOp(op_def):\n CreatePersistentOpCC(_stringify_proto(op_def))", "def __init__(self, op_name, attr_key, attr_value):\n self.op = relay.op.get(op_name)\n self.attr_key = attr_key\n self.attr_value = attr_value", "def some_operation(self):\n\n # Call the factory method to create a Product object.\n product = self.factory_method()\n\n # Now, use the product.\n product.operation()", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def __init__(self, **kwargs):\n\n super(RefactoringOperation, self).__init__(**kwargs)", "def operation_class(self):\n clazzname = self.name.title() + \"Op\"\n if clazzname == \"NopOp\":\n clazz = BaseOp\n else:\n clazz = globals()[clazzname]\n return clazz", "def __init__(self):\n self.inputs = []\n self.op = None\n self.const_attr = None\n self.name = \"\"", "def __init__(self):\r\n self.operation_map = {}", "def add(self, **kwargs) -> None:\n self.append(Operation(**kwargs))", "def _register_operation(self, **operation):\n name = operation[\"name\"]\n if name in self.operations:\n raise ValueError(\"operation name already registered: {}\".format(name))\n self.operations[name] = _Operation({**operation, \"resource\": self})", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def _op(op):\n def _process(self, ty, 
args=None, result=None, **metadata):\n if args is None:\n args = []\n assert ty is not None\n assert isinstance(args, list), args\n assert not any(arg is None for arg in flatten(args)), args\n result = Op(op, ty, args, result)\n if metadata:\n result.add_metadata(metadata)\n self._insert_op(result)\n return result\n\n def _process_void(self, *args, **kwds):\n result = kwds.pop('result', None)\n op = _process(self, types.Void, list(args), result)\n if kwds:\n op.add_metadata(kwds)\n return op\n\n if ops.is_void(op):\n build_op = _process_void\n else:\n build_op = _process\n\n if config.op_verify:\n build_op = op_verifier(build_op)\n\n return build_op", "def _new_instance(self):\n return self.__class__(self._fmodule, self._tensor_rank)", "def __init__(self, *args, **kwargs):\n layer_kwargs = lbann.Layer.__init__.__kwdefaults__.copy()\n op_kwargs = {}\n for key, value in kwargs.items():\n if key in layer_kwargs:\n layer_kwargs[key] = value\n else:\n op_kwargs[key] = value\n layer_kwargs['ops'] = [ operator_class(**op_kwargs) ]\n OperatorLayer.__init__(self, *args, **layer_kwargs)", "async def add_operation(self, account: Account, currency: Currency, amount: Decimal):\n return await Operation.create(\n document=self.id,\n account=account.id,\n currency=currency.id,\n amount=amount\n )", "def __init__(self, dataset: List, ops: Callable):\n self.dataset = dataset\n self.ops = ops", "def _create_table_operation_aspect(self, table: Table) -> OperationClass:\n\n reported_time = int(time.time() * 1000)\n\n operation = OperationClass(\n timestampMillis=reported_time,\n lastUpdatedTimestamp=int(table.created_at.timestamp() * 1000),\n actor=make_user_urn(table.created_by),\n operationType=OperationTypeClass.CREATE,\n )\n\n if table.updated_at and table.updated_by is not None:\n operation = OperationClass(\n timestampMillis=reported_time,\n lastUpdatedTimestamp=int(table.updated_at.timestamp() * 1000),\n actor=make_user_urn(table.updated_by),\n operationType=OperationTypeClass.UPDATE,\n )\n\n return operation", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, name=None, description=None):\n super().__init__()\n self.name = name or getattr(self, \"name\", type(self).__name__.lower())\n self.description = description or getattr(self, \"description\", None) or self.__doc__ or self.__class__.__name__\n self.operations = {}\n for function in (attr for attr in (getattr(self, nom) for nom in dir(self)) if callable(attr)):\n try:\n operation = function._roax_operation_\n except:\n continue # ignore undecorated functions\n self._register_operation(**operation)", "def New(*args, **kargs):\n obj = itkCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def _init_network_operation(self, operation, parameters):\n message_id = self.operator.async_remote_call(None, operation, parameters, True)", "def __init__(self, *args):\n this = _ida_hexrays.new_operand_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def new_command(self, content=None):\n return 
PrimitiveControllerCommand(content)", "def register_operation(self, name, result, args, kwargs):\r\n if not isinstance(result, autodiff.tensor.Tensor):\r\n result = autodiff.tensor.Tensor(result, graph=self)\r\n args = [x if isinstance(x, autodiff.tensor.Tensor) \r\n else autodiff.tensor.Tensor(x, graph=self) for x in args]\r\n self.operation_map[result.id] = Operation(name, result, args, kwargs)", "def __call__(cls, arg: 'OpNode', **kwargs: Any):\n cls._check_arg(arg)\n cls._check_kwargs(kwargs)\n return OpNode(\n op_type=cls,\n arg=arg,\n output_data_type=cls._return_data_type,\n kwargs=kwargs)", "def _gen_def(self):\n attributes = self.attributes()\n self._def = proto_util.make_operator_def_cpp(\n name=attributes.get('name', 'Op'),\n cache_key=self._cache_key,\n op_type=attributes['op_type'],\n device_option=proto_util.get_device_option(\n self._device.type,\n self._device.index,\n self._seed,\n ),\n **attributes['arguments']\n )", "def create() -> 'Tokenizer':\n token_op_table = [\n EOS,\n op.Concat,\n op.ConstStr,\n op.SubStr,\n op.GetSpan,\n op.Trim,\n ]\n\n # Nesting operators and their args get \"compacted\" into\n # \"primitive\" tokens\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetToken, type_, index))\n\n for case in op.Case:\n token_op_table.append((op.ToCase, case))\n\n for delim1 in op.DELIMITER:\n for delim2 in op.DELIMITER:\n token_op_table.append((op.Replace, delim1, delim2))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetUpto, dsl_regex))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetFrom, dsl_regex))\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetFirst, type_, index))\n\n for type_ in op.Type:\n token_op_table.append((op.GetAll, type_))\n\n # Primitive types\n\n for type_ in op.Type:\n token_op_table.append(type_)\n\n for boundary in op.Boundary:\n token_op_table.append(boundary)\n\n # Covers op.INDEX\n for position in range(op.POSITION[0], op.POSITION[1]+1):\n token_op_table.append(position)\n\n # This covers op.DELIMITER\n for character in op.CHARACTER:\n token_op_table.append(character)\n\n token_op_table = {\n token: op\n for token, op in enumerate(token_op_table)\n }\n\n op_token_table = {\n op: token\n for token, op in token_op_table.items()\n }\n\n assert len(token_op_table) == len(op_token_table)\n\n string_token_table = {\n char: token\n for token, char in enumerate(op.CHARACTER)\n }\n\n return Tokenizer(\n token_op_table=token_op_table,\n op_token_table=op_token_table,\n string_token_table=string_token_table,\n )", "def __init__(self, opcode):\n self.opcode = opcode", "def __init__(self, operand_string):\n\n # String to hold the operand literal\n self.op_string = operand_string\n\n # Integer value of the operand\n self.op_value = int(operand_string)", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or list of Operators')\n \n # build domain and range\n self.n = len(self.ops)\n op_range = []\n op_domain = []\n for idx in range(self.n):\n op_domain += [self.ops[idx].domain]\n op_range += [self.ops[idx].range]\n \n super(Dstack, self).__init__(domain=superVector(op_domain), range=superVector(op_range))", "def 
AddOperation(self, op):\n self._operations.append(op)", "def _new(cls, rep, shape, domain):\n cls._check(rep, shape, domain)\n obj = object.__new__(cls)\n obj.rep = rep\n obj.shape = obj.rows, obj.cols = shape\n obj.domain = domain\n return obj", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def restore_operation(cls, operation_record):\n classname = operation_record[\"OPE_TYPE\"]\n module = \"\" #TODO Implement modulename from database if Operation belongs to Module\n is_operation_of_module = False\n exec \"\"\"\ntry:\n type(%(class)s)\nexcept NameError,e:\n is_operation_of_module = True\"\"\"%{'class':classname}\n\n if is_operation_of_module:\n exec \"\"\"\nfrom %(module)s import %(class)s\noperation = %(class)s(cls._core)\"\"\"%{'class':classname,'module':module}\n else:\n exec \"\"\"\noperation = %(class)s(cls._core)\"\"\"%{'class':classname}\n\n operation.set_id(operation_record['OPE_ID'])\n db = cls._core.get_db()\n stmnt = \"SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? 
;\"\n cur = db.query(cls._core,stmnt,(operation_record[\"OPE_ID\"],))\n for row in cur.fetchallmap():\n val = row[\"OPD_VALUE\"]\n exec \"\"\"val = %s(val)\"\"\"%row[\"OPD_TYPE\"]\n operation.set_value(row[\"OPD_KEY\"], val)\n return operation", "def __init__(self, op_complete_url, op_key, i, tp, sparql_http_method, addon):\n self.url_parsed = urlsplit(op_complete_url)\n self.op_url = self.url_parsed.path\n self.op = op_key\n self.i = i\n self.tp = tp\n self.sparql_http_method = sparql_http_method\n self.addon = addon\n\n self.operation = {\"=\": eq, \"<\": lt, \">\": gt}\n\n self.dt = DataType()", "def make_operation_space():\n operation_space = {}\n\n # Set integInfo and integBranch\n operation_space['prepare_delenv'] = rmdmod.PrepareDelEnvOperation()\n\n # Call p4 integ for delete revisions\n operation_space['call_p4_integ'] = rmdmod.CallIntegOperation()\n\n # checkout README and place into a pending cln\n operation_space['create_changelist'] = rmdmod.CreateChangelistOperation()\n\n # open file for edit within changelist\n operation_space['reopen'] = rmdmod.ReopenOperation()\n\n # list history of deleted files\n operation_space['list_history'] = rmdmod.ListDelHistoryOperation()\n\n return operation_space", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def __init__(self):\r\n self.inputs = []\r\n self.op = None\r\n self.const_attr = None\r\n self.name = \"\"\r\n self.dtype = None", "def New(*args, **kargs):\n obj = itkSingleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Hstack')\n \n # check domain\n self.n = len(self.ops)\n domain = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].range.checkSame(self.ops[idx + 1].range):\n raise ValueError('Range incompatibility between Op %d and Op %d' % (idx, idx + 1))\n domain += [self.ops[0].domain]\n super(Hstack, self).__init__(domain=superVector(domain), range=self.ops[0].range)", "def __init__(__self__, *,\n operation: Optional[pulumi.Input[str]] = None,\n property: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None):\n if operation is not None:\n pulumi.set(__self__, \"operation\", operation)\n if property is not None:\n pulumi.set(__self__, \"property\", property)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def __init__(self, operation_inputs):\n\n full_operation_name = ctx.operation.name\n self.operation_name = full_operation_name.split('.').pop()\n\n # These should not make their way into the Operation inputs.\n os.environ['_PAGINATION_OFFSET'] = \\\n text_type(operation_inputs.pop('pagination_offset', 0))\n os.environ['_PAGINATION_SIZE'] = \\\n text_type(operation_inputs.pop('pagination_size', 1000))\n\n # cloudify client\n self.client_config = get_desired_value(\n 'client', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n if self.client_config:\n self.client = CloudifyClient(**self.client_config)\n else:\n self.client = manager.get_rest_client()\n\n # plugins\n self.plugins = get_desired_value(\n 'plugins', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # 
secrets\n self.secrets = get_desired_value(\n 'secrets', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # resource_config\n self.config = get_desired_value(\n 'resource_config', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties)\n\n # Blueprint-related properties\n self.blueprint = self.config.get('blueprint', {})\n self.blueprint_id = self.blueprint.get('id') or ctx.instance.id\n self.blueprint_file_name = self.blueprint.get('main_file_name')\n self.blueprint_archive = self.blueprint.get('blueprint_archive')\n\n # Deployment-related properties\n self.deployment = self.config.get('deployment', {})\n self.deployment_id = self.deployment.get('id') or ctx.instance.id\n self.deployment_inputs = self.deployment.get('inputs', {})\n self.deployment_outputs = self.deployment.get('outputs')\n self.deployment_all_outputs = self.deployment.get('all_outputs', True)\n self.deployment_logs = self.deployment.get('logs', {})\n\n # Node-instance-related properties\n self.node_instance_proxy = self.config.get('node_instance')\n\n # Execution-related properties\n self.workflow_id = \\\n operation_inputs.get('workflow_id',\n 'create_deployment_environment')\n self.workflow_state = \\\n operation_inputs.get(\n 'workflow_state',\n 'terminated')\n self.reexecute = \\\n self.config.get('reexecute') \\\n or ctx.instance.runtime_properties.get('reexecute') \\\n or False\n\n # Polling-related properties\n self.interval = operation_inputs.get('interval', POLLING_INTERVAL)\n self.state = operation_inputs.get('state', 'terminated')\n self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)\n\n # This ``execution_id`` will be set once execute workflow done\n # successfully\n self.execution_id = None", "def __init__(self, node_def, op, message, error_code):\n ...", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def add_operation(session, node_id, operation, operation_sent=None,\n operation_received=None, username='system_user'):\n session = validate_session(session)\n add_oper = Operations(node_id, operation, operation_sent,\n operation_received, username\n )\n if add_oper:\n session.add(add_oper)\n session.commit()\n return add_oper", "def add_operation(self, op):\n\n self.operations[op.name] = op", "def __init__(self, name, operator, values):\n self.name = name\n self.operator = operator\n self.values = values", "def operation(self, operation: str):\n\n self._operation = operation", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def __init__(self, operator=None, vector=None, constant=0):\n if operator is None and vector is None:\n raise ValueError('need to provide at least one of `operator` and '\n '`vector`')\n if operator is not None:\n domain = operator.domain\n elif vector is not None:\n domain = vector.space\n\n if (operator is not None and vector is not None and\n vector not in operator.domain):\n raise ValueError('domain of `operator` and space of `vector` need '\n 'to match')\n\n self.__operator = operator\n self.__vector = vector\n self.__constant = constant\n\n super().__init__(space=domain,\n linear=(operator is None and constant == 0))\n\n if 
self.constant not in self.range:\n raise ValueError('`constant` must be an element in the range of '\n 'the functional')", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Vstack')\n \n # check range\n self.n = len(self.ops)\n op_range = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].domain.checkSame(self.ops[idx + 1].domain):\n raise ValueError('Domain incompatibility between Op %d and Op %d' % (idx, idx + 1))\n op_range += [self.ops[idx].range]\n \n super(Vstack, self).__init__(domain=self.ops[0].domain, range=superVector(op_range))", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def __init__(self, root=None, scatter_op=None):\n self.root = root\n self.scatter_op = scatter_op", "def __operate(self, input, op):\n assert isinstance(input, (int, float, Variable.Variable, Polynomial))\n\n if self.poly is None:\n return Polynomial(input=input)\n else:\n newPoly = self.__merge(input, op)\n\n if isinstance(input, Polynomial):\n newPoly.vars = input.vars | self.vars\n elif isinstance(input, Variable.Variable):\n newPoly.vars = {input} | self.vars\n else:\n newPoly.vars = self.vars\n return newPoly", "def __init__(self, variable, reference, operator):\n self._variable = variable\n self._reference = reference\n self._operator = operator", "def __init__(__self__, *,\n action: Optional[pulumi.Input[str]] = None,\n layer_name: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[str]] = None,\n principal: Optional[pulumi.Input[str]] = None,\n revision_id: Optional[pulumi.Input[str]] = None,\n statement_id: Optional[pulumi.Input[str]] = None,\n version_number: Optional[pulumi.Input[int]] = None):\n if action is not None:\n pulumi.set(__self__, \"action\", action)\n if layer_name is not None:\n pulumi.set(__self__, \"layer_name\", layer_name)\n if organization_id is not None:\n pulumi.set(__self__, \"organization_id\", organization_id)\n if policy is not None:\n pulumi.set(__self__, \"policy\", policy)\n if principal is not None:\n pulumi.set(__self__, \"principal\", principal)\n if revision_id is not None:\n pulumi.set(__self__, \"revision_id\", revision_id)\n if statement_id is not None:\n pulumi.set(__self__, \"statement_id\", statement_id)\n if version_number is not None:\n pulumi.set(__self__, \"version_number\", version_number)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def create(cls, _):\n return cls", "def operation(cls):\n return relationship.many_to_one(cls, 'operation')", "def operation(cls):\n return relationship.many_to_one(cls, 'operation')", "def generate_operation(interface_name,\n result_type_name,\n oper_name,\n arguments,\n result_nullable=False):\n \"\"\" Arguments is a list of argument where each argument is:\n [IDLType, argument_name, optional_boolean] \"\"\"\n\n syn_op = IDLOperation(None, interface_name, oper_name)\n\n syn_op.type = IDLType(None, result_type_name)\n syn_op.type = resolveTypedef(syn_op.type)\n syn_op.type.nullable = result_nullable\n\n for argument in arguments:\n arg = IDLArgument(None, argument[1])\n arg.type = argument[0]\n 
arg.optional = argument[2] if len(argument) > 2 else False\n syn_op.arguments.append(arg)\n\n return syn_op", "def New(*args, **kargs):\n obj = itkMultipleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMultipleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
[ "0.7434616", "0.7018311", "0.6947794", "0.68689376", "0.68165016", "0.6655647", "0.651799", "0.64971197", "0.6486915", "0.6486268", "0.64711845", "0.6437761", "0.64105827", "0.6380135", "0.63713217", "0.6350577", "0.6326084", "0.6294592", "0.6282748", "0.6282437", "0.6206516", "0.61994714", "0.6089819", "0.6074392", "0.60718703", "0.60575914", "0.60201466", "0.599203", "0.5967572", "0.5946694", "0.59442616", "0.5937558", "0.5936997", "0.5930927", "0.5930045", "0.5926782", "0.587631", "0.5874392", "0.58611566", "0.58559805", "0.5844288", "0.5840646", "0.583885", "0.582869", "0.58283794", "0.5808227", "0.5805806", "0.57892054", "0.576678", "0.576678", "0.576678", "0.576678", "0.576678", "0.5753707", "0.5747174", "0.5697748", "0.5694831", "0.5694667", "0.5688547", "0.5684452", "0.5669718", "0.5663783", "0.5657893", "0.5657028", "0.5656563", "0.56543744", "0.56503856", "0.5643974", "0.5643974", "0.5633591", "0.5611259", "0.56109786", "0.56090355", "0.56037945", "0.5599448", "0.55981505", "0.55943114", "0.5592664", "0.5583382", "0.55822635", "0.55822635", "0.55774343", "0.5577412", "0.55634546", "0.5560979", "0.5537587", "0.5517247", "0.55158424", "0.5512447", "0.5503448", "0.5499321", "0.54931515", "0.5477703", "0.5475641", "0.5475641", "0.5473691", "0.54663986", "0.54663986", "0.54646677", "0.5462818", "0.5462818" ]
0.0
-1
Recursively look up a certain amount of return fields depending on the current recursion depth. The depth can be set via the settings. client = Client("...", settings=Settings(max_recursion_depth=3))
def get_return_fields(self, all_types: "Dict[str, SchemaType]") -> Tuple[SelectedField]: if self._return_fields is None: from qlient import helpers self._return_fields = helpers.adapt_return_fields( self.return_type, all_types, self.settings.max_recursion_depth ) return self._return_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recurse(self):\n url = self._api + '?recursive=1'\n json = self._json(self._get(url), 200)\n return Tree(json, self._session) if json else None", "def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)", "def get_recursively(search_dict, field):\n fields_found = []\n for key, value in search_dict.items():\n if key == field:\n fields_found.append(value)\n elif isinstance(value, dict):\n results = get_recursively(value, field)\n for result in results:\n fields_found.append(result)\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = get_recursively(item, field)\n for another_result in more_results:\n fields_found.append(another_result)\n return fields_found", "def getrecursionlimit(): # real signature unknown; restored from __doc__\n pass", "def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\",\r\n \"hostFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting to vmFolder from datacenter\r\n dc_to_vmf = build_traversal_spec(client_factory, \"dc_to_vmf\", \"Datacenter\",\r\n \"vmFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting Host System to virtual machine\r\n h_to_vm = build_traversal_spec(client_factory, \"h_to_vm\", \"HostSystem\",\r\n \"vm\", False,\r\n [visit_folders_select_spec])\r\n\r\n # For getting to Host System from Compute Resource\r\n cr_to_h = build_traversal_spec(client_factory, \"cr_to_h\",\r\n \"ComputeResource\", \"host\", False, [])\r\n\r\n # For getting to datastore from Compute Resource\r\n cr_to_ds = build_traversal_spec(client_factory, \"cr_to_ds\",\r\n \"ComputeResource\", \"datastore\", False, [])\r\n\r\n rp_to_rp_select_spec = build_selection_spec(client_factory, \"rp_to_rp\")\r\n rp_to_vm_select_spec = build_selection_spec(client_factory, \"rp_to_vm\")\r\n # For getting to resource pool from Compute Resource\r\n cr_to_rp = build_traversal_spec(client_factory, \"cr_to_rp\",\r\n \"ComputeResource\", \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to child res pool from the parent res pool\r\n rp_to_rp = build_traversal_spec(client_factory, \"rp_to_rp\", \"ResourcePool\",\r\n \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to Virtual Machine from the Resource Pool\r\n rp_to_vm = build_traversal_spec(client_factory, \"rp_to_vm\", \"ResourcePool\",\r\n \"vm\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # Get the assorted traversal spec which takes care of the objects to\r\n # be searched for from the root folder\r\n traversal_spec = build_traversal_spec(client_factory, \"visitFolders\",\r\n \"Folder\", \"childEntity\", False,\r\n [visit_folders_select_spec, dc_to_hf,\r\n dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,\r\n rp_to_rp, h_to_vm, rp_to_vm])\r\n return traversal_spec", "def _getFields(obj, tree=None, retval=None, fileobj=None):\n fieldAttributes = {'/FT': 'Field Type', '/Parent': 'Parent', '/T': 'Field Name', '/TU': 'Alternate Field Name',\n '/TM': 'Mapping Name', '/Ff': 'Field Flags', '/V': 'Value', '/DV': 'Default Value'}\n if retval is None:\n retval = OrderedDict()\n catalog = obj.trailer[\"/Root\"]\n # get the AcroForm tree\n if \"/AcroForm\" in catalog:\n tree = 
catalog[\"/AcroForm\"]\n else:\n return None\n if tree is None:\n return retval\n\n obj._checkKids(tree, retval, fileobj)\n for attr in fieldAttributes:\n if attr in tree:\n # Tree is a field\n obj._buildField(tree, retval, fileobj, fieldAttributes)\n break\n\n if \"/Fields\" in tree:\n fields = tree[\"/Fields\"]\n for f in fields:\n field = f.getObject()\n obj._buildField(field, retval, fileobj, fieldAttributes)\n\n return retval", "def get_recursively(search_dict, field):\n fields_found = []\n\n for key, value in search_dict.items():\n\n if key == field:\n fields_found.append(value)\n\n elif isinstance(value, dict):\n results = get_recursively(value, field)\n for result in results:\n fields_found.append(result)\n\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = get_recursively(item, field)\n for another_result in more_results:\n fields_found.append(another_result)\n\n return fields_found", "def _recursive_search(self, current_dict: dict, keys: List[str], max_depth: int, current_depth: int = 0) -> tuple:\n current_key = keys[current_depth]\n if current_depth < max_depth:\n\n if current_key not in current_dict:\n current_dict[current_key] = {}\n\n return self._recursive_search(current_dict[current_key], keys, max_depth, current_depth + 1)\n else:\n return current_key, current_dict", "def get_recursively(search_dict, field):\n fields_found = []\n\n for key, value in search_dict:\n\n if key == field:\n fields_found.append(value)\n\n elif isinstance(value, dict):\n results = get_recursively(value, field)\n for result in results:\n fields_found.append(result)\n\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = get_recursively(item, field)\n for another_result in more_results:\n fields_found.append(another_result)\n\n return fields_found", "def get_recursively(search_dict):\n fields_found = {}\n\n for key, value in search_dict.items():\n if isinstance(value, OrderedDict):\n results = get_recursively(value)\n for dict_key, dict_result in results.items():\n fields_found[dict_key] = dict_result\n\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = get_recursively(item)\n local_list = []\n for another_result in more_results:\n local_list.append(another_result)\n\n fields_found[key] = local_list\n else:\n fields_found[key] = value\n\n return fields_found", "def _get_nested(nested_dict, field):\n print(nested_dict, field)\n keys = field.split('.')\n current = nested_dict\n for k in keys:\n print('key', k, 'current', current)\n # return None for nested fields without a value in this doc\n if isinstance(current, list):\n # this list could contain anything. 
skip objects not containing `k`.\n return [x[k] for x in current if x.get(k) is not None]\n if not k in current:\n current = None\n break\n current = current[k]\n return current", "def get_recursively(self, search_dict, field):\n fields_found = []\n for key, value in iter(search_dict.items()):\n\n if key == field:\n fields_found.append(value)\n\n elif isinstance(value, dict):\n results = self.get_recursively(value, field)\n for result in results:\n fields_found.append(result)\n\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = self.get_recursively(item, field)\n for another_result in more_results:\n fields_found.append(another_result)\n\n return fields_found", "def searchAndFetch(self, fields={}, mainRequest={}, sideRequests={}, json=False):\n import itertools\n if not 'Form' in mainRequest:\n raise IOError('GISWEB.UTILS ERROR: A kay for the parent form is required!')\n\n mainResults = self.idx.dbsearch(mainRequest)\n # sideResults = dict(<parentId> = dict(<form_name> = [{**kwargs}, ...], ...))\n sideResults = dict()\n for form_name, sideRequest in sideRequests.items():\n if 'Form' not in sideRequest:\n sideRequest['Form'] = form_name\n sideRequest[self.parentKey] = {'query': [i.id for i in mainResults], 'operator': 'or'}\n\n sideResult = self.idx.dbsearch(sideRequest, sortindex=self.parentKey)\n\n for sideRecord in sideResult:\n tmp_dict = dict()\n for i in fields.get(form_name, []):\n try:\n value = sideRecord[i[0]]\n except:\n value = sideRecord.getObject().getItem(i[0], '')\n tmp_dict[i[-1]] = value\n\n if sideRecord[self.parentKey] not in sideResults:\n sideResults[sideRecord[self.parentKey]] = {form_name: [tmp_dict]}\n else:\n if form_name not in sideResults[sideRecord[self.parentKey]]:\n sideResults[sideRecord[self.parentKey]][form_name] = [tmp_dict]\n else:\n sideResults[sideRecord[self.parentKey]][form_name].append(tmp_dict)\n\n for rec in mainResults:\n mainForm = rec.getObject().Form\n tmp_dict = dict([(i[-1], rec.get(i[0]) or rec.getObject().getItem(i[0], '')) for i in fields.get(mainForm, [])])\n if rec.id not in sideResults:\n sideResults[rec.id] = {mainForm: [tmp_dict]}\n else:\n if mainForm not in sideResults[rec.id]:\n sideResults[rec.id][mainForm] = [tmp_dict]\n else:\n sideResults[rec.id][mainForm].append(tmp_dict)\n\n\n # sideResults2 = dict(<plominoId> = [[{**kwargs}, ...], ...], ...)\n sideResults2 = dict()\n for key,value in sideResults.items():\n sideResults2[key] = [x for x in itertools.product(*sideResults[key].values())]\n\n sideResults3 = dict()\n for key,prod in sideResults2.items():\n for lista in prod:\n it = [i.items() for i in lista]\n s = sum(it[1:], it[0])\n v = dict([(key,v) for k,v in s])\n if not key in sideResults3:\n sideResults3[key] = [v]\n else:\n sideResults3[key].append(v)\n\n it = []\n for tt in prod:\n mit = [i.items() for i in tt]\n somma = sum(mit[1:], mit[0])\n it.append(dict([(k,v) for k,v in somma]))\n\n sideResults3[key] = it\n\n aaData = []\n for mainId,v in sideResults3.items():\n\n mainDict = dict()\n if mainRequest['Form'] in fields:\n mainDoc = self.db.getDocument(mainId)\n for x in fields[mainRequest['Form']]:\n mainDict[x[-1]] = mainDoc.getItem(x[0], '')\n\n for i in v:\n it = i.items() + mainDict.items()\n aaRecord = dict()\n for key,value in it:\n# if isinstance(value, Missing.Value): # to be tested\n vtype = '%s' % type(value) # could be better to use isinstance for the test\n if vtype == \"<type 'Missing.Value'>\":\n value = None\n aaRecord[key] = value\n# aaRecord = 
dict([(key,value) for key,value in it])\n if aaRecord:\n aaData.append(aaRecord)\n\n if json:\n return json_dumps(aaData)\n else:\n return aaData", "def item_recurse(path, index):\n global count\n if index == len(item_keys):\n print(count,path,sep='')\n count +=1\n return\n for filename in db[item_keys[index]]['files']:\n newpath = str.join('-', (path, filename))\n item_recurse(newpath, index+1)", "def recurse_entities(\n input_data, entity_results=[], G=nx.Graph(), current_depth=0, depth=2, limit=3\n):\n if isinstance(input_data, str):\n # Starting fresh. Make sure variables are fresh.\n entity_results = []\n G = nx.Graph()\n current_depth = 0\n if not validators.url(input_data):\n input_data = get_wikipedia_url(input_data)\n if not input_data:\n print(\"No Wikipedia URL Found.\")\n return None, None\n else:\n print(\"Wikipedia URL: \", input_data)\n name = load_page_title(input_data).split(\"-\")[0].strip()\n else:\n name = load_page_title(input_data)\n input_data = (\n [\n {\n \"name\": name.title(),\n \"type\": \"START\",\n \"salience\": 0.0,\n \"wikipedia\": input_data,\n }\n ]\n if input_data\n else []\n )\n\n # Regex for wikipedia terms to not bias entities returned\n subs = r\"(wikipedia|wikimedia|wikitext|mediawiki|wikibase)\"\n\n for d in input_data:\n url = d[\"wikipedia\"]\n name = d[\"name\"]\n\n print(\n \" \" * current_depth + \"Level: {0} Name: {1}\".format(current_depth, name)\n )\n\n html = load_text_from_url(url)\n\n # html_to_text will default to all text if < 4 `p` elements found.\n if \"wikipedia.org\" in url:\n html = html_to_text(html, target_elements=\"p\")\n else:\n html = html_to_text(html)\n\n # Kill brutally wikipedia terms.\n html = re.sub(subs, \"\", html, flags=re.IGNORECASE)\n\n results = [\n r\n for r in google_nlp_entities(\n html, input_type=\"text\", limit=None, result_type=\"wikipedia\"\n )\n if \"wiki\" not in r[\"name\"].lower() and not G.has_node(r[\"name\"])\n ][:limit]\n _ = [G.add_edge(name, r[\"name\"]) for r in results]\n entity_results.extend(results)\n\n new_depth = int(current_depth + 1)\n if results and new_depth <= depth:\n recurse_entities(results, entity_results, G, new_depth, depth, limit)\n\n if current_depth == 0:\n return entity_results, G", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def test_depth_limit(self):\n with self.assertRaisesRegexp(\n RemoteException,\n r'.*DepthLimitExceeded: Depth limit of 2 ' +\n 'exceeded at localhost -> localhost -> localhost'):\n recursive()", "def test_client_get_domains_pagination(\n mocker, client_all_domains_input, client_all_domains_has_next_input\n):\n\n def mock_return(query, params):\n if params[\"after\"] == \"abc\":\n return client_all_domains_input\n return client_all_domains_has_next_input\n\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mock_return\n\n domain_list = test_client.get_domains()\n\n # If get_domains didn't try to paginate, len(domain_list) will be 3.\n # If it didn't stop trying to get more domains after hasNextPage became false\n # then the length will be greater than 6.\n assert len(domain_list) == 6", "def complex_recursion_regression_test(self):\n\n class User(Strongbox):\n ID = attr(int)\n username = attr(str)\n domains = linkset((lambda : Domain),\"user\")\n sites = linkset((lambda : Site),\"user\")\n class Domain(Strongbox):\n ID = attr(int)\n user = link(User)\n name = attr(str)\n site = 
link(lambda : Site) \n class Site(Strongbox):\n ID = attr(int)\n user = link(User)\n domain = link(Domain)\n dbMap = Schema({\n User:\"user\",\n Domain:\"domain\",\n Domain.user: \"userID\",\n Domain.site: \"siteID\",\n Site:\"site\",\n Site.user: \"userID\",\n Site.domain: \"domainID\",\n })\n\n clerk = Clerk(RamStorage(), dbMap)\n u = clerk.store(User(username=\"ftempy\"))\n u = clerk.match(User,username=\"ftempy\")[0]\n d = clerk.store(Domain(name=\"ftempy.com\", user=u))\n assert d.user, \"didn't follow link before fetch\"\n d = clerk.match(Domain, name=\"ftempy.com\")[0]\n\n # the bug was here: it only happened if User had .domains\n # I think because it was a linkset, and the linkset had\n # an injector. Fixed by inlining the injector test into\n # Clekr.store:\n assert d.user, \"didn't follow link after fetch\"\n assert d.user.ID == u.ID\n\n # ah, but then we had an infinite recursion problem\n # with site, but I fixed that with private.isDirty:\n d.site = clerk.store(Site(domain=d))\n d = clerk.store(d)\n assert d.site.domain.name == \"ftempy.com\"\n\n # and again here:\n d = clerk.fetch(Domain, 1)\n assert not d.private.isDirty\n assert not d.site.private.isDirty # this failed.\n clerk.store(d) # so this would recurse forever", "def _get_lineage(\n self,\n entity: Union[Type[T], str],\n path: str,\n up_depth: int = 1,\n down_depth: int = 1,\n ) -> Optional[Dict[str, Any]]:\n entity_name = self.get_entity_type(entity)\n search = (\n f\"?upstreamDepth={min(up_depth, 3)}&downstreamDepth={min(down_depth, 3)}\"\n )\n\n try:\n res = self.client.get(\n f\"{self.get_suffix(AddLineage)}/{entity_name}/{path}{search}\"\n )\n return res\n except APIError as err:\n logger.error(\n f\"Error {err.status_code} trying to GET linage for {entity.__class__.__name__} and {path}\"\n )\n return None", "def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content", "def test_nested_query():\n schema = graphene.Schema(query=NestedQuery)\n response = schema.execute(\"{topLevel {name, leaf {value , leaflets {value} } } }\")\n assert to_dict(response.data) == {\n \"topLevel\": {\n \"name\": \"top level name\",\n \"leaf\": {\n \"value\": \"some leaf value\",\n \"leaflets\": [{\"value\": \"subleaf1\"}, {\"value\": \"subleaf2\"}],\n },\n }\n 
}", "def get_rows_from_deepsearch_result_api(\n dataframe,\n datainfo,\n row_start,\n row_end,\n zwisd):\n if len(dataframe) < row_end:\n print('Get Deep Search Result API row larger than dataframe lenght.')\n return\n\n for zpid in dataframe.iloc[row_start:row_end]['zpid']:\n # Get response in XML format\n if datainfo.loc[datainfo['zpid'] == zpid]['street'].empty:\n continue\n\n response = get_deepsearch_result_api(\n zwisd,\n datainfo.loc[datainfo['zpid'] == zpid]['street'],\n datainfo.loc[datainfo['zpid'] == zpid]['city'] + ', ' + datainfo.loc[datainfo['zpid'] == zpid]['state'])\n # Remove namespace for better using.\n contents = re.sub('<!--(.*?)-->', '', str(response.text))\n contents = re.sub(':searchresults+.*xsd/SearchResults.xsd\"', '', contents)\n contents = re.sub(':searchresults', '', contents)\n\n root = ET.fromstring(contents)\n\n if root[1][1].text != '0':\n print('Get Deep Search Result API cannot get data with zpid:' + str(zpid))\n continue\n\n if len(root[1]) > 2:\n # The call are approaching the limit per day.\n print('Get Deep Search Result API are approaching call limit today. Call Terminate.')\n return\n\n # root[1][1].text message code\n # root[2][0][0][0].text zpid\n # root[2][0][0][7].text yearbuilt\n # root[2][0][0][8].text lotSizeSqFt\n # root[2][0][0][9].text finishedSqFt\n # root[2][0][0][10].text bathrooms\n # root[2][0][0][11].text bedrooms\n # root[2][0][0][2][4].text latitude\n # root[2][0][0][2][5].text longitude\n # root[2][0][0][14][0].text amount\n\n try:\n dataframe.loc[dataframe['zpid'] == zpid, 'bathrooms'] = root[2][0][0][10].text\n dataframe.loc[dataframe['zpid'] == zpid, 'bedrooms'] = root[2][0][0][11].text\n dataframe.loc[dataframe['zpid'] == zpid, 'finishedSqFt'] = root[2][0][0][9].text\n dataframe.loc[dataframe['zpid'] == zpid, 'lotsizeSqFt'] = root[2][0][0][8].text\n dataframe.loc[dataframe['zpid'] == zpid, 'latitude'] = str(float(root[2][0][0][2][4].text) * 1000000)\n dataframe.loc[dataframe['zpid'] == zpid, 'longitude'] = str(float(root[2][0][0][2][5].text) * 1000000)\n dataframe.loc[dataframe['zpid'] == zpid, 'yearbuilt'] = root[2][0][0][7].text\n dataframe.loc[dataframe['zpid'] == zpid, 'amount'] = root[2][0][0][14][0].text\n except IndexError:\n print('Get Deep Search Result API cannot get data with zpid:' + str(zpid))\n\n # dataframe.to_csv('../input/' + HouseInfoFileName, index=False)\n return dataframe", "def test_client_get_organizations_pagination(\n mocker, client_all_orgs_input, client_all_orgs_has_next_input\n):\n\n def mock_return(query, params):\n if params[\"after\"] == \"abc\":\n return client_all_orgs_input\n return client_all_orgs_has_next_input\n\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mock_return\n\n org_list = test_client.get_organizations()\n\n # If get_domains didn't try to paginate, len(domain_list) will be 2.\n # If it didn't stop trying to get more domains after hasNextPage became false\n # then the length will be greater than 4.\n assert len(org_list) == 4", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def walk(query):\n stack = [[query, 0]]\n while len(stack) != 0:\n query, index = stack[-1]\n if isinstance(query, 
queries.QueryCombination):\n if index < len(query.subqs):\n stack[-1][1] = index + 1\n stack.append([query.subqs[index], None])\n continue\n yield len(stack) - 1, query\n del stack[-1]", "def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored", "def get_associated_companies_info_by_company(company_no, officers_done, depth, companies_track, ret):\n \n \n # print(company_no)\n companies_track.append(company_no)\n company_info = get_company_info(company_no)\n new_depth = depth - 1\n \n \n for i in company_info:\n \n if i['name'] not in officers_done:\n # print(i['name'])\n officers_done.append(i['name'])\n url_patch = i['links']['officer']['appointments']\n bash_command = 'curl -s -X GET -u yLwgnyHvwlYxkbOBAoLEwsaEfVQ_a7kAuCUTNtSt: https://api.companieshouse.gov.uk{}'.format(url_patch)\n url = bash_command.split()\n check = subprocess.check_output(url)\n info = json.loads(check.decode('utf8'))['items']\n \n ret.append(info)\n companies_appointed = [x['appointed_to']['company_number'] for x in info]\n \n if new_depth > 0:\n for _ in range(new_depth):\n for j in companies_appointed:\n ret.append(get_associated_companies_info_by_company(j, officers_done, new_depth, companies_track, ret))\n \n \n return ret", "def preRead(force,cat,path,depth = 0,retries = 0):\n limit = 6 # Lower limit for printing debug messages in this function\n root = None\n if cat == 'p':\n global people\n root = people\n elif cat == 'l':\n global places\n root = places\n elif cat == 'c':\n global cities\n root = cities\n elif cat == 's':\n global states\n root = states\n elif cat == 'o':\n global orgs\n root = orgs\n elif cat == 'i':\n global items\n root = items\n else:\n print \"ERR: Invalid category %s passed to markChanged.\" % cat\n if not root:\n print \"preRead: Invalid category %s?\" % cat\n return False\n if depth > len(path): depth = len(path)\n if depth > 7: depth = 7\n if path[0] in root.keys():\n if depth <= 1:\n return True\n if path[1] in root[path[0]].keys():\n if depth <= 2:\n return True\n if path[2] in root[path[0]][path[1]].keys():\n if depth <= 3:\n return True\n if path[3] in root[path[0]][path[1]][path[2]].keys():\n if depth <= 4:\n return True\n if path[4] in root[path[0]][path[1]][path[2]][path[3]].keys():\n if depth <= 5:\n return True\n if path[5] in root[path[0]][path[1]][path[2]][path[3]][path[4]].keys():\n if depth <= 6:\n return True\n if path[6] in root[path[0]][path[1]][path[2]][path[3]][path[4]][path[5]].keys():\n return True # Maximum depth reached\n elif force:\n root[path[0]][path[1]][path[2]][path[3]][path[4]][path[5]][path[6]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n elif force:\n root[path[0]][path[1]][path[2]][path[3]][path[4]][path[5]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n elif force:\n 
root[path[0]][path[1]][path[2]][path[3]][path[4]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n elif force:\n root[path[0]][path[1]][path[2]][path[3]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n elif force:\n root[path[0]][path[1]][path[2]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n elif force:\n root[path[0]][path[1]] = {}\n if retries >= depth: force = False\n return preRead(force,cat,path,depth,retries + 1)\n else: # Not found, and not forcing it to be found\n if config['debug'] > limit: debugPath(root,path)\n return False\n else: # First level (fileid) can't be generated.\n if config['debug'] > limit: debugPath(root,path)\n return False", "def annotate_depth(self, limit=None):\n queryset = self\n stack = []\n for p in queryset:\n try:\n prev_p = stack[-1]\n except IndexError:\n prev_p = None\n if prev_p is not None:\n while (p.prefix not in prev_p.prefix) or p.prefix == prev_p.prefix:\n stack.pop()\n try:\n prev_p = stack[-1]\n except IndexError:\n prev_p = None\n break\n if prev_p is not None:\n prev_p.has_children = True\n stack.append(p)\n p.depth = len(stack) - 1\n if limit is None:\n return queryset\n return filter(lambda p: p.depth <= limit, queryset)", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def get_with_limit(obj, path, limit=None):\n res = demisto.get(obj, path)\n try:\n if limit:\n if len(res) > limit:\n if isinstance(res, dict):\n return {k: res[k] for k in list(res.keys())[:limit]}\n elif isinstance(res, list):\n return res[:limit]\n # If res has no len, or if not a list or a dictionary return res\n except Exception:\n return res\n return res", "def paginated_call(self) -> global___Snippet.ClientCall:", "def test_get_children(self):\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c2 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c3 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c4 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c5 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c6 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n\n self.hiarc_collections.add_child_to_collection(c1.key, c2.key)\n self.hiarc_collections.add_child_to_collection(c1.key, c3.key)\n self.hiarc_collections.add_child_to_collection(c1.key, c4.key)\n self.hiarc_collections.add_child_to_collection(c2.key, c5.key)\n self.hiarc_collections.add_child_to_collection(c4.key, c6.key)\n\n children = self.hiarc_collections.get_collection_children(c1.key)\n assert len(children) == 3\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c2)), None) is not None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c3)), None) is not None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c4)), None) is not None\n assert next((c for c in 
children if self.hiarc_util.compare_dict_to_entity(\n c, c5)), None) is None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c6)), None) is None", "def data(self, *_except_fields, **kwargs):\n\n _max_level = kwargs.get('max_level', 2)\n\n def model_to_dict(obj, ignore_fields=list(), back_relationships=set(), max_level=2,\n current_level=0):\n current_level += 1\n ignore_in_cur_iteration = list()\n for field in ignore_fields:\n final_exclusion = len(field) == 1\n if final_exclusion:\n ignore_in_cur_iteration.append(field[0])\n\n serialized_data = dict()\n for c in obj.__table__.columns:\n if c.key not in ignore_in_cur_iteration:\n serialized_data[c.key] = getattr(obj, c.key)\n relationships = class_mapper(obj.__class__).relationships\n visitable_relationships = [(name, rel) for name, rel in relationships.items() if\n name not in back_relationships and name not in _except_fields]\n\n for name, relation in visitable_relationships:\n ignore_in_next_iteration = list()\n\n if name in ignore_in_cur_iteration:\n continue\n\n for i in ignore_fields:\n if len(i) > 1 and name == i[0]:\n ignore_in_next_iteration.append(i[1:])\n\n if relation.backref:\n if type(relation.backref) == str:\n back_relationships.add(relation.backref)\n elif type(relation.backref) == tuple:\n back_relationships.add(relation.backref[0])\n relationship_children = getattr(obj, name)\n if relationship_children is not None:\n if relation.uselist and current_level != max_level:\n children = []\n for child in [c for c in relationship_children]:\n if current_level < max_level:\n children.append(model_to_dict(child,\n ignore_in_next_iteration,\n back_relationships,\n max_level,\n current_level))\n serialized_data[name] = children\n else:\n if current_level < max_level:\n serialized_data[name] = model_to_dict(relationship_children,\n ignore_in_next_iteration,\n back_relationships,\n max_level,\n current_level)\n return serialized_data\n\n try:\n normalized_except_fields = []\n for f in _except_fields:\n normalized_except_fields.append(f.split('>'))\n return model_to_dict(self, ignore_fields=normalized_except_fields, max_level=_max_level)\n except SQLAlchemyError:\n self.s.rollback()\n raise", "def get_reply_fields(self): \n def alter_request_edges(self, jdata):\n \"\"\"\n From the jsonified request template, converts\n \"edges\" : { \"node\" : { \"key1\" : value1, ... } }\n to something resembling a reply message body:\n \"edges\" : [ { \"key1\" : value, ... 
} }\n so that flatten_json can be run against it to extract\n valid field names.\n \"\"\"\n if isinstance(jdata, list):\n for entry in jdata:\n self._alter_request_edges(entry)\n if isinstance(jdata, dict):\n for key in jdata:\n if key == \"edges\":\n edge_dict = jdata[key]\n jdata[key] = []\n for subkey in edge_dict:\n jdata[key].append(edge_dict[subkey]) \n self._alter_request_edges(jdata[key]) \n\n json1 = re.sub(r'([z-zA-z0-9_-]+)(?:\\(.*?\\))*\\s*([\\[\\{])', r'\"\\1\" : \\2', self.template_text)\n json2 = re.sub(r'\\.*([a-zA-Z0-9]+)\\s*\\n', r'\"\\1\" : true,\\n', json1)\n json3 = re.sub(r'(\"[a-zA-Z0-9_-]+\"\\s*:[^,]+),(\\s*\\n\\s*[\\}\\]].*)', r'\\1\\2', json2)\n jdata = json.loads(json3)\n alter_request_edges(jdata)\n jreply = self.flatten_json(jdata, self.flatpath)\n self.reply_fields = [ key for key in jdata[0] ]\n return self._reply_fields", "def test_team_template_folders_id_children_count_get(self):\n pass", "def paginate_query(\n self,\n node,\n project_id=None,\n props=[\"id\", \"submitter_id\"],\n chunk_size=2500,\n format=\"json\",\n args=None,\n ):\n\n if node == \"datanode\":\n query_txt = \"\"\"{ %s (%s) { type } }\"\"\" % (node, args)\n response = self.sub.query(query_txt)\n if \"data\" in response:\n nodes = [record[\"type\"] for record in response[\"data\"][\"datanode\"]]\n if len(nodes) > 1:\n print(\n \"\\tMultiple files with that file_name exist across multiple nodes:\\n\\t{}.\".format(\n nodes\n )\n )\n elif len(nodes) == 1:\n node = nodes[0]\n else:\n return nodes\n\n if project_id != None:\n program, project = project_id.split(\"-\", 1)\n if args == None:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\")}\"\"\" % (node, project_id)\n else:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\", %s)}\"\"\" % (\n node,\n project_id,\n args,\n )\n else:\n if args == None:\n query_txt = \"\"\"{_%s_count}\"\"\" % (node)\n else:\n query_txt = \"\"\"{_%s_count (%s)}\"\"\" % (node, args)\n\n # First query the node count to get the expected number of results for the requested query:\n\n try:\n res = self.sub.query(query_txt)\n count_name = \"_\".join(map(str, [\"\", node, \"count\"]))\n qsize = res[\"data\"][count_name]\n print(\n \"\\n\\tFound {} records in '{}' node of project '{}'. \".format(\n qsize, node, project_id\n )\n )\n except:\n print(\"\\n\\tQuery to get _{}_count failed! 
{}\".format(node, query_txt))\n\n # Now paginate the actual query:\n properties = \" \".join(map(str, props))\n offset = 0\n total = {}\n total[\"data\"] = {}\n total[\"data\"][node] = []\n count = 0\n while offset < qsize:\n\n if project_id != None:\n if args == None:\n query_txt = (\n \"\"\"{%s (first: %s, offset: %s, project_id:\"%s\"){%s}}\"\"\"\n % (node, chunk_size, offset, project_id, properties)\n )\n else:\n query_txt = (\n \"\"\"{%s (first: %s, offset: %s, project_id:\"%s\", %s){%s}}\"\"\"\n % (node, chunk_size, offset, project_id, args, properties)\n )\n else:\n if args == None:\n query_txt = \"\"\"{%s (first: %s, offset: %s){%s}}\"\"\" % (\n node,\n chunk_size,\n offset,\n properties,\n )\n else:\n query_txt = \"\"\"{%s (first: %s, offset: %s, %s){%s}}\"\"\" % (\n node,\n chunk_size,\n offset,\n args,\n properties,\n )\n\n res = self.sub.query(query_txt)\n if \"data\" in res:\n records = res[\"data\"][node]\n\n if len(records) < chunk_size:\n if qsize == 999999999:\n return total\n\n total[\"data\"][node] += records # res['data'][node] should be a list\n offset += chunk_size\n elif \"error\" in res:\n print(res[\"error\"])\n if chunk_size > 1:\n chunk_size = int(chunk_size / 2)\n print(\"Halving chunk_size to: \" + str(chunk_size) + \".\")\n else:\n print(\"Query timing out with chunk_size of 1!\")\n exit(1)\n else:\n print(\"Query Error: \" + str(res))\n\n pct = int((len(total[\"data\"][node]) / qsize) * 100)\n msg = \"\\tRecords retrieved: {} of {} ({}%), offset: {}, chunk_size: {}.\".format(\n len(total[\"data\"][node]), qsize, pct, offset, chunk_size\n )\n # print(msg)\n sys.stdout.write(\"\\r\" + str(msg).ljust(200, \" \"))\n\n if format == \"tsv\":\n df = json_normalize(total[\"data\"][node])\n return df\n else:\n return total", "def get_deep(tree, path):\n for key in path[:-1]:\n tree = tree.get(key, {})\n return tree.get(path[-1])", "def test_get_pagination(mockclient_cl1):\n # There should be 600 statements in testset.\n r = mockclient_cl1.get(TEST_URL + \"?size=700\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 600\n\n # Get the first 500\n r = mockclient_cl1.get(TEST_URL + \"?size=500\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 500\n\n # Get the remaining 100\n r = mockclient_cl1.get(TEST_URL + \"?size=500&page=2\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 100", "def find_nested_models(self, model, definitions):\n for key, value in model.items():\n if isinstance(value, dict):\n model[key] = self.find_nested_models(value, definitions)\n elif key == '$ref':\n def_name = value.split('/')[-1]\n def_property = definitions[def_name]['properties']\n return self.find_nested_models(def_property, definitions)\n return model", "def traverse(self, filelist, depth=0):\n if depth > 10:\n return ['depth > 10']\n level = {}\n for entry in (path for path in self.connection.nlst() if path not in ('.', '..')):\n try:\n self.connection.cwd(entry)\n level[entry] = self.traverse(filelist, depth+1)\n self.connection.cwd('..')\n except ftplib.error_perm:\n level[entry] = None\n return level", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "def 
bulk_lookup_rdap(addresses=None, inc_raw=False, retry_count=3, depth=0,\n excluded_entities=None, rate_limit_timeout=60,\n socket_timeout=10, asn_timeout=240, proxy_openers=None):\n\n if not isinstance(addresses, list):\n\n raise ValueError('addresses must be a list of IP address strings')\n\n # Initialize the dicts/lists\n results = {}\n failed_lookups_dict = {}\n rated_lookups = []\n stats = {\n 'ip_input_total': len(addresses),\n 'ip_unique_total': 0,\n 'ip_lookup_total': 0,\n 'lacnic': {'failed': [], 'rate_limited': [], 'total': 0},\n 'ripencc': {'failed': [], 'rate_limited': [], 'total': 0},\n 'apnic': {'failed': [], 'rate_limited': [], 'total': 0},\n 'afrinic': {'failed': [], 'rate_limited': [], 'total': 0},\n 'arin': {'failed': [], 'rate_limited': [], 'total': 0},\n 'unallocated_addresses': []\n }\n asn_parsed_results = {}\n\n if proxy_openers is None:\n\n proxy_openers = [None]\n\n proxy_openers_copy = iter(proxy_openers)\n\n # Make sure addresses is unique\n unique_ip_list = list(unique_everseen(addresses))\n\n # Get the unique count to return\n stats['ip_unique_total'] = len(unique_ip_list)\n\n # This is needed for iteration order\n rir_keys_ordered = ['lacnic', 'ripencc', 'apnic', 'afrinic', 'arin']\n\n # First query the ASN data for all IPs, can raise ASNLookupError, no catch\n bulk_asn = get_bulk_asn_whois(unique_ip_list, timeout=asn_timeout)\n\n # ASN results are returned as string, parse lines to list and remove first\n asn_result_list = bulk_asn.split('\\n')\n del asn_result_list[0]\n\n # We need to instantiate IPASN, which currently needs a Net object,\n # IP doesn't matter here\n net = Net('1.2.3.4')\n ipasn = IPASN(net)\n\n # Iterate each IP ASN result, and add valid RIR results to\n # asn_parsed_results for RDAP lookups\n for asn_result in asn_result_list:\n\n temp = asn_result.split('|')\n\n # Not a valid entry, move on to next\n if len(temp) == 1:\n\n continue\n\n ip = temp[1].strip()\n\n # We need this since ASN bulk lookup is returning duplicates\n # This is an issue on the Cymru end\n if ip in asn_parsed_results.keys(): # pragma: no cover\n\n continue\n\n try:\n\n results = ipasn.parse_fields_whois(asn_result)\n\n except ASNRegistryError: # pragma: no cover\n\n continue\n\n # Add valid IP ASN result to asn_parsed_results for RDAP lookup\n asn_parsed_results[ip] = results\n stats[results['asn_registry']]['total'] += 1\n\n # Set the list of IPs that are not allocated/failed ASN lookup\n stats['unallocated_addresses'] = list(k for k in addresses if k not in\n asn_parsed_results)\n\n # Set the total lookup count after unique IP and ASN result filtering\n stats['ip_lookup_total'] = len(asn_parsed_results)\n\n # Track the total number of LACNIC queries left. 
This is tracked in order\n # to ensure the 9 priority LACNIC queries/min don't go into infinite loop\n lacnic_total_left = stats['lacnic']['total']\n\n # Set the start time, this value is updated when the rate limit is reset\n old_time = time.time()\n\n # Rate limit tracking dict for all RIRs\n rate_tracker = {\n 'lacnic': {'time': old_time, 'count': 0},\n 'ripencc': {'time': old_time, 'count': 0},\n 'apnic': {'time': old_time, 'count': 0},\n 'afrinic': {'time': old_time, 'count': 0},\n 'arin': {'time': old_time, 'count': 0}\n }\n\n # Iterate all of the IPs to perform RDAP lookups until none are left\n while len(asn_parsed_results) > 0:\n\n # Sequentially run through each RIR to minimize lookups in a row to\n # the same RIR.\n for rir in rir_keys_ordered:\n\n # If there are still LACNIC IPs left to lookup and the rate limit\n # hasn't been reached, skip to find a LACNIC IP to lookup\n if (\n rir != 'lacnic' and lacnic_total_left > 0 and\n (rate_tracker['lacnic']['count'] != 9 or\n (time.time() - rate_tracker['lacnic']['time']\n ) >= rate_limit_timeout\n )\n ): # pragma: no cover\n\n continue\n\n # If the RIR rate limit has been reached and hasn't expired,\n # move on to the next RIR\n if (\n rate_tracker[rir]['count'] == 9 and (\n (time.time() - rate_tracker[rir]['time']\n ) < rate_limit_timeout)\n ): # pragma: no cover\n\n continue\n\n # If the RIR rate limit has expired, reset the count/timer\n # and perform the lookup\n elif ((time.time() - rate_tracker[rir]['time']\n ) >= rate_limit_timeout): # pragma: no cover\n\n rate_tracker[rir]['count'] = 0\n rate_tracker[rir]['time'] = time.time()\n\n # Create a copy of the lookup IP dict so we can modify on\n # successful/failed queries. Loop each IP until it matches the\n # correct RIR in the parent loop, and attempt lookup\n tmp_dict = asn_parsed_results.copy()\n\n for ip, asn_data in tmp_dict.items():\n\n # Check to see if IP matches parent loop RIR for lookup\n if asn_data['asn_registry'] == rir:\n\n log.debug('Starting lookup for IP: {0} '\n 'RIR: {1}'.format(ip, rir))\n\n # Add to count for rate-limit tracking only for LACNIC,\n # since we have not seen aggressive rate-limiting from the\n # other RIRs yet\n if rir == 'lacnic':\n\n rate_tracker[rir]['count'] += 1\n\n # Get the next proxy opener to use, or None\n try:\n\n opener = next(proxy_openers_copy)\n\n # Start at the beginning if all have been used\n except StopIteration:\n\n proxy_openers_copy = iter(proxy_openers)\n opener = next(proxy_openers_copy)\n\n # Instantiate the objects needed for the RDAP lookup\n net = Net(ip, timeout=socket_timeout, proxy_opener=opener)\n rdap = RDAP(net)\n\n try:\n\n # Perform the RDAP lookup. retry_count is set to 0\n # here since we handle that in this function\n results = rdap.lookup(\n inc_raw=inc_raw, retry_count=0, asn_data=asn_data,\n depth=depth, excluded_entities=excluded_entities\n )\n\n log.debug('Successful lookup for IP: {0} '\n 'RIR: {1}'.format(ip, rir))\n\n # Lookup was successful, add to result. 
Set the nir\n # key to None as this is not supported\n # (yet - requires more queries)\n results[ip] = results\n results[ip]['nir'] = None\n\n # Remove the IP from the lookup queue\n del asn_parsed_results[ip]\n\n # If this was LACNIC IP, reduce the total left count\n if rir == 'lacnic':\n\n lacnic_total_left -= 1\n\n log.debug(\n '{0} total lookups left, {1} LACNIC lookups left'\n ''.format(str(len(asn_parsed_results)),\n str(lacnic_total_left))\n )\n\n # If this IP failed previously, remove it from the\n # failed return dict\n if (\n ip in failed_lookups_dict.keys()\n ): # pragma: no cover\n\n del failed_lookups_dict[ip]\n\n # Break out of the IP list loop, we need to change to\n # the next RIR\n break\n\n except HTTPLookupError: # pragma: no cover\n\n log.debug('Failed lookup for IP: {0} '\n 'RIR: {1}'.format(ip, rir))\n\n # Add the IP to the failed lookups dict if not there\n if ip not in failed_lookups_dict.keys():\n\n failed_lookups_dict[ip] = 1\n\n # This IP has already failed at least once, increment\n # the failure count until retry_count reached, then\n # stop trying\n else:\n\n failed_lookups_dict[ip] += 1\n\n if failed_lookups_dict[ip] == retry_count:\n\n del asn_parsed_results[ip]\n stats[rir]['failed'].append(ip)\n\n if rir == 'lacnic':\n\n lacnic_total_left -= 1\n\n # Since this IP failed, we don't break to move to next\n # RIR, we check the next IP for this RIR\n continue\n\n except HTTPRateLimitError: # pragma: no cover\n\n # Add the IP to the rate-limited lookups dict if not\n # there\n if ip not in rated_lookups:\n\n rated_lookups.append(ip)\n stats[rir]['rate_limited'].append(ip)\n\n log.debug('Rate limiting triggered for IP: {0} '\n 'RIR: {1}'.format(ip, rir))\n\n # Since rate-limit was reached, reset the timer and\n # max out the count\n rate_tracker[rir]['time'] = time.time()\n rate_tracker[rir]['count'] = 9\n\n # Break out of the IP list loop, we need to change to\n # the next RIR\n break\n\n return_tuple = namedtuple('return_tuple', ['results', 'stats'])\n return return_tuple(results, stats)", "def query4(transaction, **params):\n query = f'''\n match $person isa person,\n has age > {params['age_lower']}, has age < {params['age_upper']};\n $country isa country, has name $country-name;\n (contains-city: $country, in-country: $city) isa has-city;\n (contains-residence: $city, in-city: $person) isa has-residence;\n get; group $country-name; count;\n '''\n print(f\"\\nQuery 4:\\n {query}\")\n iterator = transaction.query(query)\n result = []\n\n for item in list(iterator): # Consume ResponseIterator into a list\n counts = item.answers()[0].number()\n country = item.owner().value()\n result.append({'country': country, 'personCounts': counts})\n\n sorted_results = sorted(result, key=lambda x: x['personCounts'], reverse=True)\n print(f\"3 Countries with the most people with age > {params['age_lower']} and < {params['age_upper']}: \\\n \\n{sorted_results[:3]}\")\n return sorted_results", "def nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n if isinstance(value, (list, tuple)):\n # assuming we have a list of dict with k as one of the keys\n stype = set([type(e) for e in value])\n if not stype:\n return None\n assert len(stype) == 1 and stype == {dict}, \"Expecting a list of dict, found types: %s\" % stype\n value = [e[k] for e in value if e.get(k)]\n # can't go further ?\n return value\n else:\n value = value[k]\n except KeyError:\n return None\n\n return value", "def _dereferencing_iterator(self, base_url, partial, path, 
recursions):\n from .iterators import reference_iterator\n for _, refstring, item_path in reference_iterator(partial):\n # Split the reference string into parsed URL and object path\n ref_url, obj_path = _url.split_url_reference(base_url, refstring)\n\n if self._skip_reference(ref_url):\n continue\n\n # The reference path is the url resource and object path\n ref_path = (_url.urlresource(ref_url), tuple(obj_path))\n\n # Count how often the reference path has been recursed into.\n from collections import Counter\n rec_counter = Counter(recursions)\n next_recursions = recursions + (ref_path,)\n\n if rec_counter[ref_path] >= self.__reclimit:\n # The referenced value may be produced by the handler, or the handler\n # may raise, etc.\n ref_value = self.__reclimit_handler(self.__reclimit, ref_url,\n next_recursions)\n else:\n # The referenced value is to be used, but let's copy it to avoid\n # building recursive structures.\n ref_value = self._dereference(ref_url, obj_path, next_recursions)\n\n # Full item path\n full_path = path + item_path\n\n # First yield parent\n yield full_path, ref_value", "def get_key_recursive(key, config):\n if not isinstance(key, list):\n key = key.split(\"/\") # subdict indexing split using slash\n assert key[0] in config, f\"missing key '{key[0]}' in metadata dictionary: {config}\"\n val = config[key[0]]\n if isinstance(val, (dict, collections.OrderedDict)):\n assert len(key) > 1, \"missing keys to index metadata subdictionaries\"\n return get_key_recursive(key[1:], val)\n return int(val)", "def query1(transaction):\n query = f'''\n match $person isa person;\n (follower: $follower, followee: $person) isa connection;\n get; group $person; count;\n '''\n print(f\"\\nQuery 1:\\n {query}\")\n iterator = transaction.query(query)\n # To obtain the result for the \"count\" query, we need to look up the grakn python-client\n # source code: https://github.com/graknlabs/client-python/tree/master/grakn/service/Session\n # The object hierarchy needs to be looked up from the source code\n result = []\n\n for item in list(iterator): # Consume ResponseIterator into a list\n\n # Convert AnswerGroup object --> Value and apply the number() method of this instance\n counts = item.answers()[0].number()\n\n # Apply the owner() method of AnswerGroup object to identify parent concepts\n # This returns an Attribute instance, on which we apply the value() method\n person = next(item.owner().attributes()).value()\n result.append({'personID': person, 'numFollowers': counts})\n\n sorted_results = sorted(result, key=lambda x: x['numFollowers'], reverse=True)\n print(f\"Top 3 most-followed persons:\\n{sorted_results[:3]}\")\n\n return sorted_results", "def _handle_result_by_index(self, idx):\n if idx < 0:\n return None\n opts = dict(self.options)\n skip = opts.pop('skip', 0)\n limit = opts.pop('limit', None)\n py_to_couch_validate('skip', skip)\n py_to_couch_validate('limit', limit)\n if limit is not None and idx >= limit:\n # Result is out of range\n return dict()\n return self._ref(skip=skip+idx, limit=1, **opts)", "def _query(\n self,\n root=None,\n depth=0,\n query_method=\"propfind\",\n url=None,\n expected_return_value=None,\n ):\n body = \"\"\n if root:\n if hasattr(root, \"xmlelement\"):\n body = etree.tostring(\n root.xmlelement(), encoding=\"utf-8\", xml_declaration=True\n )\n else:\n body = root\n if url is None:\n url = self.url\n ret = getattr(self.client, query_method)(url, body, depth)\n if ret.status == 404:\n raise error.NotFoundError(errmsg(ret))\n if (\n expected_return_value 
is not None and ret.status != expected_return_value\n ) or ret.status >= 400:\n ## COMPATIBILITY HACK - see https://github.com/python-caldav/caldav/issues/309\n body = to_wire(body)\n if ret.status == 500 and not b\"getetag\" in body:\n body = body.replace(\n b\"<C:calendar-data/>\", b\"<D:getetag/><C:calendar-data/>\"\n )\n return self._query(\n body, depth, query_method, url, expected_return_value\n )\n raise error.exception_by_method[query_method](errmsg(ret))\n return ret", "def depth_from_match(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap", "def get(klass, n=10, ancestor=None, order=None, cursor=None,\n projection=None, keys_only=None, **filter_kwargs):\n logging.info(u'{}.get(n={}, ancestor={}, order={}, cursor={}, '\n 'projection={}, keys_only={}, filter_kwargs={})'\n .format(klass.__name__, n, ancestor, order, cursor,\n projection, keys_only, filter_kwargs))\n\n # @todo: permissions here?\n # if n > 10 and not self.user:\n # raise PermissionDenied(\"Public cannot change result set size.\")\n\n # Uniquify any requested ids. Allows other code to be lazy, and may\n # save on subqueries.\n if 'uid' in filter_kwargs and type(filter_kwargs['uid']) is list:\n filter_kwargs['uid'] = list(set(filter_kwargs['uid']))\n\n fetch_kwargs = {}\n\n if projection is not None:\n # Change from string names of attributes to properties themselves.\n fetch_kwargs['projection'] = [getattr(klass, p)\n for p in projection]\n if keys_only is not None:\n fetch_kwargs['keys_only'] = keys_only\n if cursor is not None:\n fetch_kwargs['start_cursor'] = cursor\n\n if n == float('inf'):\n query = klass._query(ancestor=ancestor, order=order,\n **filter_kwargs)\n results = query.iter(**fetch_kwargs)\n else:\n # To do paging with reverse cursors, there must be an order. 
If\n # none specified, use the default: klass.key represented by ''.\n order_str = '' if order is None else order\n order = klass.convert_order_str(order_str)\n reverse_order = klass.convert_order_str(\n reverse_order_str(order_str))\n\n query = klass._query(ancestor=ancestor, **filter_kwargs)\n f_query = query.order(order)\n r_query = query.order(reverse_order)\n f_results, f_cursor, f_more = f_query.fetch_page(n, **fetch_kwargs)\n r_results, r_cursor, r_more = r_query.fetch_page(n, **fetch_kwargs)\n\n results = CursorResult(f_results)\n # Cursors are None if results are empty.\n results.next_cursor = f_cursor\n results.previous_cursor = r_cursor\n results.more = f_more\n\n # post-processing, if necessary\n if len(query.unsafe_kwargs) > 0:\n results = klass.post_process(results, query.unsafe_kwargs)\n\n return results", "def _getProtoPathFields(self, parent, field_names:list, is_all:bool, results:list):\n if not field_names:\n results.append(parent)\n return\n field_name = field_names[0]\n field = getattr(parent, field_name)\n field_descriptor = parent.DESCRIPTOR.fields_by_name[field_name]\n if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n if isinstance(field, MessageMap):\n self._getProtoPathMapFields(field, field_descriptor, field_names, is_all, results)\n else:\n self._getProtoPathListFields(field, field_descriptor, field_names, is_all, results)\n else:\n self._getProtoPathFields(field, field_names[1:], is_all, results)", "def get_items(rs):\n for k, v in rs.items():\n if isinstance(v, list):\n for x in v:\n if isinstance(x, dict):\n yield from get_items(x)\n elif isinstance(v, dict):\n yield from get_items(v)\n yield (k, v)", "def expand(obj):\r\n if isinstance(obj, list):\r\n for i,o in enumerate(obj):\r\n obj[i] = expand(o)\r\n elif isinstance(obj, dict):\r\n if 'paging' in obj:\r\n current = obj\r\n i = 0\r\n while 'next' in current['paging']:\r\n i += 1\r\n logger.info('...{}'.format(i))\r\n current = GraphQuery.request_until_success(\r\n current['paging']['next']\r\n )\r\n obj['data'].extend(current['data'])\r\n return obj", "def get_fields():\n return jsonify(result=Tree.fields())", "def recursive():\n with Local() as tun:\n tun.call(recursive)", "def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we 
check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)", "def crawl_database():\n\n LOGGING.push(\"Attempting to request featured games.\")\n participants = get_featured()\n LOGGING.push(\"Got @\" + str(len(participants)) + \"@ participants.\")\n\n # NOTE: Only 40 summoners can be requested at a time\n participants = random.sample(participants, min(40, len(participants)))\n\n ids = SESSION.get_ids(participants)\n search_players = [ids[player]['id'] for player in ids.keys()]\n\n LOGGING.push(\n \"Now attempting to crawl players with a breadth of @\" +\n str(BREADTH) + \"@ and depth of ^\" + str(DEPTH) + \"^.\"\n )\n\n # NOTE: Creates the original call stack to crawl players\n for player in search_players:\n crawl_player(player, DEPTH, BREADTH)\n\n LOGGING.push(\"Finished crawling database.\")", "async def root(\n p: str,\n item_id: int = Path(\n ...,\n title=\"The ID of the item to get\",\n ge=1, # constraint greater than or equal to 1\n lt=10 # less than 10\n ),\n size: Optional[float] = Query(None, gt=0., lt=33.),\n q: Optional[int] = Query(None, alias=\"item-query\")\n):\n results = dict(item_id=item_id)\n if q:\n results.update(q=q)\n if size:\n results.update(size=size)\n return results", "def explore_all_nf_data():\n request = app.current_request\n resource_type = request.query_params[\"resource_type\"]\n offset = int(request.query_params[\"offset\"])\n limit = int(request.query_params[\"limit\"])\n explorer = UnogsExplorer(resource_type)\n success = explorer.explore(limit, offset)\n return {\"success\": success}", "def recurse(subreddit, hot_list=[]):\n headers = {'User-Agent': 'Mauricio'}\n\n url = 'http://www.reddit.com/r/' + subreddit + '/hot/.json'\n r = requests.get(url, headers=headers, params=parameters)\n if r.status_code == 200:\n answer_list_10 = r.json().get('data').get('children')\n for top in range(len(answer_list_10)):\n hot_list.append((answer_list_10[top].get('data').get('title')))\n if len(answer_list_10) >= 100:\n parameters['after'] = r.json().get('data').get('after')\n recurse(subreddit, hot_list)\n return(hot_list)\n else:\n return(None)", "def raw_to_tree_builder(query_set, fields_list):\n res_dict = {}\n\n #if no fields left to iterate\n if len(fields_list) == 0:\n return res_dict\n\n #getting all values for all fields and attach them to tree node\n for k in query_set.values_list(fields_list[0], flat=True):\n #pushing next node name to kwargs for QuerySet.filter() func\n kwargs = {fields_list[0]: k}\n #getting children nodes\n res_dict[k] = raw_to_tree_builder(query_set.filter(**kwargs), fields_list[1:])\n\n return res_dict", "def batch_get(func: object, filt: str, 
catg: str):\n offset = 0\n running = True\n returned = []\n notified = False\n while running:\n lookup = func(filter=filt, offset=offset, limit=5000, fields=\"__full__\")\n total = lookup[\"body\"][\"meta\"].get(\"pagination\", {}).get(\"total\", 0)\n if not notified:\n notify = f\"Retrieving {total:,} {catg} results.\"\n if total > 50000:\n notify = f\"Retrieving first 50,000 of {total:,} {catg} results.\"\n print(notify)\n notified = True\n else:\n progress.next()\n if lookup[\"body\"][\"resources\"]:\n offset += len(lookup[\"body\"][\"resources\"])\n returned.extend(lookup[\"body\"][\"resources\"])\n if offset >= total:\n running = False\n\n return returned", "def partial_tree(s, n):\n if n == 1:\n return (Tree(s.first), s.rest)\n elif n == 2:\n return (Tree(s.first, [Tree(s.rest.first)]), s.rest.rest)\n else:\n left_size = (n-1)//2\n right_size = n - left_size - 1\n \"*** YOUR CODE HERE ***\"", "def paginate(client_fun, *args, **kwargs):\n resp = client_fun(*args, **kwargs)\n yield from resp['content']\n total_elements = resp['totalElements']\n page_size = resp['pageSize']\n page_number = resp['pageNumber'] + 1\n if 'page_number' in kwargs:\n kwargs.pop('page_number')\n while page_number * page_size < total_elements:\n resp = client_fun(*args, page_number=page_number, **kwargs)\n yield from resp['content']\n page_number = resp['pageNumber'] + 1", "def test_recursive_select_by_deprecated(self):\n m = mapper(User, users, properties={\n 'orders':relation(mapper(Order, orders), backref='user'),\n 'addresses':relation(mapper(Address, addresses), backref='user'),\n })\n q = create_session().query(m)\n q.select_by(email_address='foo')", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def RecurseKeys(self):\n yield self\n for subkey in self.GetSubkeys():\n for key in subkey.RecurseKeys():\n yield key", "def findChildren(self, depth):\n assert depth in (\"0\", \"1\", \"infinity\"), \"Invalid depth: %s\" % (depth,)\n if depth == \"0\" or not self.isCollection():\n return ()\n else:\n unimplemented(self)", "def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])", "def _recurse_children(self, offset):\n while offset < self.obj_offset + self.Length:\n item = obj.Object(\"VerStruct\", offset = offset, vm = self.obj_vm, parent = self)\n if item.Length < 1 or item.get_key() == None:\n raise StopIteration(\"Could not recover a key for a child at offset {0}\".format(item.obj_offset))\n yield item.get_key(), item.get_children()\n offset = self.offset_pad(offset + item.Length)\n raise StopIteration(\"No children\")", "def test_ten_results_returned(delete_previous_db_record):\n request = create_client().gateway.getResults(\n search=\"some string\").response()\n\n # Assert sucessful request\n assert_that(request.result.status, equal_to('200'))\n\n \"\"\"\n I'm assuming the json object uses a list to contain\n the results\n \"\"\"\n assert_that(len(request.result.results, equal_to(10)))", "def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return", "def _recursion_helper(self, iterator, recursion_level):\n for 
resource in iterator:\n # Check if we need to display contents of a container.\n if resource.is_container() and recursion_level > 0:\n yield _HeaderFormatWrapper(\n resource,\n display_detail=self._display_detail,\n use_gsutil_style=self._use_gsutil_style)\n\n # Get container contents by adding wildcard to URL.\n nested_iterator = self._get_container_iterator(\n resource.storage_url, recursion_level-1)\n for nested_resource in nested_iterator:\n yield nested_resource\n\n else:\n # Resource wasn't a container we can recurse into, so just yield it.\n yield _ResourceFormatWrapper(\n resource,\n all_versions=self._all_versions,\n display_detail=self._display_detail,\n include_etag=self._include_etag,\n readable_sizes=self._readable_sizes,\n full_formatter=self._full_formatter)", "def setrecursionlimit(n): # real signature unknown; restored from __doc__\n pass", "def _RecursePrint(self, blr):\n num_bytes = 0\n num_objs = 0\n\n if blr.HasKey():\n blr_iterator = iter([blr])\n elif blr.HasPrefix():\n blr_iterator = self.WildcardIterator(\n '%s/*' % blr.GetRStrippedUriString(), all_versions=self.all_versions)\n elif blr.NamesBucket():\n blr_iterator = self.WildcardIterator(\n '%s*' % blr.GetUriString(), all_versions=self.all_versions)\n else:\n # This BLR didn't come from a bucket listing. This case happens for\n # BLR's instantiated from a user-provided URI.\n blr_iterator = PluralityCheckableIterator(\n UriOnlyBlrExpansionIterator(\n self, blr, all_versions=self.all_versions))\n if blr_iterator.is_empty() and not ContainsWildcard(blr.GetUriString()):\n raise CommandException('No such object %s' % blr.GetUriString())\n\n for cur_blr in blr_iterator:\n if self.exclude_patterns:\n tomatch = cur_blr.GetUriString()\n skip = False\n for pattern in self.exclude_patterns:\n if fnmatch.fnmatch(tomatch, pattern):\n skip = True\n break\n if skip:\n continue\n if cur_blr.HasKey():\n # Object listing.\n no, nb = self._PrintInfoAboutBucketListingRef(cur_blr)\n else:\n # Subdir listing.\n if cur_blr.GetUriString().endswith('//'):\n # Expand gs://bucket// into gs://bucket//* so we don't infinite\n # loop. 
This case happens when user has uploaded an object whose\n # name begins with a /.\n cur_blr = BucketListingRef(self.suri_builder.StorageUri(\n '%s*' % cur_blr.GetUriString()), None, None, cur_blr.headers)\n no, nb = self._RecursePrint(cur_blr)\n num_bytes += nb\n num_objs += no\n\n if blr.HasPrefix() and not self.summary_only:\n self._PrintSummaryLine(num_bytes, blr.GetUriString().encode('utf-8'))\n\n return num_objs, num_bytes", "def _calculate_recursive_length(self, msg_dict):\n delimiter = \"\\r\\n\\r\\n\"\n initial_length = len(\n json.dumps(msg_dict) + delimiter)\n initial_list = [initial_length, msg_dict]\n recursive_length = len(\n json.dumps(initial_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n while len(json.dumps(recursive_list) + delimiter) != recursive_list[0]:\n recursive_length = len(\n json.dumps(recursive_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n return recursive_list[0]", "def get_deep(config, key_seq):\n if 1 == len(key_seq):\n return config[key_seq[0]]\n else:\n return get_deep(config[key_seq[0]], key_seq[1:])", "def flatten_json_struct(data, count_fields=[], datetime_fields=[]):\n for k,v in data.items():\n if v and type(v) != dict and type(v) != list:\n if k in datetime_fields and re_prog.match(v):\n #print('> yielding date {0}'.format(k))\n yield k, date_parser.parse(v).date()\n else:\n #print('> yielding value {0}: {1}'.format(k, v))\n yield k, v\n elif type(v) == list:\n if k in count_fields:\n #print('> yielding count of {0}'.format(k))\n yield k, len(v)\n else:\n new_data = { _generate_name(k,idx):val for idx,val in enumerate(v) }\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1] \n elif type(v) == dict:\n new_data = { _generate_name(k, k1): v1 for k1, v1 in v.items()}\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1]", "def recurse(subreddit, hot_list=[], after=\"\"):\n\n if after is None:\n return None\n\n elif subreddit is None:\n return None\n\n else:\n try:\n params = {\"after\": after, \"limit\": 20}\n\n url = \"https://www.reddit.com/r/{}/hot.json?\".\\\n format(subreddit)\n\n response = requests.get(url,\n params=params,\n allow_redirects=False,\n headers={\"User-agent\": \"please-thanks\"})\n\n results = response.json()\n\n for elmt_dicts in results[\"data\"][\"children\"]:\n for title in elmt_dicts[\"data\"][\"title\"]:\n if title == \"title\":\n hot_list.append(title)\n\n return recurse(subreddit, hot_list=[], after=results[\"data\"])\n except KeyError:\n return None", "def recurse(subreddit, hot_list=[], after=None):\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\\\n (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'\n headers = {'User-Agent': user_agent}\n url = 'https://www.reddit.com/r/{}/hot/.json?limit=100'.format(subreddit)\n if after is not None:\n url += '&after=' + after\n resp = requests.get(url,\n headers=headers,\n allow_redirects=False)\n if resp.status_code == 200:\n resp_dict = resp.json()\n data_dict = resp_dict.get('data', {})\n a_list = data_dict.get('children', [])\n for post in a_list:\n post_dict = post.get('data', {})\n title = post_dict.get('title')\n if title is not None:\n 
hot_list.append(title)\n after = data_dict.get('after')\n if after is None:\n return hot_list\n else:\n return (recurse(subreddit, hot_list, after))\n else:\n return None", "def CHILDREN(cls, parentid, reftable, refcolumn, **kwargs):\n limit = kwargs.get ( \"limit\", None )\n order = kwargs.get ( \"order\", ['o.objectid'] )\n reprfunc = kwargs.get ( \"reprfunc\", None )\n gettxt = kwargs.get ( \"gettxt\", True )\n select = kwargs.get ( \"select\", [\"*\"] )\n if limit:\n limit = \"OFFSET {0} LIMIT {1}\".format ( *limit )\n else:\n limit = \"\"\n order = \",\".join(order)\n select = \",\".join(select)\n if gettxt:\n gettxtq = (\"LEFT JOIN {0}.object_search_txt t ON t.objectid = o.objectid\".format(CFG.DB.SCHEMA), \", t.txt as _astxt\" )\n else:\n gettxtq = ( \"\", \"\" )\n query = \"SELECT {6}{8} FROM {5}.{0} o {7} WHERE \\\"{1}\\\" = '{2}' ORDER BY {3} {4}\".format (\n reftable, refcolumn, int(parentid), order, limit, CFG.DB.SCHEMA,\n select, gettxtq[0], gettxtq[1] )\n rowset = CFG.CX.query ( query ).dictresult()\n table = Table.Get ( reftable )\n for row in rowset:\n record = cls.EMPTY (table, reprfunc = reprfunc )\n record.feedDataRow ( row )\n yield record", "def prefetch_one_level(instances, prefetcher, lookup, level):\n # prefetcher must have a method get_prefetch_queryset() which takes a list\n # of instances, and returns a tuple:\n\n # (queryset of instances of self.model that are related to passed in instances,\n # callable that gets value to be matched for returned instances,\n # callable that gets value to be matched for passed in instances,\n # boolean that is True for singly related objects,\n # cache or field name to assign to,\n # boolean that is True when the previous argument is a cache name vs a field name).\n\n # The 'values to be matched' must be hashable as they will be used\n # in a dictionary.\n\n (\n rel_qs,\n rel_obj_attr,\n instance_attr,\n single,\n cache_name,\n is_descriptor,\n ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))\n # We have to handle the possibility that the QuerySet we just got back\n # contains some prefetch_related lookups. We don't want to trigger the\n # prefetch_related functionality by evaluating the query. 
Rather, we need\n # to merge in the prefetch_related lookups.\n # Copy the lookups in case it is a Prefetch object which could be reused\n # later (happens in nested prefetch_related).\n additional_lookups = [\n copy.copy(additional_lookup)\n for additional_lookup in getattr(rel_qs, \"_prefetch_related_lookups\", ())\n ]\n if additional_lookups:\n # Don't need to clone because the manager should have given us a fresh\n # instance, so we access an internal instead of using public interface\n # for performance reasons.\n rel_qs._prefetch_related_lookups = ()\n\n all_related_objects = list(rel_qs)\n\n rel_obj_cache = {}\n for rel_obj in all_related_objects:\n rel_attr_val = rel_obj_attr(rel_obj)\n rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)\n\n to_attr, as_attr = lookup.get_current_to_attr(level)\n # Make sure `to_attr` does not conflict with a field.\n if as_attr and instances:\n # We assume that objects retrieved are homogeneous (which is the premise\n # of prefetch_related), so what applies to first object applies to all.\n model = instances[0].__class__\n try:\n model._meta.get_field(to_attr)\n except exceptions.FieldDoesNotExist:\n pass\n else:\n msg = \"to_attr={} conflicts with a field on the {} model.\"\n raise ValueError(msg.format(to_attr, model.__name__))\n\n # Whether or not we're prefetching the last part of the lookup.\n leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level\n\n for obj in instances:\n instance_attr_val = instance_attr(obj)\n vals = rel_obj_cache.get(instance_attr_val, [])\n\n if single:\n val = vals[0] if vals else None\n if as_attr:\n # A to_attr has been given for the prefetch.\n setattr(obj, to_attr, val)\n elif is_descriptor:\n # cache_name points to a field name in obj.\n # This field is a descriptor for a related object.\n setattr(obj, cache_name, val)\n else:\n # No to_attr has been given for this prefetch operation and the\n # cache_name does not point to a descriptor. 
Store the value of\n # the field in the object's field cache.\n obj._state.fields_cache[cache_name] = val\n else:\n if as_attr:\n setattr(obj, to_attr, vals)\n else:\n manager = getattr(obj, to_attr)\n if leaf and lookup.queryset is not None:\n qs = manager._apply_rel_filters(lookup.queryset)\n else:\n qs = manager.get_queryset()\n qs._result_cache = vals\n # We don't want the individual qs doing prefetch_related now,\n # since we have merged this into the current work.\n qs._prefetch_done = True\n obj._prefetched_objects_cache[cache_name] = qs\n return all_related_objects, additional_lookups", "def query2(transaction):\n # Part 2a: Obtain ID of most-followed (same as query1)\n top_3_followed = query1(transaction)\n top_1_followed = top_3_followed[0]['personID']\n print(f\"Top most-followed person ID:\\n{top_1_followed}\")\n\n # Part 2b: Use ID of most-followed person and find their city of residence\n city_query = f'''\n match $person isa person, has person-id {top_1_followed};\n $residence(contains-residence: $city, in-city: $person) isa has-residence;\n get;\n '''\n print(f\"\\nQuery 2 (Obtain city in which most-followed person lives):\\n {city_query}\")\n iterator = transaction.query(city_query)\n answer = [ans.get('city') for ans in iterator][0]\n result = next(answer.attributes()).value()\n print(f\"City in which most-followed person lives:\\n{result}\")\n\n return result", "def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. 
If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields", "def execute(self, shallow=False):\r\n # start by constructing the url\r\n url = self._make_url()\r\n # now make the request\r\n logger.info('querying {}'.format(url))\r\n result = GraphQuery.request_until_success(url)\r\n # now we will go through and try and expand anything that needs it\r\n if 'data' in result:\r\n result = {'lol':result} # yeah this seems odd\r\n for key in result:\r\n GraphQuery.expand(result[key])\r\n return result", "def recursion_loop(pulls, discount, grid_n):\n\n r_grid = np.linspace(0, 1, grid_n)\n gittins, values = initial_approximation(pulls, discount, grid_n)\n n = pulls - 2 # Note that the 2 comes from (1) the initial approximation and (2) python indexing\n while n >= 1:\n g, v = recursion_step(values[:n + 1, n, :], r_grid, discount)\n values[:n, n - 1] = v\n gittins[:n, n - 1] = g\n n -= 1\n return gittins, values", "def tri_recursion(k):\r\n if(k>0):\r\n result = k + tri_recursion(k-1)\r\n # print(result)\r\n else:\r\n result = 0\r\n\r\n return result", "def set_option_max_depth(self, integer, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionMaxDepth/', {'Integer': integer, 'apikey': apikey})))", "def build_random_function(min_depth, max_depth):\n\n # your code goes here", "def findHierarchy(self):\n def __recursiveHelper(key_name, output, indent):\n if key_name in self.relations:\n for employee in self.relations[key_name].employees:\n output += \" \" * indent + str(employee) +\"\\n\"\n # return __recursiveHelper(employee, output, indent+1)\n __recursiveHelper(employee, output, indent+1)\n else:\n print(output)\n return output\n\n\n #experimenting with Iter() and next() iterators/generators\n #and a while loop in the recursive function:\n\n # def __recursiveHelper(key_name, output, indent):\n # if key_name in self.relations:\n # employees = iter(self.relations[key_name].employees)\n # employee = next(employees, \"stop\")\n # while employees and employee != 'stop':\n # output += \" \" * indent + str(employee) +\"\\n\"\n # __recursiveHelper(next(employees, \"stop\"), output, indent+1)\n # else:\n # employee = next(employees, \"stop\")\n #\n # else:\n # return output\n\n\n\n\n\n output = \"\"\n indent = -1\n # self.relations is a dictionary of manager-name string keys.\n # The employees of None are the top-ranking managers.\n # only issue:\n # having trouble returning the concatenated output\n # from the recursive function:\n return __recursiveHelper(None, output, indent+1)", "def test_get_limit_4_dependants(self):\n self.assertEqual(\n gross_income.get_limit(dependant_children=4),\n gross_income.BASE_LIMIT\n )", "def drupal_db_read(db_obj, db_cur, key_cv, value_cv):\n\n # get the chain type\n chain_type = get_drupal_chain_type(key_cv, value_cv)\n if not chain_type:\n nori.core.email_logger.error(\n'''Internal Error: invalid field list supplied in call to\ndrupal_db_read(); call was (in expanded notation):\n\ndrupal_db_read(\n db_obj={0},\n db_cur={1},\n key_cv={2},\n value_cv={3}\n)\n\nExiting.'''.format(*map(nori.pps, [db_obj, db_cur, key_cv, value_cv]))\n )\n sys.exit(nori.core.exitvals['internal']['num'])\n\n ########### assemble the query string and argument list ###########\n\n #\n # node -> field(s) (including term references)\n #\n if 
chain_type == 'n-f':\n # node details\n node_cv = key_cv[0]\n node_ident = node_cv[0]\n node_value_type = node_cv[1]\n if len(node_cv) > 2:\n node_value = node_cv[2]\n node_type = node_ident[1]\n node_id_type = node_ident[2]\n\n # handle node ID types\n if node_id_type == 'id':\n key_column = 'node.nid'\n elif node_id_type == 'title':\n key_column = 'node.title'\n\n # handle specified node value\n node_value_cond = ''\n if len(node_cv) > 2:\n node_value_cond = 'AND {0} = %s'.format(key_column)\n\n field_idents = {}\n field_value_types = {}\n field_values = []\n field_names = {}\n value_columns = []\n field_joins = []\n term_joins = []\n field_value_conds = []\n field_deleted_conds = []\n v_order_columns = []\n for i, field_cv in enumerate(value_cv):\n # field details\n field_idents[i] = field_cv[0]\n field_value_types[i] = field_cv[1]\n if len(field_cv) > 2:\n field_values.append(field_cv[2])\n field_names[i] = field_idents[i][1]\n\n # field join\n field_joins.append(\n 'LEFT JOIN field_data_field_{0} AS f{1}\\n'\n ' ON f{1}.entity_id = node.nid\\n'\n ' AND f{1}.revision_id = node.vid' .\n format(field_names[i], i)\n )\n\n # handle value types\n if field_value_types[i].startswith('term: '):\n value_columns.append('t{0}.name'.format(i))\n term_joins.append(\n 'LEFT JOIN taxonomy_term_data AS t{0}\\n'\n ' ON t{0}.tid = f{0}.field_{1}_tid' .\n format(i, field_names[i])\n )\n elif field_value_types[i] == 'ip':\n value_columns.append(\n 'f{0}.field_{1}_start'.format(i, field_names[i])\n )\n else:\n value_columns.append(\n 'f{0}.field_{1}_value'.format(i, field_names[i])\n )\n\n # handle specified field value\n if len(field_cv) > 2:\n field_value_conds.append(\n 'AND {0} = %s'.format(value_columns[-1])\n )\n\n # not deleted\n field_deleted_conds.append(\n 'AND f{0}.deleted = 0'.format(i)\n )\n\n # order column\n v_order_columns.append('f{0}.delta'.format(i))\n\n # query string and arguments\n query_str = (\n'''\nSELECT {0}, {1}\nFROM node\n{2}\n{3}\nWHERE node.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\nAND node.type = %s\n{4}\n{5}\n{6}\nORDER BY node.title, node.nid, {7}\n''' .\n format(key_column, ', '.join(value_columns),\n '\\n'.join(field_joins),\n '\\n'.join(term_joins),\n node_value_cond,\n '\\n'.join(field_value_conds),\n '\\n'.join(field_deleted_conds),\n ', '.join(v_order_columns))\n )\n query_args = [node_type]\n if len(node_cv) > 2:\n query_args.append(node_value)\n query_args += field_values\n\n #\n # node -> relation -> node\n #\n elif chain_type == 'n-r-n':\n # key-node details\n k_node_cv = key_cv[0]\n k_node_ident = k_node_cv[0]\n k_node_value_type = k_node_cv[1]\n if len(k_node_cv) > 2:\n k_node_value = k_node_cv[2]\n k_node_type = k_node_ident[1]\n k_node_id_type = k_node_ident[2]\n\n # handle key-node ID types\n if k_node_id_type == 'id':\n node_key_column = 'k_node.nid'\n elif k_node_id_type == 'title':\n node_key_column = 'k_node.title'\n\n # handle specified key-node value\n k_node_value_cond = ''\n if len(k_node_cv) > 2:\n k_node_value_cond = 'AND {0} = %s'.format(node_key_column)\n\n # relation details\n relation_cv = key_cv[1]\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n\n # handle key relation-field\n relation_key_column = ''\n relation_field_join = ''\n relation_field_cond = ''\n relation_value_cond = ''\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n\n # field join\n relation_field_join = (\n 'LEFT JOIN field_data_field_{0} AS k_rf\\n'\n ' ON k_rf.entity_id 
= e2.entity_id\\n'\n ' AND k_rf.revision_id = e2.revision_id' .\n format(relation_field_name)\n )\n\n # conditions\n relation_field_cond = (\n \"AND k_rf.entity_type = 'relation'\\n\"\n \"AND k_rf.deleted = 0\"\n )\n\n # handle value type\n if relation_value_type.startswith('term: '):\n relation_key_column = 'k_rf_t.name'\n relation_field_join += (\n '\\nLEFT JOIN taxonomy_term_data AS k_rf_t\\n'\n 'ON k_rf_t.tid = k_rf.field_{0}_tid' .\n format(relation_field_name)\n )\n elif relation_value_type == 'ip':\n relation_key_column = (\n 'k_rf.field_{0}_start'.format(relation_field_name)\n )\n else:\n relation_key_column = (\n 'k_rf.field_{0}_value'.format(relation_field_name)\n )\n\n # handle specified field value\n if len(relation_cv) > 2:\n relation_value = relation_cv[2]\n relation_value_cond = (\n 'AND {0} = %s'.format(relation_key_column)\n )\n\n # value-node details\n v_node_cv = value_cv[0]\n v_node_ident = v_node_cv[0]\n v_node_value_type = v_node_cv[1]\n if len(v_node_cv) > 2:\n v_node_value = v_node_cv[2]\n v_node_type = v_node_ident[1]\n v_node_id_type = v_node_ident[2]\n\n # handle value-node ID types\n if v_node_id_type == 'id':\n value_column = 'v_node.nid'\n elif v_node_id_type == 'title':\n value_column = 'v_node.title'\n\n # handle value-node type\n extra_value_cols = ''\n v_node_type_cond = ''\n if v_node_type is None:\n extra_value_cols = ', v_node.type'\n else:\n v_node_type_cond = 'AND v_node.type = %s'\n\n # handle specified value-node value\n v_node_value_cond = ''\n if len(v_node_cv) > 2:\n v_node_value_cond = 'AND {0} = %s'.format(value_column)\n\n # query string and arguments\n query_str = (\n'''\nSELECT {0}, {1}{2}{3}\nFROM node AS k_node\nLEFT JOIN field_data_endpoints AS e1\n ON e1.endpoints_entity_id = k_node.nid\nLEFT JOIN field_data_endpoints AS e2\n ON e2.entity_id = e1.entity_id\n AND e2.revision_id = e1.revision_id\n AND e2.endpoints_r_index > e1.endpoints_r_index\n{4}\nLEFT JOIN node AS v_node\n ON v_node.nid = e2.endpoints_entity_id\nWHERE k_node.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\nAND k_node.type = %s\n{5}\nAND e1.revision_id IN\n (SELECT MAX(vid)\n FROM relation_revision\n GROUP BY rid)\nAND e1.entity_type = 'relation'\nAND e1.bundle = %s\nAND e1.endpoints_entity_type = 'node'\nAND e1.deleted = 0\nAND e2.endpoints_entity_type = 'node'\nAND e2.deleted = 0\n{6}\n{7}\nAND v_node.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\n{8}\n{9}\nORDER BY k_node.title, k_node.nid, e1.entity_id, v_node.title, v_node.nid\n''' .\n format(node_key_column,\n (relation_key_column + ', ') if relation_key_column\n else '',\n value_column,\n extra_value_cols,\n relation_field_join,\n k_node_value_cond,\n relation_field_cond,\n relation_value_cond,\n v_node_type_cond,\n v_node_value_cond)\n )\n query_args = [k_node_type]\n if len(k_node_cv) > 2:\n query_args.append(k_node_value)\n query_args.append(relation_type)\n if len(relation_ident) > 2 and len(relation_cv) > 2:\n query_args.append(relation_value)\n if v_node_type is not None:\n query_args.append(v_node_type)\n if len(v_node_cv) > 2:\n query_args.append(v_node_value)\n\n #\n # node -> relation & node -> relation_field(s) (incl. 
term refs)\n #\n elif chain_type == 'n-rn-rf':\n # node1 details\n node1_cv = key_cv[0]\n node1_ident = node1_cv[0]\n node1_value_type = node1_cv[1]\n if len(node1_cv) > 2:\n node1_value = node1_cv[2]\n node1_type = node1_ident[1]\n node1_id_type = node1_ident[2]\n\n # handle node1 ID types\n if node1_id_type == 'id':\n node1_key_column = 'node1.nid'\n elif node1_id_type == 'title':\n node1_key_column = 'node1.title'\n\n # handle specified node1 value\n node1_value_cond = ''\n if len(node1_cv) > 2:\n node1_value_cond = 'AND {0} = %s'.format(node1_key_column)\n\n # relation details\n relation_cv = key_cv[1]\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n\n # handle key relation-field\n relation_key_column = ''\n relation_field_join = ''\n relation_field_cond = ''\n relation_value_cond = ''\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n\n # field join\n relation_field_join = (\n 'LEFT JOIN field_data_field_{0} AS k_rf\\n'\n ' ON k_rf.entity_id = e2.entity_id\\n'\n ' AND k_rf.revision_id = e2.revision_id' .\n format(relation_field_name)\n )\n\n # conditions\n relation_field_cond = (\n \"AND k_rf.entity_type = 'relation'\\n\"\n \"AND k_rf.deleted = 0\"\n )\n\n # handle value type\n if relation_value_type.startswith('term: '):\n relation_key_column = 'k_rf_t.name'\n relation_field_join += (\n '\\nLEFT JOIN taxonomy_term_data AS k_rf_t\\n'\n 'ON k_rf_t.tid = k_rf.field_{0}_tid' .\n format(relation_field_name)\n )\n elif relation_value_type == 'ip':\n relation_key_column = (\n 'k_rf.field_{0}_start'.format(relation_field_name)\n )\n else:\n relation_key_column = (\n 'k_rf.field_{0}_value'.format(relation_field_name)\n )\n\n # handle specified field value\n if len(relation_cv) > 2:\n relation_value = relation_cv[2]\n relation_value_cond = (\n 'AND {0} = %s'.format(relation_key_column)\n )\n\n # node2 details\n node2_cv = key_cv[2]\n node2_ident = node2_cv[0]\n node2_value_type = node2_cv[1]\n if len(node2_cv) > 2:\n node2_value = node2_cv[2]\n node2_type = node2_ident[1]\n node2_id_type = node2_ident[2]\n\n # handle node2 ID types\n if node2_id_type == 'id':\n node2_key_column = 'node2.nid'\n elif node2_id_type == 'title':\n node2_key_column = 'node2.title'\n\n # handle specified node2 value\n node2_value_cond = ''\n if len(node2_cv) > 2:\n node2_value_cond = 'AND {0} = %s'.format(node2_key_column)\n\n field_idents = {}\n field_value_types = {}\n field_values = []\n field_names = {}\n value_columns = []\n field_joins = []\n term_joins = []\n field_entity_conds = []\n field_value_conds = []\n field_deleted_conds = []\n v_order_columns = []\n for i, field_cv in enumerate(value_cv):\n # field details\n field_idents[i] = field_cv[0]\n field_value_types[i] = field_cv[1]\n if len(field_cv) > 2:\n field_values.append(field_cv[2])\n field_names[i] = field_idents[i][1]\n\n # field join\n field_joins.append(\n 'LEFT JOIN field_data_field_{0} AS f{1}\\n'\n ' ON f{1}.entity_id = e2.entity_id\\n'\n ' AND f{1}.revision_id = e2.revision_id' .\n format(field_names[i], i)\n )\n\n # handle value types\n if field_value_types[i].startswith('term: '):\n value_columns.append('t{0}.name'.format(i))\n term_joins.append(\n 'LEFT JOIN taxonomy_term_data AS t{0}\\n'\n ' ON t{0}.tid = f{0}.field_{1}_tid' .\n format(i, field_names[i])\n )\n elif field_value_types[i] == 'ip':\n value_columns.append(\n 'f{0}.field_{1}_start'.format(i, field_names[i])\n )\n else:\n value_columns.append(\n 'f{0}.field_{1}_value'.format(i, field_names[i])\n 
)\n\n # field entity type\n field_entity_conds.append(\n \"AND f{0}.entity_type = 'relation'\".format(i)\n )\n\n # handle specified field value\n if len(field_cv) > 2:\n field_value_conds.append(\n 'AND {0} = %s'.format(value_columns[-1])\n )\n\n # not deleted\n field_deleted_conds.append(\n 'AND f{0}.deleted = 0'.format(i)\n )\n\n # order column\n v_order_columns.append('f{0}.delta'.format(i))\n\n # query string and arguments\n query_str = (\n'''\nSELECT {0}, {1}{2}, {3}\nFROM node AS node1\nLEFT JOIN field_data_endpoints AS e1\n ON e1.endpoints_entity_id = node1.nid\nLEFT JOIN field_data_endpoints AS e2\n ON e2.entity_id = e1.entity_id\n AND e2.revision_id = e1.revision_id\n AND e2.endpoints_r_index > e1.endpoints_r_index\n{4}\nLEFT JOIN node AS node2\n ON node2.nid = e2.endpoints_entity_id\n{5}\n{6}\nWHERE node1.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\nAND node1.type = %s\n{7}\nAND e1.revision_id IN\n (SELECT MAX(vid)\n FROM relation_revision\n GROUP BY rid)\nAND e1.entity_type = 'relation'\nAND e1.bundle = %s\nAND e1.endpoints_entity_type = 'node'\nAND e1.deleted = 0\nAND e2.endpoints_entity_type = 'node'\nAND e2.deleted = 0\n{8}\n{9}\nAND node2.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\nAND node2.type = %s\n{10}\n{11}\n{12}\n{13}\nORDER BY node1.title, node1.nid, e1.entity_id, {14}\n''' .\n format(node1_key_column,\n (relation_key_column + ', ') if relation_key_column\n else '',\n node2_key_column,\n ', '.join(value_columns),\n relation_field_join,\n '\\n'.join(field_joins),\n '\\n'.join(term_joins),\n node1_value_cond,\n relation_field_cond,\n relation_value_cond,\n node2_value_cond,\n '\\n'.join(field_entity_conds),\n '\\n'.join(field_value_conds),\n '\\n'.join(field_deleted_conds),\n ', '.join(v_order_columns))\n )\n query_args = [node1_type]\n if len(node1_cv) > 2:\n query_args.append(node1_value)\n query_args.append(relation_type)\n if len(relation_ident) > 2 and len(relation_cv) > 2:\n query_args.append(relation_value)\n query_args.append(node2_type)\n if len(node2_cv) > 2:\n query_args.append(node2_value)\n query_args += field_values\n\n #\n # node -> fc -> field(s) (including term references)\n #\n elif chain_type == 'n-fc-f':\n # node details\n node_cv = key_cv[0]\n node_ident = node_cv[0]\n node_value_type = node_cv[1]\n if len(node_cv) > 2:\n node_value = node_cv[2]\n node_type = node_ident[1]\n node_id_type = node_ident[2]\n\n # handle node ID types\n if node_id_type == 'id':\n key_column = 'node.nid'\n elif node_id_type == 'title':\n key_column = 'node.title'\n\n # handle specified node value\n node_value_cond = ''\n if len(node_cv) > 2:\n node_value_cond = 'AND {0} = %s'.format(key_column)\n\n # fc details\n fc_cv = key_cv[1]\n fc_ident = fc_cv[0]\n fc_value_type = fc_cv[1]\n if len(fc_cv) > 2:\n fc_value = fc_cv[2]\n fc_type = fc_ident[1]\n fc_id_type = fc_ident[2]\n\n # handle fc ID types\n if fc_id_type == 'id':\n extra_key_column = 'fci.item_id'\n elif fc_id_type == 'label':\n extra_key_column = 'fci.label'\n\n # handle specified fc value\n fc_value_cond = ''\n if len(fc_cv) > 2:\n fc_value_cond = 'AND {0} = %s'.format(extra_key_column)\n\n field_idents = {}\n field_value_types = {}\n field_values = []\n field_names = {}\n value_columns = []\n field_joins = []\n term_joins = []\n field_entity_conds = []\n field_value_conds = []\n field_deleted_conds = []\n v_order_columns = []\n for i, field_cv in enumerate(value_cv):\n # field details\n field_idents[i] = field_cv[0]\n field_value_types[i] = field_cv[1]\n if len(field_cv) > 
2:\n field_values.append(field_cv[2])\n field_names[i] = field_idents[i][1]\n\n # field join\n field_joins.append(\n 'LEFT JOIN field_data_field_{0} AS f{1}\\n'\n ' ON f{1}.entity_id = fci.item_id\\n'\n ' AND f{1}.revision_id = fci.revision_id' .\n format(field_names[i], i)\n )\n\n # handle value types\n if field_value_types[i].startswith('term: '):\n value_columns.append('t{0}.name'.format(i))\n term_joins.append(\n 'LEFT JOIN taxonomy_term_data AS t{0}\\n'\n ' ON t{0}.tid = f{0}.field_{1}_tid' .\n format(i, field_names[i])\n )\n elif field_value_types[i] == 'ip':\n value_columns.append(\n 'f{0}.field_{1}_start'.format(i, field_names[i])\n )\n else:\n value_columns.append(\n 'f{0}.field_{1}_value'.format(i, field_names[i])\n )\n\n # field entity type\n field_entity_conds.append(\n \"AND f{0}.entity_type = 'field_collection_item'\".format(i)\n )\n\n # handle specified field value\n if len(field_cv) > 2:\n field_value_conds.append(\n 'AND {0} = %s'.format(value_columns[-1])\n )\n\n # not deleted\n field_deleted_conds.append(\n 'AND f{0}.deleted = 0'.format(i)\n )\n\n # order column\n v_order_columns.append('f{0}.delta'.format(i))\n\n # query string and arguments\n query_str = (\n'''\nSELECT {0}, {1}{2}\nFROM node\nLEFT JOIN field_data_field_{3} AS fcf\n ON fcf.entity_id = node.nid\n AND fcf.revision_id = node.vid\nLEFT JOIN field_collection_item as fci\n ON fci.item_id = fcf.field_{3}_value\n AND fci.revision_id = fcf.field_{3}_revision_id\n{4}\n{5}\nWHERE node.vid IN\n (SELECT MAX(vid)\n FROM node_revision\n GROUP BY nid)\nAND node.type = %s\n{6}\nAND fcf.entity_type = 'node'\nAND fcf.deleted = 0\nAND fci.revision_id IN\n (SELECT MAX(revision_id)\n FROM field_collection_item_revision\n GROUP BY item_id)\nAND fci.archived = 0\n{7}\n{8}\n{9}\n{10}\nORDER BY node.title, node.nid, fcf.delta, {11}\n''' .\n format(key_column,\n (extra_key_column + ', ') if extra_key_column else '',\n ', '.join(value_columns),\n fc_type,\n '\\n'.join(field_joins),\n '\\n'.join(term_joins),\n node_value_cond,\n fc_value_cond,\n '\\n'.join(field_entity_conds),\n '\\n'.join(field_value_conds),\n '\\n'.join(field_deleted_conds),\n ', '.join(v_order_columns))\n )\n query_args = [node_type]\n if len(node_cv) > 2:\n query_args.append(node_value)\n if len(fc_cv) > 2:\n query_args.append(fc_value)\n query_args += field_values\n\n ######################## execute the query ########################\n\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n return ret[1]", "def sub_get_lines(treeiter):\n while treeiter != None:\n tup = self.store[treeiter]\n if criteria(tup):\n lines.append({\n 'jid': tup[0],\n 'country': tup[3],\n 'ip': tup[4]})\n if self.store.iter_has_child(treeiter):\n childiter = self.store.iter_children(treeiter)\n sub_get_lines(childiter)\n treeiter = self.store.iter_next(treeiter)", "def max_recursion_depth(self, max_recursion_depth: ConfigNodePropertyInteger):\n\n self._max_recursion_depth = max_recursion_depth", "def _flatten_json(node, stop_prefix, seperator='/', prefix='/', depth=0):\n node_list = []\n field_dict = {}\n\n # print(f\"{' ' * depth}>>> {prefix}\")\n\n node_type = type(node)\n if node_type == list:\n for entry in node:\n sub_list, sub_fields = _flatten_json(entry, stop_prefix, seperator, prefix, depth)\n node_list = node_list + sub_list\n field_dict.update(sub_fields)\n elif node_type == dict:\n for key in node:\n prefstr = _make_new_prefix(prefix, 
key, seperator)\n #print(f\"{' ' * depth}PREFSTR: {prefstr} <- P:{prefix} S:{seperator} K:{key}\")\n sub_list, sub_fields = _flatten_json(node[key], stop_prefix, seperator, prefstr, depth + 1)\n if prefstr == stop_prefix:\n sub_list = unedgify(node[key])\n if type(sub_list) == dict:\n sub_list = sub_list.copy()\n if type(sub_list) != list:\n sub_list = [sub_list]\n node_list = node_list + sub_list\n field_dict.update(sub_fields)\n else:\n # at the stop-level, use normal field names\n key = prefix\n if stop_prefix in key:\n key = prefix.split(seperator)[-1]\n #print(f'{\" \" * depth}PREFIX:{prefix} STOP_PREFIX:{stop_prefix} KEY:{key} VALUE:{pprint.pformat(node)}')\n field_dict[key] = node\n\n if len(node_list) > 0 and \\\n len(field_dict) > 0:\n for entry in node_list:\n # cannot blindly do entry.update(field_dict), as subtrees with\n # no nodes for stop_prefix will bubble up and overwrite previous\n # entries...\n for field_key in field_dict:\n if type(entry) == dict:\n if field_key not in entry:\n entry[field_key] = field_dict[field_key]\n field_dict = {}\n return node_list, field_dict.copy()", "def get_params(self, deep=...):\n ...", "def _compute_invalidation_scope_recursive(request, result, meta, source_type, target_type, simulated_prop):\n if 'calculatedProperty' in meta: # we cannot patch calc props, so behavior here is irrelevant\n return\n elif meta['type'] == 'object':\n if 'properties' not in meta:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n elif meta['type'] == 'array':\n sub_type = meta['items']['type']\n if sub_type == 'object':\n if 'properties' not in meta['items']:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['items']['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)" ]
[ "0.5792529", "0.53290737", "0.5041367", "0.50318784", "0.4979942", "0.4966645", "0.49606967", "0.49531704", "0.4941977", "0.49168393", "0.49085727", "0.48915526", "0.48759022", "0.48753", "0.4858908", "0.4832988", "0.47985405", "0.47869855", "0.47741127", "0.47645083", "0.47599682", "0.47476", "0.4742114", "0.4738495", "0.4697844", "0.46894297", "0.46844864", "0.4661207", "0.46572834", "0.46570098", "0.46564493", "0.46519956", "0.46354467", "0.46278873", "0.4590245", "0.45796806", "0.45723304", "0.4568749", "0.45616734", "0.45538473", "0.45439515", "0.45338747", "0.45338562", "0.45269677", "0.45263234", "0.45218676", "0.45157114", "0.45117798", "0.45108253", "0.45059952", "0.4502979", "0.4483762", "0.4478311", "0.4475466", "0.44652334", "0.44611654", "0.4457905", "0.44546318", "0.44413683", "0.44389197", "0.4433691", "0.44285223", "0.44202825", "0.44190544", "0.44041815", "0.43977547", "0.4393243", "0.43898037", "0.43864176", "0.43827486", "0.438245", "0.4378783", "0.43782458", "0.4375883", "0.4374512", "0.43724173", "0.43685645", "0.43677852", "0.43581745", "0.43577638", "0.43563774", "0.43553644", "0.43511367", "0.43498397", "0.433945", "0.43339935", "0.43336132", "0.43335676", "0.4326424", "0.4324589", "0.43227082", "0.43185547", "0.43176964", "0.43172967", "0.43166625", "0.43151215", "0.4314853", "0.43146673", "0.43133944", "0.43127048" ]
0.4593388
34
Instantiate a new Directive
def __init__(self, raw_directive: Dict):
        self.name: str = raw_directive.get("name")
        self.description: str = raw_directive.get("description")
        self.locations: List[str] = raw_directive.get("locations", [])
        self.args: Dict[str, Argument] = Schema.parse_arguments(raw_directive.get("args", []))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_directive(cls, directive, app):\n return cls(directive,\n app,\n arguments=directive.arguments,\n content=directive.content,\n options=directive.options)", "def run(self):\n node = DirectiveNode(\n self.name,\n name=self.name,\n content=self.content,\n options=self.options,\n )\n return [node]", "def directive(func):\n func.cfg_is_directive = True\n return func", "def __init__(self, iface, component_name, directive_name):\n self.iface = iface\n self.component_name = component_name\n self.directive_name = directive_name", "def run_directive(\n self, name: str, first_line: str, content: str, position: int\n ) -> List[nodes.Element]:\n # TODO directive name white/black lists\n\n self.document.current_line = position\n\n # get directive class\n directive_class, messages = directives.directive(\n name, self.language_module_rst, self.document\n ) # type: (Directive, list)\n if not directive_class:\n error = self.reporter.error(\n 'Unknown directive type \"{}\".\\n'.format(name),\n # nodes.literal_block(content, content),\n line=position,\n )\n return [error] + messages\n\n if issubclass(directive_class, Include):\n # this is a Markdown only option,\n # to allow for altering relative image reference links\n directive_class.option_spec[\"relative-images\"] = directives.flag\n directive_class.option_spec[\"relative-docs\"] = directives.path\n\n try:\n arguments, options, body_lines = parse_directive_text(\n directive_class, first_line, content\n )\n except DirectiveParsingError as error:\n error = self.reporter.error(\n \"Directive '{}': {}\".format(name, error),\n nodes.literal_block(content, content),\n line=position,\n )\n return [error]\n\n # initialise directive\n if issubclass(directive_class, Include):\n directive_instance = MockIncludeDirective(\n self,\n name=name,\n klass=directive_class,\n arguments=arguments,\n options=options,\n body=body_lines,\n lineno=position,\n )\n else:\n state_machine = MockStateMachine(self, position)\n state = MockState(self, state_machine, position)\n directive_instance = directive_class(\n name=name,\n # the list of positional arguments\n arguments=arguments,\n # a dictionary mapping option names to values\n options=options,\n # the directive content line by line\n content=StringList(body_lines, self.document[\"source\"]),\n # the absolute line number of the first line of the directive\n lineno=position,\n # the line offset of the first line of the content\n content_offset=0, # TODO get content offset from `parse_directive_text`\n # a string containing the entire directive\n block_text=\"\\n\".join(body_lines),\n state=state,\n state_machine=state_machine,\n )\n\n # run directive\n try:\n result = directive_instance.run()\n except DirectiveError as error:\n msg_node = self.reporter.system_message(\n error.level, error.msg, line=position\n )\n msg_node += nodes.literal_block(content, content)\n result = [msg_node]\n except MockingError as exc:\n error_msg = self.reporter.error(\n \"Directive '{}' cannot be mocked: {}: {}\".format(\n name, exc.__class__.__name__, exc\n ),\n nodes.literal_block(content, content),\n line=position,\n )\n return [error_msg]\n\n assert isinstance(\n result, list\n ), 'Directive \"{}\" must return a list of nodes.'.format(name)\n for i in range(len(result)):\n assert isinstance(\n result[i], nodes.Node\n ), 'Directive \"{}\" returned non-Node object (index {}): {}'.format(\n name, i, result[i]\n )\n return result", "def new(self):\n\n self.obj = self.factory()\n\n if self.textproperty is None:\n self.attributes = 
ElementHandler.load_definitions(self, self.obj)", "def _get_directive_name(self):", "def newElement(self,cls,attrib={}):\n elem = cls(**attrib)\n self.setFreeId(elem)\n if cls==Subtoken:\n self.subtokens[elem.id] = elem\n elif cls==DepToken:\n self.deptokens[elem.id] = elem\n elif cls==RelToken:\n self.reltokens[elem.id] = elem\n elif cls==DepEntity:\n self.depentities[elem.id] = elem\n elif cls==RelEntity:\n self.relentities[elem.id] = elem\n else:\n # It is caller responsibility to add elements to the graph\n pass\n \n return(elem)", "def _createVetor(cls, elem):\n return cls(elem)", "def instantiateNewCmd(self):\n return QadSTRETCHCommandClass(self.plugIn)", "def render_directive(self, token: SyntaxTreeNode) -> None:\n first_line = token.info.split(maxsplit=1)\n name = first_line[0][1:-1]\n arguments = \"\" if len(first_line) == 1 else first_line[1]\n content = token.content\n position = token_line(token)\n nodes_list = self.run_directive(name, arguments, content, position)\n self.current_node += nodes_list", "def contained(self, name, data):\n factory = self.context.factory(self.context, name)\n if factory is None:\n raise ConfigurationError('Invalid directive', name)\n adapter = factory(self.context, data)\n return adapter", "def instantiateNewCmd(self):\n return QadGRIPSTRETCHCommandClass(self.plugIn)", "def __getattr__(self, attr):\n def factory(parent=None, **kwargs):\n return self.Node(parent, attr, **kwargs)\n return factory", "def __init__(self, *args, **kwargs):\n self._directives = []\n self.domain = kwargs.get(\"domain\", None)\n self._locations = {}", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def register_based_directives():\n if not BASED_LIB_RST:\n return\n\n if \"directives\" in BASED_LIB_RST:\n for dir_name, dir_cls_str in BASED_LIB_RST[\"directives\"].items():\n class_ = import_string(dir_cls_str)\n directives.register_directive(dir_name, class_)", "def instantiate(name, *args, **kwargs):\n ...", "def new(name=None):", "def command(func):\n classname = inspect.getouterframes(inspect.currentframe())[1][3]\n name = func.__name__\n help_name = name.replace(\"do_\", \"help_\")\n doc = textwrap.dedent(func.__doc__)\n\n def new(instance, args):\n # instance.new.__doc__ = doc\n try:\n argv = shlex.split(args)\n arguments = docopt(doc, help=True, argv=argv)\n func(instance, args, arguments)\n except SystemExit as e:\n if args not in ('-h', '--help'):\n print(\"Could not execute the command.\")\n print(e)\n print(doc)\n\n new.__doc__ = doc\n return new", "def directive_error(self, level, message):\n return DirectiveError(level, message)", "def new(cls):\n return cls()", "def __init__(self, attr):\n super().__init__()\n TemplateEngineFactory.register_factory('Jinja2Engine', Jinja2Engine.Factory)\n\n step1 = GenerateTemplateFileNameViaPyST(attr)\n step2 = GenerateTemplateStubViaJinja2()\n step3 = GenerateTemplateOutputViaJinja2()\n\n self.add(step1)\n self.add(step2)\n self.add(step3)\n self._attribute = attr", "def create_instance(c_instance):\n return LemaurPad(c_instance)", "def define(self, name, constructor, options=None):\n if '-' not in name:\n raise ValueError('Invalid custom element name. 
Must contain hypen: ' + name)\n # el = document.createElement(name)\n # el.constructor = constructor\n from domonic.html import tag\n from domonic.dom import Element\n el = type(name, (tag, Element), {'name': name, '__init__': constructor})\n if options is not None:\n if 'extends' in options:\n el.extends = options['extends']\n self.store[name] = el\n return el", "def factory(cmd, **default_kwargs):\n cmd = resolve_command(cmd)\n return Command(cmd)", "def create_parser():\n pass", "def _build(self, *args, **kwargs):\n for directive in self.directives:\n alias, ip, port, server_name, location = directive[\"signature\"].split(\":\")\n\n if location not in self.locations.keys():\n handle_location = Location(**{\n \"location\" : location,\n }\n )\n self.locations = handle_location\n self.locations[location].directives = directive", "def __init__ (self, attrs):\n super(MyDistribution, self).__init__(attrs)\n self.console = ['dosage']", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))", "def newChemTorsion(self, **attrlinks):\n return ChemTorsion(self, **attrlinks)", "def _directive_render(node: RenderTreeNode, context: RenderContext) -> str:\n # special directives that should only be used within substitutions\n if node.meta[\"module\"].endswith(\"misc.Replace\") and node.children:\n return \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if node.meta[\"module\"].endswith(\"misc.Date\"):\n return \"{sub-ref}`today`\"\n # TODO handle unicode directive\n\n name = node.meta[\"name\"]\n info_str = option_block = code_block = \"\"\n\n if node.children and node.children[0].type == \"directive_arg\":\n info_str = \"\".join(child.render(context) for child in node.children[0])\n info_str = \" \".join(info_str.splitlines()).strip()\n if info_str:\n info_str = \" \" + info_str\n\n if node.meta[\"options_list\"]:\n yaml_str = yaml_dump(\n {\n key: (True if val is None else (int(val) if val.isnumeric() else val))\n for key, val in node.meta[\"options_list\"]\n }\n )\n option_block = indent(yaml_str, \":\", lambda s: True).strip()\n\n if node.children and node.children[-1].type == \"directive_content\":\n content = \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if not option_block and content.startswith(\":\"):\n # add a new-line, so content is not treated as an option\n content = \"\\n\" + content\n elif option_block and content:\n # new lines between options and content\n option_block += \"\\n\\n\"\n code_block = content\n\n if option_block or code_block:\n # new line before closing fence\n code_block += \"\\n\"\n\n # Info strings of backtick code fences can not contain backticks or tildes.\n # If that is the case, we make a tilde code fence instead.\n if node.markup and \":\" in node.markup:\n fence_char = \":\"\n elif \"`\" in info_str or \"~\" in info_str:\n fence_char = \"~\"\n else:\n fence_char = \"`\"\n\n # The code block must not include as long or longer sequence of `fence_char`s\n # as the fence string itself\n fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)\n fence_str = fence_char * fence_len\n return f\"{fence_str}{{{name}}}{info_str}\\n{option_block}{code_block}{fence_str}\"", "def __init__(self, *args, **kwargs):\n _gdi_.PseudoDC_swiginit(self,_gdi_.new_PseudoDC(*args, **kwargs))", "def new_command(self, content=None):\n return PrimitiveControllerCommand(content)", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", 
"def create(cls, target, tag=None):\n norm = cls.normalize(target)\n \n if tag == None:\n norm = norm.split('/',1)\n if len(norm) == 1:\n norm.append('')\n else:\n norm = (tag,norm)\n \n return cls(*norm)", "def newDynaForm(dynamodel=None, dynabase=None, dynainclude=None, \n dynaexclude=None, dynaproperties=None):\n\n class DynaForm(dynabase):\n \"\"\"The dynamically created Form class\n \"\"\"\n\n __metaclass__ = DynaFormMetaclass\n\n class Meta:\n \"\"\"Inner Meta class that defines some behavior for the form.\n \"\"\"\n\n model = dynamodel\n fields = dynainclude\n exclude = dynaexclude\n dynaconf = dynaproperties\n\n return DynaForm", "def __new__(cls, index: int) -> Expr:\n return Expr.__new__(cls)", "def setup(app: Sphinx) -> None:\n app.add_directive(\"furo-demo\", _FuroDemoDirective)", "def createComponent(tagName):\n\n class Component():\n \"\"\"A basic class for a virtual DOM Component\"\"\"\n def __init__(self, *children, **kwargs):\n self.children = _flatten_children(*children, **kwargs)\n self.attributes = kwargs\n self.tagName = tagName\n\n def _repr_mimebundle_(self, include, exclude, **kwargs):\n return {\n 'application/vdom.v1+json': toJSON(self),\n 'text/plain': '<{tagName} />'.format(tagName=tagName)\n }\n \n Component.__doc__ = \"\"\"A virtual DOM component for a {tagName} tag\n \n >>> {tagName}()\n <{tagName} />\n \"\"\".format(tagName=tagName)\n \n return Component", "def new(cls, **kwargs):\n return cls(**kwargs)", "def _make_parser(self):\n return DefusedExpatParser()", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def autodot(parser, token):\n args = token.split_contents()\n \n if not len(args) in (2, 3, 4, 5):\n raise template.TemplateSyntaxError(\"%r tag requires one to four arguments.\" % args[0])\n \n if len(args) in (4, 5):\n if args[2] != \"as\":\n raise template.TemplateSyntaxError(\"%r tag with 3 or 4 arguments must be 'a as b'.\" % args[0])\n withsrc = args[1]\n args = args[:1] + args[3:]\n else:\n withsrc = None\n \n model_name = args[1]\n varsrc = parser.compile_filter(withsrc or model_name)\n if len(args) == 3:\n mode = args[2]\n if not mode in (OUTPUT_FILE, OUTPUT_INLINE):\n raise template.TemplateSyntaxError(\"%r's second argument must be '%s' or '%s'.\" % (args[0], OUTPUT_FILE, OUTPUT_INLINE))\n else:\n mode = OUTPUT_FILE\n \n nodelist = parser.parse(('endautodot',))\n parser.delete_first_token()\n return AutodotNode(nodelist, model_name, varsrc, mode, withsrc)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def with_cm4_doc(func):\n def new(instance, args, arguments):\n func(instance, args, arguments)\n\n new.__doc__ = cm4.command.command.__doc__\n return new", "def spec(cls, id_, name, args):\r\n return cls(id_, name, None, args, None)", "def create(self, class_name, attrs, session):", "def format_directive(self, module, package):\n if module:\n automodule = '%s.%s' % (package, module)\n else:\n automodule = package\n\n directive = '.. 
automodule:: %s\\n' % automodule\n for option in OPTIONS:\n directive += ' :%s:\\n' % option\n return directive", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def __getattr__(self, name):\n return Command(self.cmd, name)", "def __init__(self, params, diam=1, aggressive=True):\n defaults = dict(diam=diam, aggressive=aggressive)\n super(MirrorDescent, self).__init__(params, defaults)", "def _new(cls, rep, shape, domain):\n cls._check(rep, shape, domain)\n obj = object.__new__(cls)\n obj.rep = rep\n obj.shape = obj.rows, obj.cols = shape\n obj.domain = domain\n return obj", "def __init__(self, tag):\n self.tag = tag.lower()\n self.attrs = {}\n self.contents = ()", "def setup(app):\n app.add_directive('alias', AliasDirective)", "def __init__(self, command: Optional[List[str]] = None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.docs_command = DocsCommandContext()\n self.command = command or self.docs_command.sub_commands\n self.command_string = self.docs_command.sub_command_string\n self.command_callback = self.docs_command.command_callback", "def add_command(self, command, docstring):\n do_name = 'do_%s' % (command,)\n do_cmd = make_cmd_fn(command)\n setattr(self, do_name, do_cmd)\n\n if docstring:\n help_cmd = make_help_cmd(command, docstring)\n help_name = 'help_%s' % (command,)\n setattr(self, help_name, help_cmd)\n\n self.names.append(do_name)", "def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()", "def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must 
be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()", "def parse_directive(line):\n composite = list()\n pointer = line.find(\"#\")\n composite.append(line[0: pointer])\n composite.append(line[pointer + 1: len(line) - 1])\n return composite", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"define\", \"#define\")\n if len(self.value) == 0:\n self.string = \" %s\" % (self.name)\n else:\n self.string = \" %s %s\" % (self.name, self.value)", "def __init__(self, command: str):\n\n # Assign attributes\n self.command_str: str = command\n self.name: str = \"\"\n self.arg: str = \"\"\n\n # Parse the command\n self.parse_command()", "def New():\n Self = $classname()\n Self._initialize_()\n Self._update_()\n return Self", "def new_declaration (var_names) :\r\n\r\n\ttokens = [\"::\"]\r\n\tfor n in var_names :\r\n\t\ttokens += tokenizer.tokenize(n) + [\",\"]\r\n\tdel tokens[-1]\r\n\r\n\tresult = declaration (tokens)\r\n\r\n\treturn result", "def newChemBond(self, **attrlinks):\n return ChemBond(self, **attrlinks)", "def setup(app):\n app.add_directive('show_tasks', ShowTasksDirective)", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def __new__(cls, name, *args, **kwargs):\n instance = super(TFModel, cls).__new__(cls)\n instance.__scope_name = name\n instance.__graph = tf.Graph()\n instance.__phase = tf.placeholder(tf.bool)\n return instance", "def __init__(self, dynamod, measmod, initrv, alpha, beta, kappa, **kwargs):\n if not issubclass(type(dynamod), DiscreteGaussianModel):\n raise ValueError(\n \"_DiscDiscUnscentedKalman requires \" \"a Gaussian dynamic model.\"\n )\n if not issubclass(type(measmod), DiscreteGaussianModel):\n raise ValueError(\n \"_DiscDiscUnscentedKalman requires \" \"a Gaussian measurement model.\"\n )\n super().__init__(dynamod, measmod, initrv)\n self.ut = UnscentedTransform(self.dynamod.dimension, alpha, beta, kappa)", "def format_directive(module, package=None):\n directive = '.. 
automodule:: %s\\n' % makename(package, module)\n for option in OPTIONS:\n directive += ' :%s:\\n' % option\n return directive", "def _new_rep(self, rep):\n return self._new(rep, self.shape, self.domain)", "def _new_instance(self):\n return self.__class__(self._fmodule, self._tensor_rank)", "def __init__(self, tag):\n self.tag = tag", "def from_spec(cls, spec, prog, **kwargs):\n parser = spec.parser(prog, **kwargs)\n return cls(spec.name, spec.kind, spec.summary, parser, spec.factory)", "def instantiate():\n d = defer.Deferred()", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def __init__(self, command, target: str):\n self.command = command\n self.target = target", "def create(cls, _):\n return cls", "def create_descr(self, attr_name):", "def make_parser(description, agg=False, allowed_experiments=sum([exps for exps in EXPERIMENTS.values()], [])):\n\n return RamArgumentParser(description=description,\n agg=agg,\n allowed_experiments=allowed_experiments)", "def create_instance(c_instance):\n return MonoPedal(c_instance)", "def __init__(self, *args):\n _gdi_.DCClipper_swiginit(self,_gdi_.new_DCClipper(*args))", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ..." ]
[ "0.7304532", "0.60879993", "0.59436953", "0.5747961", "0.5662996", "0.5511462", "0.52373666", "0.51641417", "0.51499546", "0.51448756", "0.508324", "0.5068346", "0.5061711", "0.50400126", "0.5034657", "0.5017803", "0.50147015", "0.49971962", "0.4973718", "0.49727347", "0.49699762", "0.49487227", "0.49447167", "0.49141505", "0.49005604", "0.48861465", "0.48774177", "0.48623502", "0.48481578", "0.48403302", "0.48345655", "0.4834032", "0.48331362", "0.4823032", "0.48098305", "0.48065004", "0.4805007", "0.4788097", "0.47868174", "0.478235", "0.47818044", "0.47762364", "0.47689742", "0.47645068", "0.47542563", "0.47542563", "0.47417685", "0.47393635", "0.4737442", "0.4736262", "0.47314307", "0.47140875", "0.47123554", "0.4712316", "0.4706599", "0.46995395", "0.46748844", "0.46719256", "0.46700427", "0.46700427", "0.46640667", "0.46615493", "0.46559533", "0.46535203", "0.46480808", "0.46384552", "0.46379176", "0.46332797", "0.46245953", "0.4622003", "0.46118727", "0.46081907", "0.4607515", "0.46069232", "0.4603034", "0.4591755", "0.4589384", "0.45834637", "0.45831773", "0.45787433", "0.45785525", "0.45748973", "0.45744097", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553", "0.45737553" ]
0.6045261
2
Instantiate a new Argument
def __init__(self, raw_arg: Dict): self.name = raw_arg.get("name") self.description = raw_arg.get("description") self.type = TypeDefer(raw_arg.get("type")) if raw_arg.get("type") is not None else None self.default_value = raw_arg.get("defaultValue")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def from_param(cls, arg):\n return cls(arg)", "def add_argument(*args, **kwargs):\n return _Argument(args, frozenset(kwargs.items()))", "def __init__(self, name, flags, attr=None):\n Arg.__init__(self, name, attr)\n self.flags = flags", "def __init__(\n self,\n name: Optional[str] = None,\n aliases: Iterable[str] = (),\n args: Iterable[Argument] = (),\n ) -> None:\n self.args = Lexicon()\n self.positional_args: List[Argument] = []\n self.flags = Lexicon()\n self.inverse_flags: Dict[str, str] = {} # No need for Lexicon here\n self.name = name\n self.aliases = aliases\n for arg in args:\n self.add_arg(arg)", "def arg(\n cls,\n *flags: str,\n default: Any = EMPTY,\n choices: Sequence[Any] = None,\n help: str = None, # pylint: disable=redefined-builtin\n metavar: str = None,\n ) -> \"Argument\":\n return cls(\n *flags, default=default, choices=choices, help_text=help, metavar=metavar\n )", "def __init__(self, name=\"info\", attr=None):\n Arg.__init__(self, name, attr)", "def create_from_arg_string(cls, arg_string):\n return cls()", "def __init__(self):\n BuiltinFunction.__init__(self, \"arg\",\n conversions=dict(maxima='carg',\n mathematica='Arg',\n sympy='arg'))", "def add_argument(self, *args, **kwargs):\n self.arguments[args[0]] = self._Argument(*args, **kwargs)", "def create_argument_list(self):\n raise NotImplementedError", "def argument(*args, **kwargs):\n def deco(fct):\n if isinstance(fct, Command):\n cmd = fct\n cmd.add_argument(*args, **kwargs)\n else:\n if not hasattr(fct, '_acmdlib_arguments'):\n fct._acmdlib_arguments = []\n fct._acmdlib_arguments.append((args, kwargs))\n #print \"===\",args,kwargs,type(fct),fct\n return fct\n return deco", "def from_parameter(cls, name: str, parameter: inspect.Parameter) -> \"Argument\":\n # pylint: disable=too-many-branches,too-many-statements\n positional = parameter.kind is not parameter.KEYWORD_ONLY\n type_ = parameter.annotation\n default = parameter.default\n flag = name.upper() if positional else f\"--{name.replace('_', '-')}\"\n\n # If field is assigned an Argument use that as the starting point\n if isinstance(default, Argument):\n instance = default\n default = EMPTY\n if flag not in instance.name_or_flags:\n instance.name_or_flags = (flag,) + instance.name_or_flags\n else:\n instance = cls(flag)\n\n # Start updating kwargs\n kwargs = instance.kwargs\n if default is not EMPTY:\n kwargs.setdefault(\"default\", default)\n\n # Handle type variances\n origin = getattr(type_, \"__origin__\", None)\n if origin is not None:\n type_ = cls._handle_generics(origin, type_, positional, kwargs)\n elif isinstance(type_, type):\n type_ = cls._handle_types(type_, positional, kwargs)\n elif isinstance(type_, argparse.FileType):\n pass # Just pass as this is an `argparse` builtin\n else:\n raise TypeError(f\"Unsupported type: {type_!r}\")\n\n if type_:\n kwargs[\"type\"] = type_\n\n return instance", "def create_argument(parameter, input_dict):\n arg = []\n position = 0\n\n if 'id' not in parameter:\n exit_perm_fail(\"Error: input parameter given without an id\")\n par_id = parameter['id']\n\n # get parameter type properties\n par_type = parameter.get('type')\n\n is_optional = False\n if par_type.endswith('?'):\n is_optional = True\n par_type = par_type[0:-1]\n\n is_array = False\n if par_type.endswith('[]'):\n is_array = True\n par_type = par_type[0:-2]\n\n # get input value\n value = parameter.get('default')\n if par_id in input_dict:\n value = input_dict[par_id]\n\n 
# check type a bit\n if not is_optional and value is None:\n exit_perm_fail(\"Error: no input provided for required parameter {}\".format(str(par_id)))\n if is_array and not isinstance(value, list):\n exit_perm_fail(\"Error: expected an array input value for parameter {}\".format(str(par_id)))\n\n if 'inputBinding' in parameter and value is not None:\n binding = parameter['inputBinding']\n\n # get argument creation settings\n separate = 'separate' not in binding or binding['separate']\n item_separator = binding.get('itemSeparator')\n prefix = binding.get('prefix')\n\n # produce argument\n if is_array:\n if par_type == 'File':\n value = list(map(lambda x: x['path'], value))\n else:\n value = list(map(str, value))\n\n if item_separator:\n value = [item_separator.join(value)]\n else:\n if par_type == 'File':\n value = [value['path']]\n else:\n value = [value]\n\n for val in value:\n if prefix:\n if separate:\n arg.append(prefix)\n arg.append(str(val))\n else:\n arg.append(prefix + str(val))\n else:\n arg.append(str(val))\n\n # put it in the right place\n if 'position' in binding:\n position = int(binding['position'])\n\n return position, arg", "def __init__(self, args=False):\n self.args = args", "def __add_arguments__(cls, parser):", "def __init__(self, args):\n self.args = args", "def __init__(self, args):\n super().__init__()\n self.args = args", "def __init__(self, *args):\n this = _libsbml.new_Parameter(*args)\n try: self.this.append(this)\n except: self.this = this", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def __init__(self, args: argparse.Namespace):\n self._args = args", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, *args):\n\n self.args = args", "def add_argument(self, *args, **kwds):\n # no argument to add to stack\n if not args:\n return self\n\n # consume Command objects if exists\n if isinstance(args[0], Command):\n self._arg_stack.extend(args[0]._arg_stack)\n target = args[0]\n return self.add_argument(*args[1:], **kwds)\n\n # stack args, kwds to pass to parser.add_argument\n self._arg_stack.append(('normal', args, kwds))\n return self", "def __init__(self, arg_list: List[_Argument]):\n self.arg_list: List[_Argument] = arg_list", "def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)", "def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)", "def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)", "def add_arg(self, *args: Any, **kwargs: Any) -> None:\n # Normalize\n if len(args) == 1 and isinstance(args[0], Argument):\n arg = args[0]\n else:\n arg = Argument(*args, **kwargs)\n # Uniqueness constraint: no name collisions\n for name in arg.names:\n if name in self.args:\n msg = \"Tried to add an argument named {!r} but one already exists!\" # noqa\n raise ValueError(msg.format(name))\n # First name used as \"main\" name for purposes of aliasing\n main = arg.names[0] # NOT arg.name\n self.args[main] = arg\n # Note positionals in distinct, ordered list attribute\n if arg.positional:\n self.positional_args.append(arg)\n # Add names & nicknames to flags, args\n self.flags[to_flag(main)] = arg\n for name in arg.nicknames:\n self.args.alias(name, to=main)\n self.flags.alias(to_flag(name), to=to_flag(main))\n # Add attr_name to args, but not flags\n if arg.attr_name:\n self.args.alias(arg.attr_name, to=main)\n # Add to inverse_flags if required\n if arg.kind == bool and arg.default is True:\n # Invert the 'main' flag name here, which will be a dashed 
version\n # of the primary argument name if underscore-to-dash transformation\n # occurred.\n inverse_name = to_flag(\"no-{}\".format(main))\n self.inverse_flags[inverse_name] = to_flag(main)", "def __init__(self, arg: ast3.arg) -> None:\n self.typed = False\n self.line = arg.lineno\n self.column = arg.col_offset\n self.name = arg.arg\n self.type = None\n self.type_line = -1\n self.type_column = -1\n if arg.annotation:\n anno = arg.annotation\n self.typed = True\n self.type = Annotation(anno) # type: ignore\n self.type_line = anno.lineno\n self.type_column = anno.col_offset", "def append_argument(self, inst):\n self.arguments.append(inst)", "def instantiate(name, *args, **kwargs):\n ...", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "def __new__(cls, *args, **kwargs):\n instance = super().__new__(cls)\n instance.args = args\n instance.kwargs = kwargs\n return instance", "def arg(\n default=MISSING,\n /,\n *,\n flag=None,\n factory=MISSING,\n init=True,\n repr=True, # noqa: A002\n hash=None, # noqa: A002\n help=None, # noqa: A002\n compare=True,\n metadata=None,\n):\n metadata = metadata or {}\n for k, v in {'flag': flag, 'help': help}.items():\n if v:\n metadata = metadata | {k: v}\n return field( # type: ignore[call-overload]\n default=default,\n default_factory=factory,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata)", "def __init__(self, a=1.0, name='Id'):\n super(IdentityExpression, self).__init__(name=name)\n ## Factor to multiply the argument with.\n self.a = a", "def _create_arguments(self, args):\n assert isinstance(args, (list, tuple))\n\n arguments = []\n index = 0\n for arg in args:\n assert isinstance(arg, (list, tuple))\n assert len(arg) == 2 or len(arg) == 3\n\n identifier = arg[0]\n if isinstance(arg[1], str):\n idl_type = self._create_type(\n arg[1], is_optional=(len(arg) == 3))\n else:\n idl_type = arg[1]\n\n default_value = None\n if len(arg) == 3:\n default_value = self._create_literal_constant(arg[2])\n\n arguments.append(\n Argument.IR(\n identifier,\n index=index,\n idl_type=idl_type,\n default_value=default_value))\n\n index += 1\n\n return arguments", "def create_argument_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n prog=\"mafiabot\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Mafia Telegram Bot command line interface.\",\n )\n\n # parser.add_argument(\n # \"--argument\",\n # action=\"store_true\",\n # default=,\n # help=\"\",\n # )\n\n add_logging_options(parser)\n\n return parser", "def __init__(self, namespace, listOfArgumentNames):\n self.namespace = namespace\n self.listOfArgumentNames = listOfArgumentNames", "def argument(self, *name_or_flags, **kwargs):\n return self.parser.add_argument(*name_or_flags, **kwargs)", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def __new__(*args):", "def _create_argument_generator(func: PredictorMethod) -> PredictorArgumentGenerator:\n return _InjectedParams.from_func(func).make_arguments", "def __init__(self, args, kwargs):\n\n\t\tself.always = kwargs\n\n\t\tself.positional = self.get_flags(args) if args else []\n\n\t\tself.meta = 
re.compile(r\"[()<>]\")\n\n\t\tself.arguments = re.compile(r\"^(-?\\d,?)+!?$|\"\n\t\t\t \t\t \t\t\t r\"^!?(-?\\d,?)+$|\"\n\t\t\t \t\t\t\t\t r\"^(!\\+?|\\+!?)$\")\n\n\t\t# Used in self.stringify to auto-increment\n\t\t# positional argument positions\n\t\tself.counter = 0", "def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):\n return from_argparse_args(cls, args, **kwargs)", "def add_argument(self, *args: Any, **kwargs: Any) -> None:\n self._arguments.append((args, kwargs))", "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "def __init__(__self__, *,\n name: pulumi.Input[str],\n args: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n if args is not None:\n pulumi.set(__self__, \"args\", args)", "def __init__(self, name, args):\n self._proc = None\n self._args = [f\"/{name}\"]\n self._args.extend(args)", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='info',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def new(name=None):", "def __init__(self, idargs):\n if type(idargs) != idargparse.TxpArgumentParser:\n raise ValueError(\"idargs must be a TxpArgumentParser\")\n self._mod = pyIdlak_txp.PyIdlakModule_new(self._modidx, idargs.idlakopts)", "def add_argument(self, parser):\n parser.add_argument(*self.args, **self.kwargs)", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='INFO',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def __init__(self, *args):\n this = _libsbml.new_LocalParameter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, command: str):\n\n # Assign attributes\n self.command_str: str = command\n self.name: str = \"\"\n self.arg: str = \"\"\n\n # Parse the command\n self.parse_command()", "def __init__(self, program, args):\n self.__program = program\n self.__args = args", "def __init__(self, a, b, *args, **kwargs):\n self.a = a\n self.b = b\n super(Uniform, self).__init__(*args, **kwargs)", "def registerAndParseArgument(name):\n from FWCore.ParameterSet.VarParsing import VarParsing\n options = VarParsing ('analysis')\n options.register(name, 0, VarParsing.multiplicity.singleton,VarParsing.varType.int,name)\n options.parseArguments()\n return options", "def __init__(self, args, kwargs):\n self._args_dec = list(args)\n self._kwargs_dec = dict(kwargs)", "def __init__(self, anara, anadec, *args, **kwargs):\n _Beamlet.__init__(self, *args, **kwargs)\n self._anara = anara\n self._anadec = anadec", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs", "def __init__(self, *args):\n this = _elas.new_Elas_parameters(*args)\n try:\n self.this.append(this)\n except Exception:\n self.this = this", "def __init__(self, *args):\n this = _libsbml.new_ConversionOption(*args)\n try: self.this.append(this)\n except: self.this = this", "def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n 
type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser", "def _set_args(self, args):\n if not args:\n self.arg = None\n elif len(args) == 1:\n self.arg = args[0]\n else:\n self.arg = args", "def from_argparse(cls, args: argparse.Namespace) -> Args:\n args_dict = copy.deepcopy(vars(args))\n for possible_arg in POSSIBLE_ARGS:\n args_dict.pop(possible_arg, None)\n\n return cls(**args_dict)", "def __init__(self, name, args=[], opts=[], **kwargs):\n #print \"Item\",name,args,opts,kwargs\n self.name = name\n\n args = list(args)\n for i in range(len(args)):\n args[i] = map_arg(args[i])\n self.args = flatten(args)\n\n opts = list(opts)\n for i in range(len(opts)):\n opts[i] = map_arg(opts[i])\n self.opts = flatten(opts)\n\n self.kwargs = dict(kwargs) # take a copy\n for key, val in self.kwargs.items():\n if type(val) == tuple or type(val) == list:\n self.kwargs[key] = map_arg(val)\n\n #print \"Item.__init__\",self.name,self.args,self.opts", "def __init__(self, command, target: str):\n self.command = command\n self.target = target", "def add_argument(self, *args, **kw):\n super().add_argument(*args, **kw)", "def __init__ (self, *args, **kw):\n self.__args = args\n self.__kw = kw", "def __init__(self, name: str, arg_type_name: str, is_required=False):\n self.key = name\n self.value = arg_type_name\n self.required = is_required", "def __init__(self, *args, **kwargs):\n nargs = len(args) + len(kwargs)\n if nargs == 0:\n raise TypeError(\"one or more arguments required (0 given)\")\n \n first_arg = args[0]\n if isinstance(first_arg, str):\n if nargs > 2 or (nargs > 1 and \"quiet\" not in kwargs):\n raise TypeError(\n \"incorrect arguments for creating Dta from file\"\n )\n self._new_from_file(*args, **kwargs)\n elif isinstance(first_arg, Dta):\n if nargs > 3:\n raise TypeError(\n \"too many arguments to create Dta from existing Dta\"\n )\n self._new_from_dta(*args, **kwargs)\n elif isinstance(first_arg, collections.Iterable):\n self._new_from_iter(*args, **kwargs)\n else:\n raise TypeError(\"Dta cannot be created from these arguments:\")", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def AddArguments(cls, argument_group):", "def __init__(self, *args):\n this = _ida_hexrays.new_carglist_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def add_argument(self, *args, **kwargs):\r\n\r\n # if no positional args are supplied or only one is supplied and\r\n # it doesn't look like an option string, parse a positional\r\n # argument\r\n chars = self.prefix_chars\r\n if not args or len(args) == 1 and args[0][0] not in chars:\r\n if args and 'dest' in kwargs:\r\n raise ValueError('dest supplied twice for positional argument')\r\n kwargs = self._get_positional_kwargs(*args, 
**kwargs)\r\n\r\n # otherwise, we're adding an optional argument\r\n else:\r\n kwargs = self._get_optional_kwargs(*args, **kwargs)\r\n\r\n # if no default was supplied, use the parser-level default\r\n if 'default' not in kwargs:\r\n dest = kwargs['dest']\r\n if dest in self._defaults:\r\n kwargs['default'] = self._defaults[dest]\r\n elif self.argument_default is not None:\r\n kwargs['default'] = self.argument_default\r\n\r\n # create the action object, and add it to the parser\r\n action_class = self._pop_action_class(kwargs)\r\n if not _callable(action_class):\r\n raise ValueError('unknown action \"%s\"' % action_class)\r\n action = action_class(**kwargs)\r\n\r\n # raise an error if the action type is not callable\r\n type_func = self._registry_get('type', action.type, action.type)\r\n if not _callable(type_func):\r\n raise ValueError('%r is not callable' % type_func)\r\n\r\n return self._add_action(action)", "def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser", "def add_arguments(self, parser):", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Get the arguments or, it's default values\n self._label = super().register_argument(\"label\", \"\")", "def __init__(self, *args, name=''):\n from collections import Iterable\n if len(args) == 1:\n if isinstance(args[0], Point):\n self.data = args[0].data.copy()\n elif isinstance(args[0], Iterable):\n self.data = list(args[0])\n else:\n self.data = list(args)\n self.name = name if not name.isspace() else ''", "def __init__(self, a=\"a\", b=\"b\"):\n self.a = a\n self.b = b", "def __init__(self):\n\n self.arg = None\n self.output = None" ]
[ "0.6991429", "0.6833005", "0.67898697", "0.6781041", "0.6746932", "0.6634749", "0.65116656", "0.64632064", "0.6431998", "0.64295053", "0.63775975", "0.637756", "0.6214503", "0.6194126", "0.6143888", "0.6139867", "0.61160153", "0.6082575", "0.6075455", "0.6035835", "0.60151345", "0.5983106", "0.5983106", "0.597569", "0.59716266", "0.5955706", "0.5955706", "0.5955706", "0.5953267", "0.5947924", "0.59351754", "0.5901955", "0.59014726", "0.5898677", "0.58680856", "0.5863411", "0.5858644", "0.58458394", "0.5843351", "0.58339775", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.582055", "0.58039975", "0.5802547", "0.5763628", "0.5745421", "0.57265925", "0.5723549", "0.5713966", "0.57132983", "0.5710479", "0.56942433", "0.569075", "0.5680605", "0.5676371", "0.56693137", "0.56372833", "0.56245303", "0.5616019", "0.5587766", "0.5587326", "0.5585946", "0.5572557", "0.55697167", "0.55685306", "0.5564953", "0.5552285", "0.5550901", "0.5535986", "0.5532235", "0.5519887", "0.5514336", "0.5510557", "0.55069476", "0.55038595", "0.54961467", "0.5491066", "0.5490874", "0.5490249", "0.54897034", "0.54896307", "0.5487103", "0.54861605" ]
0.5699178
68
Instantiate a new TypeDefer
def __init__(self, raw_defer: Dict): self.kind = raw_defer.get("kind") self.name = raw_defer.get("name") self.of_type: TypeDefer = TypeDefer(raw_defer.get("ofType")) if raw_defer.get("ofType") is not None else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instantiate():\n d = defer.Deferred()", "def __init__(self, type_):\n\n self.type = type_", "def Instance(self) -> TypeManager:", "def __call__(self, *args):\n return TypeCall(self, args)", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def deferredInit(deferredName):\n def _deferredInit(func):\n def __deferredInit(self, *args, **kwargs):\n initDeferred = None\n if(hasattr(self, deferredName)):\n initDeferred = getattr(self, deferredName)\n if(initDeferred.called):\n return defer.maybeDeferred(func, self, *args, **kwargs)\n else:\n raise RuntimeError(\"%s doesn't define the Deferred attribute `%s`.\" % (self.__class__.__name__, deferredName))\n \n def _finish(result):\n return func(self, *args, **kwargs)\n \n def _finish_error(failure):\n print '_finish_err: %s' % failure\n \n resultDeferred = defer.Deferred()\n resultDeferred.addCallbacks(_finish, _finish_error)\n \n initDeferred.chainDeferred(resultDeferred)\n \n return resultDeferred\n return __deferredInit\n \n # if it's a callable, that means there's no arguments\n # so we use the defaultname for the instance's deferred\n if(callable(deferredName)):\n func = deferredName\n deferredName = 'initDeferred'\n return _deferredInit(func)\n \n return _deferredInit", "def __constructor__(self):\n return type(self)", "def __init__(self, type=np.float64):\n self._inst = None\n self._type = type", "def type_instance(typedef):\n if subclassof(typedef, Type):\n # Type class passed, create no-arg instance\n typedef = typedef()\n return typedef", "def observe(self) -> \"defer.Deferred[_T]\":\n ...", "def register(dmm, typecls):\n def wraps(fn):\n dmm.register(typecls, fn)\n return fn\n\n return wraps", "def __init__(self):\n self.instantiable = {self: self}\n self.is_generic = False", "def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n self.fast_validate = (12, aType)", "def get_declaration(self, type_):\n return self.__apply_sequence(type_)", "def __init__(self, field: FT):\n self.field: Final[FT] = field", "def Reference(cls):\n return type(cls.__name__, (Typed, ), {\"type\": cls})", "def define(**names):\n module = initialize(2)\n __deferred_definitions__ = module.__deferred_definitions__\n for name, specifier in names.items():\n __deferred_definitions__[name] = Deferred(name, specifier)", "def defer(self, *args, **kwargs):\n return DeferredRoutineCall(self, *args, **kwargs)", "def __init__(self, data_type=None):\n self.type = data_type", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self, type, name, verbose):\n self._name = name\n self._verbose = verbose\n self.__fdev_id = _cantera.flowdev_new(type)", "def factory(type_or_name: str | type, singleton: bool = False) -> Callable[[T], T]:\n\n def _decorator(original: T) -> T:\n setattr(original, 'factory_provider', (type_or_name, singleton))\n return original\n\n return _decorator", "def __init_subclass__(cls, type_: CalibrationTargetType):\n cls._types[type_] = cls", "def __init__(\n self,\n type_: Type[T],\n *,\n type_is_generic_self: bool = False,\n coerce: bool = False,\n compcoef: Optional[float] = None,\n inheritable: bool = True,\n simpledelta: bool = True,\n merge_fn: MergeFunction = default_field_merge,\n ephemeral: bool = False,\n weak_ref: bool = False,\n allow_ddl_set: bool = False,\n describe_visibility: DescribeVisibilityPolicy = (\n 
DescribeVisibilityPolicy.SHOW_IF_EXPLICIT),\n ddl_identity: bool = False,\n aux_cmd_data: bool = False,\n special_ddl_syntax: bool = False,\n reflection_method: ReflectionMethod = ReflectionMethod.REGULAR,\n reflection_proxy: Optional[Tuple[str, str]] = None,\n name: Optional[str] = None,\n reflection_name: Optional[str] = None,\n patch_level: int = -1,\n **kwargs: Any,\n ) -> None:\n if not isinstance(type_, type):\n raise ValueError(f'{type_!r} is not a type')\n\n self.type = type_\n self.type_is_generic_self = type_is_generic_self\n self.coerce = coerce\n self.allow_ddl_set = allow_ddl_set\n self.ddl_identity = ddl_identity\n self.aux_cmd_data = aux_cmd_data\n self.special_ddl_syntax = special_ddl_syntax\n self.describe_visibility = describe_visibility\n\n self.compcoef = compcoef\n self.inheritable = inheritable\n self.simpledelta = simpledelta\n self.weak_ref = weak_ref\n self.reflection_method = reflection_method\n self.reflection_proxy = reflection_proxy\n self.is_reducible = issubclass(type_, s_abc.Reducible)\n self.patch_level = patch_level\n\n if name is not None:\n self.name = name\n if reflection_name is not None:\n self.sname = reflection_name\n\n if (\n merge_fn is default_field_merge\n and callable(\n type_merge_fn := getattr(self.type, 'merge_values', None)\n )\n ):\n self.merge_fn = type_merge_fn\n else:\n self.merge_fn = merge_fn\n\n self.ephemeral = ephemeral", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def wrapped_unit(cls) -> MyType:\n MyType.clear_interning_cache()\n return MyType.decorate(MyUnit)", "def __init__(self, line, context):\n match = Ftype_type_decl.type_match(line)\n if match is None:\n raise ParseSyntaxError(\"type declaration\", token=line, context=context)\n else:\n self._match_len = len(match.group(0))\n self._class = match.group(1)\n self._typestr = match.group(2)\n self._kind = self.typestr\n # End if", "def type(cls):", "def fountToDeferred(fount):\n d = Deferred(fount.stopFlow)\n fount.flowTo(_DeferredAggregatingDrain(d))\n return d", "def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n try:\n self.fast_validate = CoercableTypes[aType]\n except:\n self.fast_validate = (11, aType)", "def __init__(self, func, type):\n self.func = func\n self.type = type", "def type(self) -> global___Type:", "def __init__(self, runtime, field, value=None):\n assert field is not None, \"Cannot construct share without a field.\"\n assert callable(field), \"The field is not callable, wrong argument?\"\n\n Deferred.__init__(self)\n self.runtime = runtime\n self.field = field\n if value is not None:\n self.callback(value)", "def __init__(self, inst, class_type):\n\t\tself.type = str(class_type)[7:]\n\t\tself.type = self.type[:-1]\n\t\tself.inst = inst\n\t\treturn", "def _resolve_type(self) -> None:\n if self._type is None and self.type_name is not None:\n self._type = self._type_context.get_type_by_name(self.type_name)", "def __init__(self,\n type: str) -> None:\n # pylint: disable=super-init-not-called\n self.type = type", "def __init__(self,\n type: str) -> None:\n # pylint: disable=super-init-not-called\n self.type = type", "def __init__(self,given_type):\n self.given_type=given_type", "def _ConstructType(self, type_name, type_contents, filepath, require_guid):\n\n description = ''\n parents = None\n local_field_names = None\n opt_local_field_names = None\n is_abstract = False\n allow_undefined_fields = False\n is_canonical = False\n guid = None\n\n 
expected_keys = set([\n 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'guid',\n 'is_canonical', 'allow_undefined_fields'\n ])\n\n if 'description' in type_contents:\n description = type_contents['description']\n if 'implements' in type_contents:\n parents = type_contents['implements']\n if 'uses' in type_contents:\n local_field_names = type_contents['uses']\n if 'opt_uses' in type_contents:\n opt_local_field_names = type_contents['opt_uses']\n if 'is_abstract' in type_contents:\n is_abstract = type_contents['is_abstract']\n if 'allow_undefined_fields' in type_contents:\n allow_undefined_fields = type_contents['allow_undefined_fields']\n if 'is_canonical' in type_contents:\n is_canonical = type_contents['is_canonical']\n if 'guid' in type_contents:\n guid = type_contents['guid']\n\n # Generate tuples to represent each field\n fq_lfn = []\n if local_field_names:\n self._ConstructField(local_field_names, False, fq_lfn)\n if opt_local_field_names:\n self._ConstructField(opt_local_field_names, True, fq_lfn)\n\n entity_type = EntityType(\n filepath=filepath,\n typename=type_name,\n description=description,\n parents=parents,\n local_field_tuples=fq_lfn,\n is_abstract=is_abstract,\n allow_undefined_fields=allow_undefined_fields,\n inherited_fields_expanded=False,\n is_canonical=is_canonical,\n guid=guid,\n require_guid=require_guid,\n namespace=self.local_namespace)\n\n # Add errors to type if there's anything extra in the block. We add to the\n # entity type because an extra key here is likely a typo in a real key name\n # that would result in information being lost from the type.\n for key in type_contents:\n if key not in expected_keys:\n entity_type.AddFinding(\n findings_lib.UnrecognizedKeyError(key, entity_type.file_context))\n\n return entity_type", "def deferred(self, key: KT) -> \"defer.Deferred[VT]\":\n ...", "def register(cls, D: DONLOADER_CLASS) -> DONLOADER_CLASS:\r\n ...", "def describer_type(dtype: str):\n\n def wrapped_describer(klass):\n klass.describer_type = dtype\n return klass\n\n return wrapped_describer", "def _constructor(self):\n return self.__class__", "def type(name):", "def __init__(self,\n name: str,\n type: Type,\n expire_after_call: timedelta = None,\n expire_after_write: timedelta = None):\n if not name:\n raise ValueError(\"name can not be missing.\")\n if not name.isidentifier():\n raise ValueError(\n f\"invalid name {name}. A spec name can only contains alphanumeric letters (a-z) and (0-9), \"\n f\"or underscores ( \"\n f\"_). A valid identifier cannot start with a number, or contain any spaces.\")\n if iskeyword(name):\n forbidden = '\\n'.join(kwlist)\n raise ValueError(\n f\"invalid spec name {name} (Python SDK specifically). 
since {name} will result as an attribute on \"\n f\"context.store.\\n\"\n f\"The following names are forbidden:\\n {forbidden}\")\n if not name.islower():\n raise ValueError(f\"Only lower case names are allowed, {name} is given.\")\n self.name = name\n if not type:\n raise ValueError(\"type can not be missing.\")\n if not isinstance(type, Type):\n raise TypeError(\"type is not a StateFun type.\")\n self.type = type\n if expire_after_call and expire_after_write:\n # both can not be set.\n raise ValueError(\"Either expire_after_call or expire_after_write can be set, but not both.\")\n if expire_after_call:\n self.duration = int(expire_after_call.total_seconds() * 1000.0)\n self.after_call = True\n self.after_write = False\n elif expire_after_write:\n self.duration = int(expire_after_write.total_seconds() * 1000.0)\n self.after_call = False\n self.after_write = True\n else:\n self.duration = 0\n self.after_call = False\n self.after_write = False", "def mk_typ(self, name, kind):\n # (str, ty.Kind) -> ty.TypeVar\n\n typ = ty.TypeVar(name, kind)\n self.type_param_scopes[0].appendleft((name, typ))\n return typ", "def test_adaptToIType(self):\n typeInstance = igwt.IType(Change())", "def __init__(self):\n self.func = {\n \"str\": DataType.str,\n \"int\": DataType.int,\n \"float\": DataType.float,\n \"duration\": DataType.duration,\n \"datetime\": DataType.datetime,\n }", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None", "def __init__(self, deferred):\n self._values = []\n self._deferred = deferred", "def initialize(cls):", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def unclass(self, t):\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. 
builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t", "def __init__(self, descriptor = None, type = None, config = None):\n self.descriptor = descriptor\n self.type = type\n self.config = config", "def __init__(self, distribution_type: Type[Distribution]) -> None:\n super().__init__()\n self.distribution_type = distribution_type", "def defer(observable_factory: Callable[[Any], ObservableBase]) -> ObservableBase:\n from ..operators.observable.defer import defer\n return defer(observable_factory)", "def type_obj(self) -> Union[type, GenericAlias]:\n pass", "def register_type(type, factory):\n\n _types[type] = factory", "def simple_type(typename=None, serialize_fn=None, deserialize_fn=None) -> Type:\n return SimpleType(typename, serialize_fn, deserialize_fn)", "def fromJSON(cls, data):\n return SERIALISE_CLASS_LOOKUP[data['timeref_type']].fromJSON(data)", "def __new__(cls, name, bases, dct):\n _cls = super().__new__(cls, name, bases, dct)\n PeaType._dct.update({name: {'cls': cls,\n 'name': name,\n 'bases': bases,\n 'dct': dct}})\n return _cls", "def __new__(cls, name, bases, dct):\n _cls = super().__new__(cls, name, bases, dct)\n PeaType._dct.update({name: {'cls': cls,\n 'name': name,\n 'bases': bases,\n 'dct': dct}})\n return _cls", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. 
We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def from_type(cls: type[_ST1], type_obj: type) -> _ST1:\n try:\n dct = vars(type_obj)\n except TypeError:\n raise TypeError(f\"Expected a type object, got {type(type_obj).__name__!r}\") from None\n return cls._reconstruct({k: np.dtype(v) for k, v in dct.items() if not k.startswith(\"_\")})", "def create(_type, *args, **kwargs):\n # noinspection PyUnresolvedReferences\n return IExplorer.registry[_type.lower()](*args, **kwargs)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def __init__(self, name):\n self.type_cls = None\n\n self.name = name\n self.description = None\n self.updated = None\n self.notes = None\n self.properties = {}", "def injectTypes (g):\n\tself=__module__\n\ts=g.symbols\n\tg.token('TYPE_VAR', '_|[A-Z][A-Z0-9]*')\n\tg.rule('TypeParameter', s.LSB, listOf(g.agroup(s.TYPE_VAR, s.FQNAME), s.COMMA, g), s.RSB)\n\tg.rule('TypeReference', s.FQNAME._as('name'), s.TypeParameter.optional()._as('parameters'))\n\tg.group('TypeValue')\n\tg.rule('TypeExpression')\n\tg.rule('TypeUnionSuffix', s.PIPE, s.TypeValue)\n\tg.group('TypePrefix', s.TypeReference)\n\tg.group('TypeSuffix', s.TypeUnionSuffix)\n\tg.rule('TypeExpression', s.TypePrefix, s.TypeSuffix.zeroOrMore())\n\tg.rule('TypeParens', s.LP, listOf(s.TypeExpression, s.COMMA, g), s.RP)\n\ts.TypeValue.set(s.TypeParens, s.TypeExpression)\n\tg.rule('TypeSlot', s.CheckIndent, g.aword('@slot'), s.NAME._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'))\n\tg.group('TypeLine', s.TypeSlot)\n\tg.group('TypeCode', s.COMMENT, s.TypeLine)\n\tg.rule('TypeBody', s.Indent, s.TypeCode.zeroOrMore(), s.Dedent)\n\tg.rule('Type', s.CheckIndent, g.aword('@type'), s.TypeReference._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'), s.TypeBody.optional())", "def builtin_defer(func, *args, **kwargs):\n deferred_handlers.append(functools.partial(func, *args, **kwargs))", "def __init__(self): \n self.types = {}", "def type_():\n pass", "def type(\n cls: Type = None,\n *,\n name: str = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: str = None,\n federation: Optional[FederationTypeParams] = None,\n):\n\n def wrap(cls):\n wrapped = _wrap_dataclass(cls)\n\n return _process_type(\n wrapped,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)", "def get_factory():", "def __init__(self, name, type, project):\n DSSRecipeCreator.__init__(self, type, name, project)\n self.script = None", "def __type_hinting__(self, user: User, dbsession: Session, session: ISession, admin: Admin, registry: Registry, on_demand_resource_renderer: OnDemandResourceRenderer, transaction_manager: TransactionManager):\n self.user = user\n self.dbsession = dbsession\n self.session = session\n self.admin = admin\n self.registry = registry\n self.on_demand_resource_renderer = on_demand_resource_renderer\n self.transaction_manager = transaction_manager\n self.tm = transaction_manager", "def _form_for_type(request, C, defn, 
add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def get_class(namespace, data_type, init_pre=lambda **kwargs: None,\n init_post=lambda **kwargs: None):\n spec = __NS_CATALOG.get_spec(namespace, data_type)\n __nwbfields__ = spec2nwbfields(spec)\n\n @docval(*spec2docval(spec))\n def __init__(self, **kwargs):\n init_pre(**kwargs)\n super_args = [x['name'] for x in super(type(self), self).__init__.__docval__['args']]\n super(type(self), self).__init__(**{arg: kwargs[arg] for arg in super_args\n if arg in kwargs and kwargs[arg] is not None})\n for attr, val in kwargs.items():\n try:\n setattr(self, attr, val)\n except AttributeError:\n pass\n init_post(**kwargs)\n\n d = {'__init__': __init__, '__nwbfields__': __nwbfields__}\n\n cls = type(spec['neurodata_type_def'], (eval(spec['neurodata_type_inc']),), d)\n register_class(data_type, namespace, cls)\n return cls", "def instantiate(name, *args, **kwargs):\n ...", "def __init__(self, allow_none=False, excludes=None, request_types=None):\n self.funcs = {}\n self.instance = None\n self.allow_none = allow_none\n if excludes is None:\n self.excludes = []\n else:\n self.excludes = excludes\n if request_types is None:\n self.request_types = ['all']\n else:\n self.request_types = request_types", "def __init__(self, type, value):\r\n self._type = type\r\n self._value = value", "def create(cls, _):\n return cls", "def gen_alternative_type(full_name, d):\n name = full_name.rsplit(\".\", 1)[-1]\n\n nts = d.keys()\n members = dict() # set of possible types for each member\n for nt in nts:\n for a, t in d[nt]:\n if a in members:\n members[a].add(t)\n else:\n members[a] = {t}\n ret = list()\n ret.append(f\"// Generated Alternative {name}=\")\n for nt in nts:\n ret.append(\"// {}=({})\".format(nt, \", \".join([f\"{a}={t}\" for a, t in d[nt]])))\n ret.append(\"\")\n for nt in nts:\n ret.append(f\"class {name}_{nt};\")\n ret.append(\"\")\n ret.append(f\"class {name} {{\")\n ret.append(\"public:\")\n ret.append(\n \" enum class kind {{ {} }};\".format(\n \", \".join([f\"{nt}={i}\" for i, nt in enumerate(nts)])\n )\n )\n ret.append(\"\")\n ret.append(\" static Alternative* getType() {\")\n ret.append(\" PyObject* resolver = getOrSetTypeResolver();\")\n ret.append(\" if (!resolver)\")\n ret.append(f' throw std::runtime_error(\"{name}: no resolver\");')\n ret.append(\n \" PyObject* res = PyObject_CallMethod\"\n f'(resolver, \"resolveTypeByName\", \"s\", \"{full_name}\");'\n )\n ret.append(\" if (!res)\")\n ret.append(f' throw std::runtime_error(\"{full_name}: did not resolve\");')\n ret.append(\" return (Alternative*)PyInstance::unwrapTypeArgToTypePtr(res);\")\n ret.append(\" }\")\n\n ret.append(f\" static {name} fromPython(PyObject* p) {{\")\n ret.append(\" Alternative::layout* l = nullptr;\")\n ret.append(\n \" PyInstance::copyConstructFromPythonInstance\"\n \"(getType(), (instance_ptr)&l, p, ConversionLevel::ImplicitContainers);\"\n )\n ret.append(f\" return {name}(l);\")\n ret.append(\" }\")\n ret.append(\"\")\n ret.append(\" PyObject* toPython() {\")\n ret.append(\n \" return PyInstance::extractPythonObject((instance_ptr)&mLayout, getType());\"\n )\n ret.append(\" }\")\n ret.append(\"\")\n\n ret.append(f\" ~{name}() {{ getType()->destroy((instance_ptr)&mLayout); }}\")\n ret.append(\n f\" {name}():mLayout(0) {{ getType()->constructor((instance_ptr)&mLayout); }}\"\n )\n ret.append(\n f\" {name}(kind 
k):mLayout(0) {{ \"\n \"ConcreteAlternative::Make(getType(), (int64_t)k)\"\n \"->constructor((instance_ptr)&mLayout); }\"\n )\n ret.append(\n f\" {name}(const {name}& in) \"\n \"{ getType()->copy_constructor((instance_ptr)&mLayout, (instance_ptr)&in.mLayout); }\"\n )\n ret.append(\n f\" {name}& operator=(const {name}& other) \"\n \"{ getType()->assign((instance_ptr)&mLayout, (instance_ptr)&other.mLayout);\"\n \" return *this; }\"\n )\n ret.append(\"\")\n for nt in nts:\n ret.append(\n f\" static {name} {nt}(\"\n + \", \".join([f\"const {t}& {a}\" for a, t in d[nt]])\n + \");\"\n )\n ret.append(\"\")\n ret.append(\" kind which() const { return (kind)mLayout->which; }\")\n ret.append(\"\")\n ret.append(\" template <class F>\")\n ret.append(\" auto check(const F& f) {\")\n for nt in nts:\n ret.append(f\" if (is{nt}()) {{ return f(*({name}_{nt}*)this); }}\")\n ret.append(\" }\")\n ret.append(\"\")\n for nt in nts:\n ret.append(f\" bool is{nt}() const {{ return which() == kind::{nt}; }}\")\n ret.append(\"\")\n ret.append(\" // Accessors for members\")\n for m in members:\n m_type = return_type(members[m])\n ret.append(f\" {m_type} {m}() const;\")\n ret.append(\"\")\n ret.append(\" Alternative::layout* getLayout() const { return mLayout; }\")\n ret.append(\"protected:\")\n ret.append(f\" explicit {name}(Alternative::layout* l): mLayout(l) {{}}\")\n ret.append(\" Alternative::layout *mLayout;\")\n ret.append(\"};\")\n ret.append(\"\")\n ret.append(\"template <>\")\n ret.append(f\"class TypeDetails<{name}> {{\")\n ret.append(\"public:\")\n ret.append(\" static Type* getType() {\")\n ret.append(f\" static Type* t = {name}::getType();\")\n ret.append(\" if (t->bytecount() != bytecount) {\")\n ret.append(\n f' throw std::runtime_error(\"{name} somehow we have the wrong bytecount!\");'\n )\n ret.append(\" }\")\n ret.append(\" return t;\")\n ret.append(\" }\")\n ret.append(\" static const uint64_t bytecount = sizeof(void*);\")\n ret.append(\"};\")\n ret.append(\"\")\n for nt in nts:\n ret.append(f\"class {name}_{nt} : public {name} {{\")\n ret.append(\"public:\")\n ret.append(\" static ConcreteAlternative* getType() {\")\n\n ret.append(\n \" static ConcreteAlternative* t = ConcreteAlternative::Make\"\n f\"({name}::getType(), static_cast<int>(kind::{nt}));\"\n )\n ret.append(\" return t;\")\n ret.append(\" }\")\n ret.append(f\" static Alternative* getAlternative() {{ return {name}::getType(); }}\")\n # ret.append(f' static NamedTuple* elementType() {{ return {nt}_Type; }}')\n ret.append(\"\")\n ret.append(f\" {name}_{nt}():{name}(kind::{nt}) {{}}\")\n if len(d[nt]) > 0:\n ret.append(\n f\" {name}_{nt}(\"\n + \", \".join([f\" const {t}& {a}1\" for a, t in d[nt]])\n + f\"):{name}(kind::{nt}) {{\"\n )\n for a, _ in d[nt]:\n ret.append(f\" {a}() = {a}1;\")\n ret.append(\" }\")\n ret.append(f\" {name}_{nt}(const {name}_{nt}& other):{name}(kind::{nt}) {{\")\n ret.append(\n \" getType()->copy_constructor((instance_ptr)&mLayout, \"\n \"(instance_ptr)&other.mLayout);\"\n )\n ret.append(\" }\")\n ret.append(f\" {name}_{nt}& operator=(const {name}_{nt}& other) {{\")\n ret.append(\n \" getType()->assign((instance_ptr)&mLayout, (instance_ptr)&other.mLayout);\"\n )\n ret.append(\" return *this;\")\n ret.append(\" }\")\n ret.append(f\" ~{name}_{nt}() {{}}\")\n ret.append(\"\")\n for i, (a, t) in enumerate(d[nt]):\n offset = (\n \"\"\n if i == 0\n else \" + \" + \" + \".join([\"size\" + str(j) for j in range(1, i + 1)])\n )\n ret.append(f\" {t}& {a}() const {{ return *({t}*)(mLayout->data{offset}); }}\")\n 
ret.append(\"private:\")\n for i, (_, t) in list(enumerate(d[nt]))[:-1]:\n ret.append(f\" static const int size{i + 1} = sizeof({t});\")\n ret.append(\"};\")\n ret.append(\"\")\n ret.append(\n f\"{name} {name}::{nt}(\" + \", \".join([f\"const {t}& {a}\" for a, t in d[nt]]) + \") {\"\n )\n ret.append(f\" return {name}_{nt}(\" + \", \".join([a for a, _ in d[nt]]) + \");\")\n ret.append(\"}\")\n ret.append(\"\")\n for m in members:\n m_type = return_type(members[m])\n multiple_types = len(members[m]) > 1\n ret.append(f\"{m_type} {name}::{m}() const {{\")\n for nt in nts:\n if m in [e[0] for e in d[nt]]:\n ret.append(f\" if (is{nt}())\")\n if multiple_types:\n ret.append(f\" return {m_type}((({name}_{nt}*)this)->{m}());\")\n else:\n ret.append(f\" return (({name}_{nt}*)this)->{m}();\")\n ret.append(\n f' throw std::runtime_error(\"\\\\\"{name}\\\\\" subtype does not contain \\\\\"{m}\\\\\"\");'\n )\n ret.append(\"}\")\n ret.append(\"\")\n ret.append(f\"// END Generated Alternative {name}\")\n ret.append(\"\")\n return [e + \"\\n\" for e in ret]", "def __init__(self):\n\n # Dictionary of types seen so far. Builtin types always available.\n # Values : list of constructors which the type defines\n # This is a smartdict, so keys can be retrieved.\n self.knownTypes = smartdict.Smartdict()\n for typecon in ast.builtin_types_map.values():\n self.knownTypes[typecon()] = None\n\n # Dictionary of constructors encountered so far.\n # Value: Type which the constructor produces.\n # This is a smartdict, so keys can be retrieved.\n self.knownConstructors = smartdict.Smartdict()", "def wrap_typing(self):\n if self._typing_key is None:\n key = self._function\n else:\n key = self._typing_key\n\n def inner(typing_class):\n # Note that two templates could be used for the same function, to\n # avoid @infer_global etc the typing template is copied. 
This is to\n # ensure there's a 1:1 relationship between the typing templates and\n # their keys.\n clazz_dict = dict(typing_class.__dict__)\n clazz_dict['key'] = key\n cloned = type(f\"cloned_template_for_{key}\", typing_class.__bases__,\n clazz_dict)\n self._TYPER = cloned\n _overload_glue.add_no_defer(key)\n self._build()\n return typing_class\n return inner", "def defineFrom(from_name, *names):\n module = initialize(2)\n __deferred_definitions__ = module.__deferred_definitions__\n for name in names:\n specifier = from_name + ':' + name\n __deferred_definitions__[name] = Deferred(name, specifier)", "def __init__(\n self,\n *,\n type: str = \"string\",\n default: str = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def __init__(self, raw_type: type):\n self.raw_type = raw_type\n self.name = raw_type.__name__\n self.qualname = raw_type.__qualname__\n self.module = raw_type.__module__\n self.full_name = TypeInfo.to_full_name(raw_type)\n self.hash = hash(self.full_name)\n self.is_abstract = inspect.isabstract(raw_type)\n # TODO(fk) store more information on attributes\n self.instance_attributes: OrderedSet[str] = OrderedSet()\n self.attributes: OrderedSet[str] = OrderedSet()\n\n # TODO(fk) properly implement generics!\n # For now we just store the number of generic parameters for set, dict and list.\n self.num_hardcoded_generic_parameters: int | None = (\n 2 if raw_type is dict else 1 if raw_type in (set, list) else None\n )", "def make_fee_class():\r\n fee = 1\r\n return make_class(locals())", "def __init__(self, raw_arg: Dict):\n self.name = raw_arg.get(\"name\")\n self.description = raw_arg.get(\"description\")\n self.type = TypeDefer(raw_arg.get(\"type\")) if raw_arg.get(\"type\") is not None else None\n self.default_value = raw_arg.get(\"defaultValue\")", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def as_ctype(type):\n return getattr(type, \"as_ctype\", lambda: type)()", "def __init__(self, raw_field: Dict):\n self.name = raw_field.get(\"name\")\n self.description = raw_field.get(\"description\")\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get(\"args\", []))\n self.type: TypeDefer = TypeDefer(raw_field.get(\"type\")) if raw_field.get(\"type\") is not None else None\n self.is_deprecated: bool = raw_field.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_field.get(\"deprecationReason\")", "def __init__(self, field: \"Attribute[_T]\") -> None:\n self.field = field", "def setup_class(cls):", "def setup_class(cls):", "def __init__(self, pretransformed_input, transform_fn, dtype=None,\n shape=NONE_SPECIFIED, also_track=None, name=None):\n pretransformed_input = tensor_util.convert_nonref_to_tensor(\n pretransformed_input,\n name='pretransformed_input')\n\n if dtype is None:\n dtype = (getattr(transform_fn, 'dtype', None) or\n dtype_util.base_dtype(pretransformed_input.dtype))\n try:\n dtype = None if dtype is None else tf.as_dtype(dtype)\n except TypeError:\n raise TypeError('Argument `dtype` must be convertible to a '\n '`tf.dtypes.DType`; saw \"{}\" of type \"{}\".'.format(\n repr(dtype), type(dtype)))\n\n if shape == NONE_SPECIFIED:\n shape = getattr(transform_fn, 'forward_event_shape', _identity)\n shape = shape(pretransformed_input.shape)\n try:\n shape = tf.TensorShape(shape)\n except TypeError:\n raise TypeError('Argument `shape` must be convertible to a '\n '`tf.TensorShape`; saw \"{}\".'.format(shape))\n\n name = name or getattr(transform_fn, 'name', None)\n if not 
name:\n name = '_'.join([\n transform_fn.__name__,\n getattr(pretransformed_input, 'name', '')])\n name = name_util.strip_invalid_chars(name)\n name = name_util.camel_to_lower_snake(name)\n name = name_util.get_name_scope_name(name)\n name = name_util.strip_invalid_chars(name)\n\n if hasattr(transform_fn, 'forward'):\n fwd_name = '\"{}\"'.format(transform_fn.name)\n else:\n fwd_name = transform_fn.__name__\n if not callable(transform_fn):\n raise TypeError('Argument `transform_fn` must be `callable`.')\n\n super(DeferredTensor, self).__init__(name=name)\n self._pretransformed_input = pretransformed_input\n self._transform_fn = transform_fn\n self._dtype = dtype\n self._shape = shape\n self._also_track = also_track\n self._name = name\n self._fwd_name = fwd_name\n\n # Secret handshake with tf.is_tensor to return True for DT.\n #\n # Works around `tf.get_static_value` not returning `None`.\n # With this, `tf.get_static_value` returns `None`, and without\n # this returns the `DeferredTensor` object.\n # TODO(b/140157055): Remove this shim after LinOp is patched in 2.0.\n self.is_tensor_like = True" ]
[ "0.64138234", "0.54614604", "0.54489744", "0.54139173", "0.5383102", "0.5300838", "0.52134734", "0.5191931", "0.5120031", "0.50648534", "0.49831468", "0.49792293", "0.49551898", "0.49469346", "0.49264267", "0.49214688", "0.48970965", "0.48778772", "0.48720497", "0.48700586", "0.4867235", "0.48640287", "0.48631537", "0.48529202", "0.4852579", "0.48455602", "0.48381415", "0.4833245", "0.4824423", "0.48197848", "0.481025", "0.4804476", "0.4794827", "0.478876", "0.47882617", "0.47805718", "0.47805718", "0.47767726", "0.47569844", "0.47024986", "0.46952125", "0.4674762", "0.46693024", "0.4660713", "0.4651452", "0.46447113", "0.4638889", "0.46360058", "0.4630815", "0.4624915", "0.46216708", "0.46197358", "0.46162513", "0.4600167", "0.45985696", "0.45962438", "0.45864034", "0.4572104", "0.45626932", "0.45571047", "0.4550993", "0.4550993", "0.45491236", "0.45491236", "0.4544942", "0.45401904", "0.4536881", "0.45336226", "0.45336226", "0.45303318", "0.45236447", "0.45180586", "0.45137346", "0.45136592", "0.4508315", "0.4505633", "0.4503279", "0.4493832", "0.44858685", "0.44788224", "0.4476418", "0.44739208", "0.44728178", "0.44696704", "0.4468362", "0.44626138", "0.44586924", "0.4456375", "0.44560233", "0.4454462", "0.44494507", "0.44475624", "0.4446069", "0.4446069", "0.44448838", "0.4444664", "0.44386277", "0.4437564", "0.4437564", "0.44302917" ]
0.7597686
0
Instantiate a new SchemaTypeField
def __init__(self, raw_field: Dict): self.name = raw_field.get("name") self.description = raw_field.get("description") self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get("args", [])) self.type: TypeDefer = TypeDefer(raw_field.get("type")) if raw_field.get("type") is not None else None self.is_deprecated: bool = raw_field.get("isDeprecated") self.deprecation_reason: str = raw_field.get("deprecationReason")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def __init__(self, field: \"SchemaTypeField\", settings: Settings):\n from qlient import helpers\n self.settings = settings\n self.name = field.name\n self.description = field.description\n self.arguments = helpers.adapt_arguments(field.args)\n self.return_type = field.type\n self._return_fields: Union[Tuple[SelectedField], None] = None", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and “__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance", "def create_schema(self, schema: str):\n return", "def makeField(self,field_name,field_type,field_precision,field_scale,field_length):\n \n new_field = self.GP.CreateObject(\"field\")\n new_field.Name = field_name\n new_field.Type = field_type\n new_field.Precision = field_precision\n new_field.Scale = field_scale\n new_field.Length = field_length\n new_field.IsNullable = True\n \n return new_field", "def __new__(metacls, name, bases, attributes, **kwargs):\n # Use both a namespaced mapping and a standard dict\n # as class-based records of our field attributes:\n field_index = Flat()\n field_names = {}\n \n # Stow both the Python name and the namespaced name\n # for each field attribute defined on the schema,\n # additionally manually calling __set_name__(…) if\n # we’re on 
a pre-3.6 version of Python:\n for attribute, value in attributes.items():\n if isinstance(value, FieldBase):\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n # This is the same as the above, but for the base\n # ancestor class – this enables field inheritance:\n for base in bases:\n parent = base.__mro__[0]\n for attribute, value in vars(parent).items():\n if isinstance(value, FieldBase) and attribute not in attributes:\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n for namespace in field_index.namespaces():\n nsfield = Namespace(field_index, namespace=namespace)\n if NEED_NAME:\n nsfield.__set_name__(None, namespace)\n attributes[namespace] = nsfield\n field_names[namespace] = nsfield\n \n # Add both the field-index and the field-names mappings\n # to the class dictionary for the new type:\n attributes['__field_index__'] = field_index\n attributes['__field_names__'] = field_names\n \n # Create and return the schema type:\n return super(MetaSchema, metacls).__new__(metacls, name,\n bases,\n attributes,\n **kwargs)", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def __init__(\n self,\n data_type,\n name,\n index,\n has_default,\n default=_NO_DEFAULT,\n order=None,\n doc=None,\n other_props=None\n ):\n if (not isinstance(name, _str)) or (not name):\n raise SchemaParseException('Invalid record field name: %r.' % name)\n if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):\n raise SchemaParseException('Invalid record field order: %r.' 
% order)\n\n # All properties of this record field:\n self._props = {}\n\n self._has_default = has_default\n if other_props:\n self._props.update(other_props)\n\n self._index = index\n self._type = self._props['type'] = data_type\n self._name = self._props['name'] = name\n\n if has_default:\n self._props['default'] = default\n\n if order is not None:\n self._props['order'] = order\n\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def schemaized_field(field):\n return zope.schema.interfaces.IField(field)", "def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def __init__(self, schema_type, parameter_type, parameter_name):\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name", "def __init__(self, schema ):\n self.schema = schema", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def create_field(self, label, value_type, key=None):\n payload = self._build_params(label=label, value_type=value_type, key=key)\n return Field.deserialize(self._post('fields', None, payload))", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' 
% data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def get_field_type(\n self, field_type: Union[Type, str], collection_name: str\n ) -> SchemaFieldDataType:\n TypeClass: Optional[Type] = _field_type_mapping.get(field_type)\n\n if TypeClass is None:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n TypeClass = NullTypeClass\n\n return SchemaFieldDataType(type=TypeClass())", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def __init__(self, schema=None):\n self.schema = schema or {}", "def generate_values_type(self) -> typing.Any:\n values_type = self.type.__args__[1]\n\n name = self.get_singular_name(self.name)\n self.internal_field = AvroField(name, values_type)\n self.values_type = self.internal_field.get_avro_type()", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def get_field_type_from_schema(schema_type, field_name):\n if field_name == '@class':\n return GraphQLString\n else:\n if field_name not in schema_type.fields:\n raise AssertionError(u'Field {} passed validation but was not present on type '\n u'{}'.format(field_name, schema_type))\n\n # Validation guarantees that the field must exist in the schema.\n return schema_type.fields[field_name].type", "def convert_type(self, value, schema_type, **kwargs):", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif 
field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def __init__(self, field: FT):\n self.field: Final[FT] = field", "def __init__(self, *args, **kwargs):\n if not args:\n raise TypeError('Field definition incorrect, please provide type')\n elif not isinstance(args[0], type):\n raise TypeError('Field input not a type')\n self.data_type = args[0]\n if ((self.data_type not in self.allowed_types and\n not issubclass(self.data_type, self.allowed_types))):\n raise TypeError('Field input type %s is not allowed' % self.data_type)\n self.check_kwargs(kwargs, self.data_type)\n # attributes\n if 'auto_update' in kwargs and kwargs['auto_update']:\n self.auto_update = self.data_type.utcnow # datetime.datetime\n if 'document_class' in kwargs and kwargs['document_class']:\n self.document_class = kwargs['document_class']\n self.validator = self.generate_validator(self.data_type, **kwargs)\n self.required = kwargs['required'] if 'required' in kwargs else True\n if 'default' in kwargs:\n self.default_value = kwargs['default']\n if not callable(self.default_value):\n validation_failed = False\n try:\n self.validator(self.default_value)\n except ValidationError as e:\n new_err = ('default value \"%s\"' % kwargs['default']) + ''.join(e.args)\n validation_failed = True\n if validation_failed:\n raise TypeError(new_err)\n # check if dict/list type and wrap copy in callable\n if isinstance(self.default_value, (dict, list)):\n def default_value_wrapper():\n return copy.deepcopy(kwargs['default'])\n self.default_value = default_value_wrapper", "def from_schema(cls, schema, *args, **kwargs):\r\n\r\n return cls(schema.get(u\"id\", u\"\"), schema, *args, **kwargs)", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def __init__(self, field: \"Attribute[_T]\") -> None:\n self.field = field", "def _gen_basic_field(name_of_field, name_of_type, the_type):\n def validate(self, x):\n return None if x is None else the_type(x)\n\n doc = \"A field which can be {name_of_type} or None\".format(name_of_type=name_of_type)\n\n return Field(name_of_field, (), {'validate': validate, '__doc__': doc})", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def column_to_bq_schema(self):\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return 
google.cloud.bigquery.SchemaField(self.name, self.dtype,\n self.mode, **kwargs)", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def _schema_type(self) -> Optional[type]:\n pass", "def getTypicalInstance(self, cls):\n instance = cls()\n for field in cls.schema.fields:\n setattr(instance, field.name,\n self.getTypicalValue(cls, field.name))\n return instance", "def __init__(self, data_type=None):\n self.type = data_type", "def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )", "def complex_type_factory(name, definition, schema):\n d = dict()\n basecls = None\n basedef = definition.basedef\n if basedef and basedef != ITSELF:\n basecls = complex_type_factory(basedef.name, basedef, schema)\n if definition.content_type.is_element_only():\n model = definition.content_type.partical.term\n complex_model(model, d, schema)\n complex_attributes(definition.attributes, d, schema)\n cls = type(name, (basecls or ComplexImp,), d)\n cls.definition = definition\n return cls", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def _new_field(self):\n field = self.domain.new_field()\n return field", "def local_type(verifield, type_name):\n from polyglot.pyapi.meta import retrieve_schema_table_fields\n from polyglot.pyapi.instance import create_instance_validators\n from polyglot.models.schema import Instance\n (tenant_id, schema_id, table_id) = type_name.split(\"::\")\n fields = retrieve_schema_table_fields(tenant_id, schema_id, table_id)\n validators = Instance._validations\n validators['instance_data'] = create_instance_validators(fields)\n instance = Instance(**instance)\n instance.validate(validators)\n instance._validations = validators\n return not((hasattr(instance, 'validation_errors') \n and instance.validation_errors) \\\n or instance.instance_data.get('validation_errors', {}))", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def set_field_by_schema(self, header, field):\n if header not in self.schema.keys():\n if settings._DISABLE_SCHEMA_MATCH:\n return\n else:\n raise InvalidRecordProperty('Record schema does not have the property \"%s\"' % header)\n\n data_type = self.schema[header]['type'].lower()\n\n if data_type == 'string':\n if Record.is_empty_str(field):\n self.fields[header] = None\n else:\n self.fields[header] = field\n return\n\n if data_type == 'integer':\n if Record.could_be_int(field):\n self.fields[header] = int(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'datetime':\n datetime_format = self.schema[header]['datetime_format'];\n if datetime_format == None:\n datetime_format = settings._STRFTIME_FORMAT\n if Record.could_be_datetime(field, datetime_format):\n self.fields[header] = datetime.strptime(field, datetime_format)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'number':\n if Record.could_be_number(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'float':\n if Record.could_be_float(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 
'boolean':\n self.fields[header] = Record.parse_boolean(field)\n return", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def __init__(self, column_type, name):\n self.column_type = column_type\n self.name = name", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def _CreateMapFieldSchema(\n self,\n field_descriptor: FieldDescriptor,\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if field_descriptor is None: # Check required by mypy.\n raise AssertionError(\"`field_descriptor` is None.\")\n\n type_name: str = _GetTypeName(field_descriptor)\n visiting.add(type_name)\n\n key_value_d = _GetMapFieldKeyValueTypes(field_descriptor)\n if key_value_d is None:\n raise AssertionError(\"`field_descriptor` doesn't have a map type.\")\n\n key_type_name = _GetTypeName(key_value_d.key)\n value_type_name = _GetTypeName(key_value_d.value)\n\n # pylint: disable=line-too-long\n # `protobuf.map` key types can be only a subset of the primitive types [1],\n # so there is definitely no composite key type to further visit, but the\n # value type \"can be any type except another map\" [1] or an array [2].\n #\n # [1]: https://developers.google.com/protocol-buffers/docs/proto#maps\n # [2]: https://developers.google.com/protocol-buffers/docs/reference/proto2-spec#map_field\n # pylint: enable=line-too-long\n self._CreateSchema(key_value_d.value, visiting)\n\n visiting.remove(type_name)\n\n self.schema_objs[type_name] = cast(\n Dict[str, Union[str, SchemaReference]], {\n \"description\":\n f\"This is a map with real key type=\\\"{key_type_name}\\\" \"\n f\"and value type=\\\"{value_type_name}\\\"\",\n \"type\": \"object\",\n \"additionalProperties\": _GetReferenceObject(value_type_name),\n })", "def get_field_def(schema, parent_type, field_ast):\n name = field_ast.name.value\n if name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:\n return SchemaMetaFieldDef\n\n elif name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:\n return TypeMetaFieldDef\n\n elif name == TypeNameMetaFieldDef.name and \\\n isinstance(parent_type, (\n GraphQLObjectType,\n GraphQLInterfaceType,\n GraphQLUnionType,\n )):\n return TypeNameMetaFieldDef\n\n elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):\n return parent_type.get_fields().get(name)", "def add(self, name, fieldtype):\r\n \r\n if name.startswith(\"_\"):\r\n raise FieldConfigurationError(\"Field names cannot start with an underscore\")\r\n elif name in self._by_name:\r\n raise FieldConfigurationError(\"Schema already has a field named %s\" % name)\r\n \r\n if callable(fieldtype):\r\n fieldtype = 
fieldtype()\r\n if not isinstance(fieldtype, FieldType):\r\n raise FieldConfigurationError(\"%r is not a FieldType object\" % fieldtype)\r\n \r\n fnum = len(self._by_number)\r\n self._numbers[name] = fnum\r\n self._by_number.append(fieldtype)\r\n self._names.append(name)\r\n self._by_name[name] = fieldtype", "def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls", "def __init__(self, defined_type, many=False, optional=False,\n validate=True):\n\n self._validate_type(defined_type)\n\n self._type = defined_type\n self._many = many\n self._optional = optional\n self._validate = validate", "def createField(selected_layer, newFieldName, newFieldType):\r\n field = ogr.FieldDefn(newFieldName, newFieldType)\r\n selected_layer.CreateField(field)", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def _schema_type(self) -> Optional[type]:\n return None", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def _get_schema(self):\n self._pick()\n return Schema()", "def __init__(self, type_):\n\n self.type = type_", "def __init__(self, data_type, other_props=None):\n if data_type not in PRIMITIVE_TYPES:\n raise AvroException('%r is not a valid primitive type.' % data_type)\n super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)", "def create_field(self, field, dim_translation=None):\n raise NotImplementedError", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def getSchema(cls):\n pass", "def register_field(mongo_field_cls, marshmallow_field_cls, available_params=()):\n\n class Builder(MetaFieldBuilder):\n AVAILABLE_PARAMS = available_params\n MARSHMALLOW_FIELD_CLS = marshmallow_field_cls\n\n register_field_builder(mongo_field_cls, Builder)", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def set_schema_class(self, schema):\n self.schema_class = schema", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def SchedulerDataclassField(default={'type': 'fifo'}, description='Hyperopt scheduler settings.'):\n\n\n class SchedulerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid scheduler from\n `ludwig.schema.hyperopt.scheduler_registry` and creates a corresponding `oneOf` JSON schema for external\n usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in scheduler_config_registry:\n scheduler_config_cls = scheduler_config_registry[value['type'].lower()]\n try:\n return scheduler_config_cls.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for scheduler: {value}, see `{opt}` definition. 
Error: {e}')\n raise ValidationError(f'Invalid params for scheduler: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(scheduler_config_registry.keys()), 'default': default['type'], 'description': 'The type of scheduler to use during hyperopt'}}, 'title': 'scheduler_options', 'allOf': get_scheduler_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in scheduler_config_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = scheduler_config_registry[default['type'].lower()]\n load_default = opt.Schema().load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': SchedulerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported scheduler type: {default['type']}. See scheduler_config_registry. Details: {e}\")", "def EncoderDataclassField(feature_type: str, default: str):\n\n\n class EncoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid encoder config from the encoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_encoder_classes(feature_type):\n enc = get_encoder_cls(feature_type, value[TYPE])\n try:\n return enc.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid encoder params: {value}, see `{enc}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for encoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n encoder_classes = list(get_encoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': encoder_classes, 'default': default}}, 'title': 'encoder_options', 'allOf': get_encoder_conds(feature_type)}\n try:\n encoder = get_encoder_cls(feature_type, default)\n load_default = encoder.Schema().load({'type': default})\n dump_default = encoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': EncoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported encoder type: {default}. See encoder_registry. 
Details: {e}')", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def __init__(self, type_: Union[ConstraintTypes, str], value: Any):\n self.type = ConstraintTypes(type_)\n self.value = value\n enforce(self.check_validity(), \"ConstraintType initialization inconsistent.\")", "def get_schema_cls() -> t.Any:\n return None", "def set_schema(self, schema):\r\n self.__schema = schema", "def idx_createFieldIndex(plominoIndex, fieldname, fieldtype='TEXT', **args):\n \n if not hasattr(plominoIndex, 'createFieldIndex'):\n plominoIndex = plominoIndex.getParentDatabase().getIndex()\n \n if not fieldname in plominoIndex.indexes():\n plominoIndex.createFieldIndex(fieldname, fieldtype, **args)", "def __init__(\n self,\n name,\n namespace,\n fields=None,\n make_fields=None,\n names=None,\n record_type=RECORD,\n doc=None,\n other_props=None\n ):\n if record_type == REQUEST:\n # Protocol requests are not named:\n super(RecordSchema, self).__init__(\n data_type=REQUEST,\n other_props=other_props,\n )\n elif record_type in [RECORD, ERROR]:\n # Register this record name in the tracker:\n super(RecordSchema, self).__init__(\n data_type=record_type,\n name=name,\n namespace=namespace,\n names=names,\n other_props=other_props,\n )\n else:\n raise SchemaParseException(\n 'Invalid record type: %r.' 
% record_type)\n\n if record_type in [RECORD, ERROR]:\n avro_name = names.get_name(name=name, namespace=namespace)\n nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)\n elif record_type == REQUEST:\n # Protocol request has no name: no need to change default namespace:\n nested_names = names\n\n if fields is None:\n fields = make_fields(names=nested_names)\n else:\n assert make_fields is None\n self._fields = tuple(fields)\n\n self._field_map = RecordSchema._make_field_map(self._fields)\n\n self._props['fields'] = fields\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, field: str):\n super().__init__()\n self.field = field", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def create_schema(self, schema):\n base = '/api/storage/v1/schema'\n svc = \"%(base)s/%(prop)s\" % {'base': base, 'prop': schema['property']}\n ret = self.rclient.get(svc)\n if ret.status == restclient.Status.OK:\n LOG.warning('Property %s already exists.', schema['property'])\n return\n ret = self.rclient.post(base, schema)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error Creating '\n 'Property: %(property)s '\n 'Type: %(type)s '\n 'Description: %(description)s '\n 'Return code: %(ret.status)d '\n 'Message: %(ret.data)s.')\n % {'property': schema['property'],\n 'type': schema['type'],\n 'description': schema['description'],\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def get_avro_translated_schema(self):\n type_conversions = {\n 'STRING': 'string',\n 'NUMERIC': {\n 'type': 'bytes',\n 'logicalType': 'decimal',\n 'precision': 38,\n 'scale': 9,\n }\n }\n\n fields = []\n # TODO([email protected]): add support for nested fields\n for bq_field in self.bq_schema:\n field_type = type_conversions[bq_field.field_type]\n\n field = {\n 'name': bq_field.name,\n 'type': field_type,\n }\n\n fields.append(field)\n\n schema_dict = {\n 'type': 'record',\n 'name': self.schema_name,\n 'fields': fields,\n }\n avro_schema = avro.schema.Parse(json.dumps(schema_dict))\n\n return avro_schema", "def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def SplitDataclassField(default: str):\n\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid split params: {value}, see `{split_class}` definition. 
Error: {error}')\n raise ValidationError(f'Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(split_config_registry.data.keys()), 'default': default}}, 'title': 'split_options', 'allOf': get_split_conds()}\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({'type': default})\n dump_default = splitter.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': SplitMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported splitter type: {default}. See split_registry. Details: {e}')", "def instance_schema(self):\n raise NotImplementedError", "def test_type_builder_builds_correct_model_for_simple_class():\n schema = [\n SchemaObject(\n name=\"TestClass\",\n properties=[\n SchemaValue(name=\"stringValue\", value_type=\"string\"),\n SchemaValue(name=\"booleanValue\", value_type=\"boolean\"),\n SchemaValue(name=\"anyValue\", value_type=\"any\"),\n SchemaValue(name=\"nullValue\", value_type=\"null\"),\n SchemaValue(name=\"optionalStringValue\", value_types=[\"null\", \"string\"]),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"TestClass\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"boolean_value\",\n key=\"booleanValue\",\n value_type=\"bool\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"any_value\", key=\"anyValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"null_value\", key=\"nullValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"optional_string_value\",\n key=\"optionalStringValue\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n depends_on=set(),\n )", "def __init__(self, model_type_id=None, external_id=None, field_values=None, title=None, duration=None, start=None, recommended=None, static_start=None, details=None, contacts=None, tags=None): # noqa: E501 # noqa: E501\n\n self._model_type_id = None\n self._external_id = None\n self._field_values = None\n self._title = None\n self._duration = None\n self._start = None\n self._recommended = None\n self._static_start = None\n self._details = None\n self._contacts = None\n self._tags = None\n self.discriminator = None\n\n self.model_type_id = model_type_id\n self.external_id = external_id\n if field_values is not None:\n self.field_values = field_values\n if title is not None:\n self.title = title\n if duration is not None:\n self.duration = duration\n if start is not None:\n self.start = start\n if recommended is not None:\n self.recommended = recommended\n if static_start is not None:\n self.static_start = static_start\n if details is not None:\n self.details = details\n if contacts is not None:\n self.contacts = contacts\n if tags is not None:\n self.tags = tags", "def gen_config_field(name_of_field, name_of_type, the_type):\n return _gen_basic_field(name_of_field, name_of_type, the_type)" ]
[ "0.7356506", "0.718628", "0.6659049", "0.6587988", "0.6470674", "0.6427356", "0.63884383", "0.6371193", "0.6355834", "0.6309193", "0.62948745", "0.6287319", "0.62753356", "0.62753356", "0.6181912", "0.6081761", "0.6023993", "0.6019796", "0.601481", "0.6001073", "0.59939086", "0.59936416", "0.59754896", "0.5951752", "0.5909134", "0.5888026", "0.5884573", "0.5852523", "0.585046", "0.5848627", "0.5840221", "0.5828837", "0.5826675", "0.58065057", "0.5791529", "0.57822067", "0.5772269", "0.5757514", "0.5751098", "0.57499534", "0.57448655", "0.5740622", "0.57374567", "0.5710822", "0.57031065", "0.56993544", "0.56832653", "0.5676252", "0.56693095", "0.5667615", "0.56433904", "0.56153023", "0.56120366", "0.559931", "0.5597253", "0.55825347", "0.5580054", "0.55690306", "0.5553702", "0.5552897", "0.55474", "0.55454195", "0.55444247", "0.5517675", "0.54828", "0.54809827", "0.5477694", "0.54773694", "0.5474083", "0.5471547", "0.54691017", "0.54677445", "0.54674315", "0.54673237", "0.54504967", "0.54489225", "0.5434377", "0.54109275", "0.54086936", "0.5407956", "0.53982353", "0.53977567", "0.5381482", "0.53756833", "0.5371922", "0.53707784", "0.53685963", "0.5367242", "0.5366724", "0.53606606", "0.53587407", "0.5356837", "0.53518057", "0.5332589", "0.5329452", "0.53280723", "0.5317737", "0.5313306", "0.5309249", "0.53057426" ]
0.5908709
25
Instantiate a new SchemaTypeInputField
def __init__(self, raw_input: Dict): self.name = raw_input.get("name") self.description = raw_input.get("description") self.type: TypeDefer = TypeDefer(raw_input.get("type")) if raw_input.get("type") is not None else None self.default_value = raw_input.get("defaultValue")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def __init__(self, field: \"SchemaTypeField\", settings: Settings):\n from qlient import helpers\n self.settings = settings\n self.name = field.name\n self.description = field.description\n self.arguments = helpers.adapt_arguments(field.args)\n self.return_type = field.type\n self._return_fields: Union[Tuple[SelectedField], None] = None", "def __init__(self, *args, **kwargs):\n if not args:\n raise TypeError('Field definition incorrect, please provide type')\n elif not isinstance(args[0], type):\n raise TypeError('Field input not a type')\n self.data_type = args[0]\n if ((self.data_type not in self.allowed_types and\n not issubclass(self.data_type, self.allowed_types))):\n raise TypeError('Field input type %s is not allowed' % self.data_type)\n self.check_kwargs(kwargs, self.data_type)\n # attributes\n if 'auto_update' in kwargs and kwargs['auto_update']:\n self.auto_update = self.data_type.utcnow # datetime.datetime\n if 'document_class' in kwargs and kwargs['document_class']:\n self.document_class = kwargs['document_class']\n self.validator = self.generate_validator(self.data_type, **kwargs)\n self.required = kwargs['required'] if 'required' in kwargs else True\n if 'default' in kwargs:\n self.default_value = kwargs['default']\n if not callable(self.default_value):\n validation_failed = False\n try:\n self.validator(self.default_value)\n except ValidationError as e:\n new_err = ('default value \"%s\"' % kwargs['default']) + ''.join(e.args)\n validation_failed = True\n if validation_failed:\n raise TypeError(new_err)\n # check if dict/list type and wrap copy in callable\n if isinstance(self.default_value, (dict, list)):\n def default_value_wrapper():\n return copy.deepcopy(kwargs['default'])\n self.default_value = default_value_wrapper", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, name, description, field_type_processor, required=False):\n FieldDescriptor.__init__(self, name, description, \n field_type_processor.extract, required)\n # add an adapt method\n self.adapt = field_type_processor.adapt", "def __init__(self, py_dict=None):\n super(TypeSchema, 
self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def __init__(\n self,\n data_type,\n name,\n index,\n has_default,\n default=_NO_DEFAULT,\n order=None,\n doc=None,\n other_props=None\n ):\n if (not isinstance(name, _str)) or (not name):\n raise SchemaParseException('Invalid record field name: %r.' % name)\n if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):\n raise SchemaParseException('Invalid record field order: %r.' % order)\n\n # All properties of this record field:\n self._props = {}\n\n self._has_default = has_default\n if other_props:\n self._props.update(other_props)\n\n self._index = index\n self._type = self._props['type'] = data_type\n self._name = self._props['name'] = name\n\n if has_default:\n self._props['default'] = default\n\n if order is not None:\n self._props['order'] = order\n\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, type_=\"text\", name=\"\"):\n super().__init__(\"input\")\n self.type = type_\n self.name = name", "def create_schema(self, schema: str):\n return", "def __init__(self, schema_type, parameter_type, parameter_name):\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def schemaized_field(field):\n return zope.schema.interfaces.IField(field)", "def __init__(self, schema ):\n self.schema = schema", "def set_input_type(self, input_type):\n if input_type is not None: self._input_type.value = input_type\n return self", "def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and “__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance", "def __init__(self, raw_field: Dict):\n self.name = raw_field.get(\"name\")\n 
self.description = raw_field.get(\"description\")\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get(\"args\", []))\n self.type: TypeDefer = TypeDefer(raw_field.get(\"type\")) if raw_field.get(\"type\") is not None else None\n self.is_deprecated: bool = raw_field.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_field.get(\"deprecationReason\")", "def __init__(self, schema=None):\n self.schema = schema or {}", "def __new__(metacls, name, bases, attributes, **kwargs):\n # Use both a namespaced mapping and a standard dict\n # as class-based records of our field attributes:\n field_index = Flat()\n field_names = {}\n \n # Stow both the Python name and the namespaced name\n # for each field attribute defined on the schema,\n # additionally manually calling __set_name__(…) if\n # we’re on a pre-3.6 version of Python:\n for attribute, value in attributes.items():\n if isinstance(value, FieldBase):\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n # This is the same as the above, but for the base\n # ancestor class – this enables field inheritance:\n for base in bases:\n parent = base.__mro__[0]\n for attribute, value in vars(parent).items():\n if isinstance(value, FieldBase) and attribute not in attributes:\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n for namespace in field_index.namespaces():\n nsfield = Namespace(field_index, namespace=namespace)\n if NEED_NAME:\n nsfield.__set_name__(None, namespace)\n attributes[namespace] = nsfield\n field_names[namespace] = nsfield\n \n # Add both the field-index and the field-names mappings\n # to the class dictionary for the new type:\n attributes['__field_index__'] = field_index\n attributes['__field_names__'] = field_names\n \n # Create and return the schema type:\n return super(MetaSchema, metacls).__new__(metacls, name,\n bases,\n attributes,\n **kwargs)", "def makeField(self,field_name,field_type,field_precision,field_scale,field_length):\n \n new_field = self.GP.CreateObject(\"field\")\n new_field.Name = field_name\n new_field.Type = field_type\n new_field.Precision = field_precision\n new_field.Scale = field_scale\n new_field.Length = field_length\n new_field.IsNullable = True\n \n return new_field", "def _gen_basic_field(name_of_field, name_of_type, the_type):\n def validate(self, x):\n return None if x is None else the_type(x)\n\n doc = \"A field which can be {name_of_type} or None\".format(name_of_type=name_of_type)\n\n return Field(name_of_field, (), {'validate': validate, '__doc__': doc})", "def create_field(self, label, value_type, key=None):\n payload = self._build_params(label=label, value_type=value_type, key=key)\n return Field.deserialize(self._post('fields', None, payload))", "def schema(value: Any) -> Schema:\n raise InputTypeError(value)", "def __init__(self, data_type=None):\n self.type = data_type", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the 
schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def _make_field(index, field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' % data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def convert_type(self, value, schema_type, **kwargs):", "def from_schema(cls, schema, *args, **kwargs):\r\n\r\n return cls(schema.get(u\"id\", u\"\"), schema, *args, **kwargs)", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def __new__(cls,name,description,args_in,required=True,data_type=None,schema=None):\n mydict={\n \"name\":name,\n \"description\":description,\n \"in\":args_in,\n \"required\":required,\n \"schema\":schema,\n \"type\":data_type,\n }\n if args_in!=\"body\":\n mydict[\"type\"]=data_type\n return mydict", "def getTypicalInstance(self, cls):\n instance = cls()\n for field in cls.schema.fields:\n setattr(instance, field.name,\n self.getTypicalValue(cls, field.name))\n return instance", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def PreprocessingDataclassField(feature_type: str):\n\n\n class PreprocessingMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid preprocessing config from the\n preprocessing_registry and creates a corresponding JSON schema for 
external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if feature_type in preprocessing_registry:\n pre = preprocessing_registry[feature_type]\n try:\n return pre.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid preprocessing params: {value}, see `{pre}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for preprocessor: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n preprocessor_cls = preprocessing_registry[feature_type]\n props = schema_utils.unload_jsonschema_from_marshmallow_class(preprocessor_cls)['properties']\n return {'type': 'object', 'properties': props, 'title': 'preprocessing_options', 'additionalProperties': True}\n try:\n preprocessor = preprocessing_registry[feature_type]\n load_default = preprocessor.Schema().load({'feature_type': feature_type})\n dump_default = preprocessor.Schema().dump({'feature_type': feature_type})\n return field(metadata={'marshmallow_field': PreprocessingMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported preprocessing type: {feature_type}. See preprocessing_registry. Details: {e}')", "def mutate(self, info, input):\n # Convert input to dictionary\n data = api_utils.input_to_dictionary(input)\n data_source_type = Operation('ModelDataSourceType').create(**data)\n return CreateDataSourceType(data_source_type=data_source_type)", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def generate_values_type(self) -> typing.Any:\n values_type = self.type.__args__[1]\n\n name = self.get_singular_name(self.name)\n self.internal_field = AvroField(name, values_type)\n self.values_type = self.internal_field.get_avro_type()", "def __init__(self, field: \"Attribute[_T]\") -> None:\n self.field = field", "def schema(self, schema, in_='formData'):\n parameters = core.parameters_from_object_schema(schema, in_=in_)\n return compose(*map(self.parameter, parameters))", "def __init__(self, field: str):\n super().__init__()\n self.field = field", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. 
We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def local_type(verifield, type_name):\n from polyglot.pyapi.meta import retrieve_schema_table_fields\n from polyglot.pyapi.instance import create_instance_validators\n from polyglot.models.schema import Instance\n (tenant_id, schema_id, table_id) = type_name.split(\"::\")\n fields = retrieve_schema_table_fields(tenant_id, schema_id, table_id)\n validators = Instance._validations\n validators['instance_data'] = create_instance_validators(fields)\n instance = Instance(**instance)\n instance.validate(validators)\n instance._validations = validators\n return not((hasattr(instance, 'validation_errors') \n and instance.validation_errors) \\\n or instance.instance_data.get('validation_errors', {}))", "def add(self, name, fieldtype):\r\n \r\n if name.startswith(\"_\"):\r\n raise FieldConfigurationError(\"Field names cannot start with an underscore\")\r\n elif name in self._by_name:\r\n raise FieldConfigurationError(\"Schema already has a field named %s\" % name)\r\n \r\n if callable(fieldtype):\r\n fieldtype = fieldtype()\r\n if not isinstance(fieldtype, FieldType):\r\n raise FieldConfigurationError(\"%r is not a FieldType object\" % fieldtype)\r\n \r\n fnum = len(self._by_number)\r\n self._numbers[name] = fnum\r\n self._by_number.append(fieldtype)\r\n self._names.append(name)\r\n self._by_name[name] = fieldtype", "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def _new_field(self):\n field = self.domain.new_field()\n return field", "def __init__(self, field):\n super().__init__()\n self.field = str(field)", "def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def SplitDataclassField(default: str):\n\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a 
corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid split params: {value}, see `{split_class}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(split_config_registry.data.keys()), 'default': default}}, 'title': 'split_options', 'allOf': get_split_conds()}\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({'type': default})\n dump_default = splitter.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': SplitMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported splitter type: {default}. See split_registry. Details: {e}')", "def EncoderDataclassField(feature_type: str, default: str):\n\n\n class EncoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid encoder config from the encoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_encoder_classes(feature_type):\n enc = get_encoder_cls(feature_type, value[TYPE])\n try:\n return enc.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid encoder params: {value}, see `{enc}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for encoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n encoder_classes = list(get_encoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': encoder_classes, 'default': default}}, 'title': 'encoder_options', 'allOf': get_encoder_conds(feature_type)}\n try:\n encoder = get_encoder_cls(feature_type, default)\n load_default = encoder.Schema().load({'type': default})\n dump_default = encoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': EncoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported encoder type: {default}. See encoder_registry. 
Details: {e}')", "def column_to_bq_schema(self):\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return google.cloud.bigquery.SchemaField(self.name, self.dtype,\n self.mode, **kwargs)", "def __init__(self, field: FT):\n self.field: Final[FT] = field", "def __init__(self, type_):\n\n self.type = type_", "def SchedulerDataclassField(default={'type': 'fifo'}, description='Hyperopt scheduler settings.'):\n\n\n class SchedulerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid scheduler from\n `ludwig.schema.hyperopt.scheduler_registry` and creates a corresponding `oneOf` JSON schema for external\n usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in scheduler_config_registry:\n scheduler_config_cls = scheduler_config_registry[value['type'].lower()]\n try:\n return scheduler_config_cls.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for scheduler: {value}, see `{opt}` definition. Error: {e}')\n raise ValidationError(f'Invalid params for scheduler: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(scheduler_config_registry.keys()), 'default': default['type'], 'description': 'The type of scheduler to use during hyperopt'}}, 'title': 'scheduler_options', 'allOf': get_scheduler_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in scheduler_config_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = scheduler_config_registry[default['type'].lower()]\n load_default = opt.Schema().load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': SchedulerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported scheduler type: {default['type']}. See scheduler_config_registry. 
Details: {e}\")", "def set_input_type_class(self, input_type_class):\n if input_type_class is not None:\n self._input_type.expected = (input_type_class,)\n return self", "def __init__(self, shape):\n self.shape = literal_eval(shape)\n self.keras_layer = keras.layers.Input(shape=self.shape)\n self.type = 'Input'\n self.name = ':'.join([self.type, str(self.shape)])", "def __init__(self, column_type, name):\n self.column_type = column_type\n self.name = name", "def get_field_def(schema, parent_type, field_ast):\n name = field_ast.name.value\n if name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:\n return SchemaMetaFieldDef\n\n elif name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:\n return TypeMetaFieldDef\n\n elif name == TypeNameMetaFieldDef.name and \\\n isinstance(parent_type, (\n GraphQLObjectType,\n GraphQLInterfaceType,\n GraphQLUnionType,\n )):\n return TypeNameMetaFieldDef\n\n elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):\n return parent_type.get_fields().get(name)", "def __init__(__self__, *,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_arn: Optional[pulumi.Input[str]] = None,\n version_id: Optional[pulumi.Input[str]] = None):\n if type_name is not None:\n pulumi.set(__self__, \"type_name\", type_name)\n if type_version_arn is not None:\n pulumi.set(__self__, \"type_version_arn\", type_version_arn)\n if version_id is not None:\n pulumi.set(__self__, \"version_id\", version_id)", "def get_field_type_from_schema(schema_type, field_name):\n if field_name == '@class':\n return GraphQLString\n else:\n if field_name not in schema_type.fields:\n raise AssertionError(u'Field {} passed validation but was not present on type '\n u'{}'.format(field_name, schema_type))\n\n # Validation guarantees that the field must exist in the schema.\n return schema_type.fields[field_name].type", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls", "def _schema_type(self) -> Optional[type]:\n pass", "def html5_field(name, base):\n return type(str(\"\"), (base,), {\"input_type\": name})", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def __init__(self, data_type, other_props=None):\n if data_type not in PRIMITIVE_TYPES:\n raise AvroException('%r is not a valid primitive type.' 
% data_type)\n super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)", "def SplitDataclassField(default: str):\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid split config from the split_registry and\n creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in split_config_registry.data:\n split_class = split_config_registry.data[value[TYPE]]\n try:\n return split_class.get_schema_cls().Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(\n f\"Invalid split params: {value}, see `{split_class}` definition. Error: {error}\"\n )\n raise ValidationError(\n f\"Invalid params for splitter: {value}, expected dict with at least a valid `type` attribute.\"\n )\n raise ValidationError(\"Field should be None or dict\")\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"type\": \"string\", \"enum\": list(split_config_registry.data.keys()), \"default\": default},\n },\n \"title\": \"split_options\",\n \"allOf\": get_split_conds(),\n }\n\n try:\n splitter = split_config_registry.data[default]\n load_default = splitter.Schema().load({\"type\": default})\n dump_default = splitter.Schema().dump({\"type\": default})\n\n return field(\n metadata={\n \"marshmallow_field\": SplitMarshmallowField(\n allow_none=False,\n dump_default=dump_default,\n load_default=load_default,\n )\n },\n default_factory=lambda: load_default,\n )\n except Exception as e:\n raise ValidationError(f\"Unsupported splitter type: {default}. See split_registry. 
\" f\"Details: {e}\")", "def __init__(self, type_: Union[ConstraintTypes, str], value: Any):\n self.type = ConstraintTypes(type_)\n self.value = value\n enforce(self.check_validity(), \"ConstraintType initialization inconsistent.\")", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def input_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationInputInputSchemaArgs']:\n return pulumi.get(self, \"input_schema\")", "def set_field_by_schema(self, header, field):\n if header not in self.schema.keys():\n if settings._DISABLE_SCHEMA_MATCH:\n return\n else:\n raise InvalidRecordProperty('Record schema does not have the property \"%s\"' % header)\n\n data_type = self.schema[header]['type'].lower()\n\n if data_type == 'string':\n if Record.is_empty_str(field):\n self.fields[header] = None\n else:\n self.fields[header] = field\n return\n\n if data_type == 'integer':\n if Record.could_be_int(field):\n self.fields[header] = int(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'datetime':\n datetime_format = self.schema[header]['datetime_format'];\n if datetime_format == None:\n datetime_format = settings._STRFTIME_FORMAT\n if Record.could_be_datetime(field, datetime_format):\n self.fields[header] = datetime.strptime(field, datetime_format)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'number':\n if Record.could_be_number(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'float':\n if Record.could_be_float(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'boolean':\n self.fields[header] = Record.parse_boolean(field)\n return", "def field():\n field = Field()\n field.type = 'TextLine'\n return field", "def _get_schema(self):\n self._pick()\n return Schema()", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def Field( # pylint: disable=invalid-name\n cls, name=None, description=None, deprecation_reason=None, required=False\n ):\n return graphene.Field(\n cls._meta.output,\n args=cls._meta.arguments,\n resolver=cls._meta.publish,\n name=name,\n description=description,\n deprecation_reason=deprecation_reason,\n required=required,\n )", "def DefaultsDataclassField(feature_type: str):\n\n\n class DefaultMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid defaults config from the feature_registry\n and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n input_feature_class = input_mixin_registry[feature_type]\n output_feature_class = output_mixin_registry.get(feature_type, None)\n try:\n input_schema = input_feature_class.Schema().load(value)\n if output_feature_class:\n output_schema = 
output_feature_class.Schema().load(value)\n combined = input_schema + output_schema\n else:\n combined = input_schema\n return combined\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid params: {value}, see `{attr}` definition. Error: {error}')\n raise ValidationError(f'Invalid params: {value}')\n\n @staticmethod\n def _jsonschema_type_mapping():\n input_feature_cls = input_mixin_registry.get(feature_type)\n output_feature_cls = output_mixin_registry.get(feature_type, None)\n input_props = schema_utils.unload_jsonschema_from_marshmallow_class(input_feature_cls)['properties']\n if output_feature_cls:\n output_props = schema_utils.unload_jsonschema_from_marshmallow_class(output_feature_cls)['properties']\n combined_props = {**output_props, **input_props}\n else:\n combined_props = input_props\n return {'type': 'object', 'properties': combined_props, 'additionalProperties': False, 'title': 'defaults_options'}\n try:\n input_cls = input_mixin_registry[feature_type]\n output_cls = output_mixin_registry.get(feature_type, None)\n dump_default = input_cls.Schema().dump({'type': feature_type})\n if output_cls:\n output_dump = output_cls.Schema().dump({'type': feature_type})\n dump_default = {**output_dump, **dump_default}\n load_default = input_cls.Schema().load({'type': feature_type})\n if output_cls:\n output_load = output_cls.Schema().load({'type': feature_type})\n for k in dump_default.keys():\n if getattr(load_default, k, -1) == -1:\n setattr(load_default, k, getattr(output_load, k))\n return field(metadata={'marshmallow_field': DefaultMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported feature type: {feature_type}. See input_type_registry. 
Details: {e}')", "def createField(selected_layer, newFieldName, newFieldType):\r\n field = ogr.FieldDefn(newFieldName, newFieldType)\r\n selected_layer.CreateField(field)", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def get_field_type(\n self, field_type: Union[Type, str], collection_name: str\n ) -> SchemaFieldDataType:\n TypeClass: Optional[Type] = _field_type_mapping.get(field_type)\n\n if TypeClass is None:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n TypeClass = NullTypeClass\n\n return SchemaFieldDataType(type=TypeClass())", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def _uifield_from_dataclass(field: dc.Field) -> UiField:\n default = field.default if field.default is not dc.MISSING else Undefined\n dfactory = (\n field.default_factory if field.default_factory is not dc.MISSING else None\n )\n extra = {k: v for k, v in field.metadata.items() if k in _UI_FIELD_NAMES}\n\n return UiField(\n name=field.name,\n type=field.type,\n default=default,\n default_factory=dfactory,\n _native_field=field,\n **extra,\n )", "def gen_config_field(name_of_field, name_of_type, the_type):\n return _gen_basic_field(name_of_field, name_of_type, the_type)", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def instance_schema(self):\n raise NotImplementedError", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def CreateInput(self, name=None, type=None, data=None):\n\n\n inp = self._input_registry.Create(name, type, data)\n\n self._inputs.append(inp)\n\n return inp", "def __init__(self,name,value,*args,**kargs):\n self.ndim = len(value)\n if 'fields' in kargs:\n fields = kargs['fields']\n else:\n fields = [ str(i) for i in range(self.ndim) ]\n\n self.input = QtGui.QWidget(*args)\n InputItem.__init__(self,name,*args,**kargs)\n #self.layout().insertWidget(1,self.input)\n\n #layout = QtGui.QHBoxLayout(self)\n #self.input.setLayout(layout)\n layout = self.layout()\n self.fields = []\n for fld,val in zip(fields,value):\n f = InputInteger(fld,val)\n self.fields.append(f)\n layout.addWidget(f)", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def create_field(self, field, dim_translation=None):\n raise NotImplementedError", "def _schema_type(self) -> Optional[type]:\n return 
PanelSchema" ]
[ "0.67198443", "0.657528", "0.6384678", "0.62780166", "0.61373305", "0.61338854", "0.61338854", "0.61186373", "0.5996262", "0.5976817", "0.59587806", "0.59354585", "0.58839595", "0.5859686", "0.58279943", "0.5807643", "0.5797024", "0.5765275", "0.5757921", "0.57278675", "0.57172513", "0.5687493", "0.5652878", "0.5652438", "0.5641219", "0.56329453", "0.5626507", "0.5624692", "0.559402", "0.55913275", "0.55828583", "0.5569598", "0.5566706", "0.55618376", "0.54977965", "0.5491662", "0.54338163", "0.54329556", "0.5429517", "0.54180807", "0.5410129", "0.5401123", "0.53992367", "0.5395043", "0.5393786", "0.5388388", "0.5382265", "0.5381717", "0.5374773", "0.5373465", "0.5366717", "0.5355355", "0.53484726", "0.5348133", "0.53479135", "0.5345022", "0.53419393", "0.53393847", "0.5336301", "0.53359205", "0.53358674", "0.5327108", "0.5321505", "0.53194904", "0.53125197", "0.5302357", "0.52912956", "0.5289047", "0.52864647", "0.528225", "0.52776337", "0.5273794", "0.5263028", "0.5258336", "0.52540475", "0.524876", "0.5230484", "0.5227368", "0.52272147", "0.52247924", "0.52238035", "0.522005", "0.5212384", "0.52089316", "0.52078754", "0.519873", "0.5167159", "0.5164449", "0.51576", "0.5154005", "0.5136046", "0.51341397", "0.51310277", "0.51302195", "0.51156545", "0.5113607", "0.51087654", "0.5108599", "0.5105892", "0.51049125" ]
0.5212429
82
Instantiate a new SchemaTypeInterface
def __init__(self, raw_interface: Dict): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_schema(self, schema: str):\n return", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def __init__(self, schema=None):\n self.schema = schema or {}", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def getSchema(cls):\n pass", "def __init__(self, schema ):\n self.schema = schema", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n 
self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def instance_schema(self):\n raise NotImplementedError", "def _get_schema(self):\n self._pick()\n return Schema()", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def from_schema(cls, schema, *args, **kwargs):\r\n\r\n return cls(schema.get(u\"id\", u\"\"), schema, *args, **kwargs)", "def __init__(self, schema_type, parameter_type, parameter_name):\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def complex_type_factory(name, definition, schema):\n d = dict()\n basecls = None\n basedef = definition.basedef\n if basedef and basedef != ITSELF:\n basecls = complex_type_factory(basedef.name, basedef, schema)\n if definition.content_type.is_element_only():\n model = definition.content_type.partical.term\n complex_model(model, d, schema)\n 
complex_attributes(definition.attributes, d, schema)\n cls = type(name, (basecls or ComplexImp,), d)\n cls.definition = definition\n return cls", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def _schema_type(self) -> Optional[type]:\n pass", "def get_schema_cls() -> t.Any:\n return None", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def __init__(self, name):\n super(SchemaStub, self).__init__()\n self.model = SchemaStub._ModelStub()\n self.name = name", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def __new__(metacls, name, bases, attributes, **kwargs):\n # Use both a namespaced mapping and a standard dict\n # as class-based records of our field attributes:\n field_index = Flat()\n field_names = {}\n \n # Stow both the Python name and the namespaced name\n # for each field attribute defined on the schema,\n # additionally manually calling __set_name__(…) if\n # we’re on a pre-3.6 version of Python:\n for attribute, value in attributes.items():\n if isinstance(value, FieldBase):\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n # This is the same as the above, but for the base\n # ancestor class – this enables field inheritance:\n for base in bases:\n parent = base.__mro__[0]\n for attribute, value in vars(parent).items():\n if isinstance(value, FieldBase) and attribute not in attributes:\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n for namespace in field_index.namespaces():\n nsfield = Namespace(field_index, namespace=namespace)\n if NEED_NAME:\n nsfield.__set_name__(None, namespace)\n attributes[namespace] = nsfield\n field_names[namespace] = nsfield\n \n # Add both the field-index and the field-names mappings\n # to the class dictionary for the new type:\n attributes['__field_index__'] = field_index\n attributes['__field_names__'] = field_names\n \n # Create and return the schema type:\n return super(MetaSchema, metacls).__new__(metacls, name,\n bases,\n attributes,\n **kwargs)", "def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and 
“__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def set_schema_class(self, schema):\n self.schema_class = schema", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def _CreateEnumSchema(\n self,\n descriptor: EnumDescriptor,\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n enum_schema_obj: EnumSchema = {\n \"type\": \"string\",\n }\n\n if descriptor.values:\n enum_schema_obj[\"enum\"] = (\n tuple([enum_value.name for enum_value in descriptor.values]))\n enum_schema_obj[\"description\"] = (\"\\n\".join([\n f\"{enum_value.name} == {enum_value.number}\"\n for enum_value in descriptor.values\n ]))\n else:\n enum_schema_obj[\"enum\"] = ()\n\n self.schema_objs[_GetTypeName(descriptor)] = enum_schema_obj", "def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def _schema_type(self) -> Optional[type]:\n return ImageSchema", "def get_schema(self, name):\n return Schema(self, name)", "def schema(cls):\n return Schema.get_instance(cls)", "def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", 
"def schemaized_field(field):\n return zope.schema.interfaces.IField(field)", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' % data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def schema(self, schema):\n # type: (object) -> None\n\n if schema is not None:\n if not isinstance(schema, object):\n raise TypeError(\"Invalid type for `schema`, type has to be `object`\")\n\n self._schema = schema", "def set_schema(self, schema):\r\n self.__schema = schema", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def make_schema(obj):\n\n if not isinstance(obj, Schema):\n if isinstance(obj, dict):\n return DictStructure(obj)\n elif isinstance(obj, list):\n return ListStructure(obj)\n elif isinstance(obj, (int, float, str, bool)) or (obj is None):\n return Value(obj)\n else:\n raise ValueError(f\"object {obj} cannot be represented as a JSON Structure\")\n else:\n return obj", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def __init__(self, py_dict=None):\n super(EdgeNATRulesSchema, self).__init__()\n self.set_data_type('xml')\n self.natRule = EdgeNATRuleSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def schemaInitTypes():\n libxml2mod.xmlSchemaInitTypes()", "def _schema_type(self) -> Optional[type]:\n return None", "def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def schema(value: Any) -> Schema:\n raise InputTypeError(value)", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def schema(self, schema):\n self._schema = schema", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = 
concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def extend_schema(schema, documentAST=None):\n\n assert isinstance(schema, GraphQLSchema), \"Must provide valid GraphQLSchema\"\n assert documentAST and isinstance(\n documentAST, ast.Document\n ), \"Must provide valid Document AST\"\n\n # Collect the type definitions and extensions found in the document.\n type_definition_map = {}\n type_extensions_map = defaultdict(list)\n\n for _def in documentAST.definitions:\n if isinstance(\n _def,\n (\n ast.ObjectTypeDefinition,\n ast.InterfaceTypeDefinition,\n ast.EnumTypeDefinition,\n ast.UnionTypeDefinition,\n ast.ScalarTypeDefinition,\n ast.InputObjectTypeDefinition,\n ),\n ):\n # Sanity check that none of the defined types conflict with the\n # schema's existing types.\n type_name = _def.name.value\n if schema.get_type(type_name):\n raise GraphQLError(\n (\n 'Type \"{}\" already exists in the schema. It cannot also '\n + \"be defined in this type definition.\"\n ).format(type_name),\n [_def],\n )\n\n type_definition_map[type_name] = _def\n elif isinstance(_def, ast.TypeExtensionDefinition):\n # Sanity check that this type extension exists within the\n # schema's existing types.\n extended_type_name = _def.definition.name.value\n existing_type = schema.get_type(extended_type_name)\n if not existing_type:\n raise GraphQLError(\n (\n 'Cannot extend type \"{}\" because it does not '\n + \"exist in the existing schema.\"\n ).format(extended_type_name),\n [_def.definition],\n )\n if not isinstance(existing_type, GraphQLObjectType):\n raise GraphQLError(\n 'Cannot extend non-object type \"{}\".'.format(extended_type_name),\n [_def.definition],\n )\n\n type_extensions_map[extended_type_name].append(_def)\n\n # Below are functions used for producing this schema that have closed over\n # this scope and have access to the schema, cache, and newly defined types.\n\n def get_type_from_def(type_def):\n type = _get_named_type(type_def.name)\n assert type, \"Invalid schema\"\n return type\n\n def get_type_from_AST(astNode):\n type = _get_named_type(astNode.name.value)\n if not type:\n raise GraphQLError(\n (\n 'Unknown type: \"{}\". 
Ensure that this type exists '\n + \"either in the original schema, or is added in a type definition.\"\n ).format(astNode.name.value),\n [astNode],\n )\n return type\n\n # Given a name, returns a type from either the existing schema or an\n # added type.\n def _get_named_type(typeName):\n cached_type_def = type_def_cache.get(typeName)\n if cached_type_def:\n return cached_type_def\n\n existing_type = schema.get_type(typeName)\n if existing_type:\n type_def = extend_type(existing_type)\n type_def_cache[typeName] = type_def\n return type_def\n\n type_ast = type_definition_map.get(typeName)\n if type_ast:\n type_def = build_type(type_ast)\n type_def_cache[typeName] = type_def\n return type_def\n\n # Given a type's introspection result, construct the correct\n # GraphQLType instance.\n def extend_type(type):\n if isinstance(type, GraphQLObjectType):\n return extend_object_type(type)\n if isinstance(type, GraphQLInterfaceType):\n return extend_interface_type(type)\n if isinstance(type, GraphQLUnionType):\n return extend_union_type(type)\n return type\n\n def extend_object_type(type):\n return GraphQLObjectType(\n name=type.name,\n description=type.description,\n interfaces=lambda: extend_implemented_interfaces(type),\n fields=lambda: extend_field_map(type),\n )\n\n def extend_interface_type(type):\n return GraphQLInterfaceType(\n name=type.name,\n description=type.description,\n fields=lambda: extend_field_map(type),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_union_type(type):\n return GraphQLUnionType(\n name=type.name,\n description=type.description,\n types=list(map(get_type_from_def, type.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_implemented_interfaces(type):\n interfaces = list(map(get_type_from_def, type.interfaces))\n\n # If there are any extensions to the interfaces, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for namedType in extension.definition.interfaces:\n interface_name = namedType.name.value\n if any([_def.name == interface_name for _def in interfaces]):\n raise GraphQLError(\n (\n 'Type \"{}\" already implements \"{}\". '\n + \"It cannot also be implemented in this type extension.\"\n ).format(type.name, interface_name),\n [namedType],\n )\n interfaces.append(get_type_from_AST(namedType))\n\n return interfaces\n\n def extend_field_map(type):\n new_field_map = OrderedDict()\n old_field_map = type.fields\n for field_name, field in old_field_map.items():\n new_field_map[field_name] = GraphQLField(\n extend_field_type(field.type),\n description=field.description,\n deprecation_reason=field.deprecation_reason,\n args=field.args,\n resolver=cannot_execute_client_schema,\n )\n\n # If there are any extensions to the fields, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for field in extension.definition.fields:\n field_name = field.name.value\n if field_name in old_field_map:\n raise GraphQLError(\n (\n 'Field \"{}.{}\" already exists in the '\n + \"schema. 
It cannot also be defined in this type extension.\"\n ).format(type.name, field_name),\n [field],\n )\n new_field_map[field_name] = GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n\n return new_field_map\n\n def extend_field_type(type):\n if isinstance(type, GraphQLList):\n return GraphQLList(extend_field_type(type.of_type))\n if isinstance(type, GraphQLNonNull):\n return GraphQLNonNull(extend_field_type(type.of_type))\n return get_type_from_def(type)\n\n def build_type(type_ast):\n _type_build = {\n ast.ObjectTypeDefinition: build_object_type,\n ast.InterfaceTypeDefinition: build_interface_type,\n ast.UnionTypeDefinition: build_union_type,\n ast.ScalarTypeDefinition: build_scalar_type,\n ast.EnumTypeDefinition: build_enum_type,\n ast.InputObjectTypeDefinition: build_input_object_type,\n }\n func = _type_build.get(type(type_ast))\n if func:\n return func(type_ast)\n\n def build_object_type(type_ast):\n return GraphQLObjectType(\n type_ast.name.value,\n interfaces=lambda: build_implemented_interfaces(type_ast),\n fields=lambda: build_field_map(type_ast),\n )\n\n def build_interface_type(type_ast):\n return GraphQLInterfaceType(\n type_ast.name.value,\n fields=lambda: build_field_map(type_ast),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_union_type(type_ast):\n return GraphQLUnionType(\n type_ast.name.value,\n types=list(map(get_type_from_AST, type_ast.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_scalar_type(type_ast):\n return GraphQLScalarType(\n type_ast.name.value,\n serialize=lambda *args, **kwargs: None,\n # Note: validation calls the parse functions to determine if a\n # literal value is correct. Returning null would cause use of custom\n # scalars to always fail validation. 
Returning false causes them to\n # always pass validation.\n parse_value=lambda *args, **kwargs: False,\n parse_literal=lambda *args, **kwargs: False,\n )\n\n def build_enum_type(type_ast):\n return GraphQLEnumType(\n type_ast.name.value,\n values={v.name.value: GraphQLEnumValue() for v in type_ast.values},\n )\n\n def build_input_object_type(type_ast):\n return GraphQLInputObjectType(\n type_ast.name.value,\n fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),\n )\n\n def build_implemented_interfaces(type_ast):\n return list(map(get_type_from_AST, type_ast.interfaces))\n\n def build_field_map(type_ast):\n return {\n field.name.value: GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n for field in type_ast.fields\n }\n\n def build_input_values(values, input_type=GraphQLArgument):\n input_values = OrderedDict()\n for value in values:\n type = build_field_type(value.type)\n input_values[value.name.value] = input_type(\n type, default_value=value_from_ast(value.default_value, type)\n )\n return input_values\n\n def build_field_type(type_ast):\n if isinstance(type_ast, ast.ListType):\n return GraphQLList(build_field_type(type_ast.type))\n if isinstance(type_ast, ast.NonNullType):\n return GraphQLNonNull(build_field_type(type_ast.type))\n return get_type_from_AST(type_ast)\n\n # If this document contains no new types, then return the same unmodified\n # GraphQLSchema instance.\n if not type_extensions_map and not type_definition_map:\n return schema\n\n # A cache to use to store the actual GraphQLType definition objects by name.\n # Initialize to the GraphQL built in scalars and introspection types. All\n # functions below are inline so that this type def cache is within the scope\n # of the closure.\n\n type_def_cache = {\n \"String\": GraphQLString,\n \"Int\": GraphQLInt,\n \"Float\": GraphQLFloat,\n \"Boolean\": GraphQLBoolean,\n \"ID\": GraphQLID,\n \"__Schema\": __Schema,\n \"__Directive\": __Directive,\n \"__DirectiveLocation\": __DirectiveLocation,\n \"__Type\": __Type,\n \"__Field\": __Field,\n \"__InputValue\": __InputValue,\n \"__EnumValue\": __EnumValue,\n \"__TypeKind\": __TypeKind,\n }\n\n # Get the root Query, Mutation, and Subscription types.\n query_type = get_type_from_def(schema.get_query_type())\n\n existing_mutation_type = schema.get_mutation_type()\n mutationType = (\n existing_mutation_type and get_type_from_def(existing_mutation_type) or None\n )\n\n existing_subscription_type = schema.get_subscription_type()\n subscription_type = (\n existing_subscription_type\n and get_type_from_def(existing_subscription_type)\n or None\n )\n\n # Iterate through all types, getting the type definition for each, ensuring\n # that any type not directly referenced by a field will get created.\n types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]\n\n # Do the same with new types, appending to the list of defined types.\n types += [get_type_from_AST(_def) for _def in type_definition_map.values()]\n\n # Then produce and return a Schema with these types.\n return GraphQLSchema(\n query=query_type,\n mutation=mutationType,\n subscription=subscription_type,\n # Copy directives.\n directives=schema.get_directives(),\n types=types,\n )", "def get_schema(cls):\n return cls.schema()", "def load_schema_datatype_into_networkx(schema):\n\n G = nx.DiGraph()\n for record in schema[\"@graph\"]:\n if record[\"@id\"] in DATATYPES:\n G.add_node(\n record[\"@id\"],\n 
uri=record[\"@id\"],\n description=record[\"rdfs:comment\"],\n )\n if \"rdfs:subClassOf\" in record:\n parents = dict2list(record[\"rdfs:subClassOf\"])\n for _parent in parents:\n if _parent[\"@id\"] != \"rdfs:Class\":\n G.add_edge(_parent[\"@id\"], record[\"@id\"])\n elif \"@type\" in record and \"http://schema.org/DataType\" in record[\"@type\"]:\n G.add_edge(\"http://schema.org/DataType\", record[\"@id\"])\n return G", "def registration_schema(self, ctx):\n schema = RegistrationSchema()\n schema.context['ctx'] = ctx\n return schema", "def get_schema() -> dict:\n raise NotImplementedError()", "def __init__(self, defined_type, many=False, optional=False,\n validate=True):\n\n self._validate_type(defined_type)\n\n self._type = defined_type\n self._many = many\n self._optional = optional\n self._validate = validate", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def __init__(\n self,\n data_type,\n name=None,\n namespace=None,\n names=None,\n other_props=None,\n ):\n assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)\n self._avro_name = names.get_name(name=name, namespace=namespace)\n\n super(NamedSchema, self).__init__(data_type, other_props)\n\n names.register(self)\n\n self._props['name'] = self.name\n if self.namespace:\n self._props['namespace'] = self.namespace", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def schema() -> None:\n pass", "def _schema_builder(mocker):\n return mocker.create_autospec(SchemaBuilder)", "def __init__(self, field: \"SchemaTypeField\", settings: Settings):\n from qlient import helpers\n self.settings = settings\n self.name = field.name\n self.description = field.description\n self.arguments = helpers.adapt_arguments(field.args)\n self.return_type = field.type\n self._return_fields: Union[Tuple[SelectedField], None] = None", "def setup_schema(BaseDao, session):\n def setup_schema_fn():\n for class_ in BaseDao._decl_class_registry.values():\n if hasattr(class_, '__tablename__'):\n if class_.__name__.endswith('Schema'):\n raise ModelConversionError(\n \"For safety, setup_schema can not be used when a\"\n \"Model class ends with 'Schema'\"\n )\n\n class Meta(object):\n model = class_\n sqla_session = session\n dump_only = ('pkId', 'created', 'modified')\n\n schema_class_name = '%sSchema' % class_.__name__\n\n schema_class = type(\n schema_class_name,\n (ModelSchema,),\n {'Meta': Meta}\n )\n\n setattr(class_, '__marshmallow__', schema_class)\n\n return setup_schema_fn", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def ensure_schema(schemalike, must_have_constraints=True):\n if isinstance(schemalike, dict):\n schema = schemalike\n elif isinstance(schemalike, str):\n try:\n schema = ESSchemaClass.get(id=schemalike).validation.to_dict()\n except elasticsearch.exceptions.NotFoundError:\n raise ValueError(f\"cannot find schema class: {schemalike}.\")\n else:\n raise TypeError(\"the json schema should be a dict or an id to locate it.\")\n\n if \"type\" not in schema and must_have_constraints:\n raise ValueError(\"the schema specified does not support validation.\")\n\n return schema", "def 
_create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def resolve_schema_instance(schema: Union[BaseModel, str]) -> BaseModel:\n from smpa.schemas.core import CoreGetSchema, CoreListSchema\n from smpa.schemas.auth import LoginSchema\n\n if schema == 'CoreListSchema':\n return CoreListSchema\n elif schema == 'CoreGetSchema':\n return CoreGetSchema\n elif schema == 'LoginSchema':\n return LoginSchema\n\n if isinstance(schema, type) and issubclass(schema, BaseModel):\n return schema()\n if isinstance(schema, BaseModel):\n return schema\n try:\n return model_registry.get_class(schema)()\n except RegistryError:\n raise ValueError(\n \"{!r} is not a BaseModel subclass or instance and has not\"\n \" been registered in the model registry.\".format(schema)\n )", "def Instance(self) -> TypeManager:", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def test_adaptToIType(self):\n typeInstance = igwt.IType(Change())", "def schema(self):\n raise NotImplementedError", "def __init__(self, py_dict=None):\n super(RuntimeNicInfoSchema, self).__init__()\n self.set_data_type('xml')\n self.index = None\n self.label = None\n self.network = NetworkSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def from_pyarrow(cls, pyarrow_schema):\n from ibis.formats.pyarrow import PyArrowSchema\n\n return PyArrowSchema.to_ibis(pyarrow_schema)", "def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return SchemaStructure(constructors=constructors, methods=methods)", "def test_type_builder_builds_correct_model_for_simple_class():\n schema = [\n SchemaObject(\n name=\"TestClass\",\n properties=[\n SchemaValue(name=\"stringValue\", value_type=\"string\"),\n SchemaValue(name=\"booleanValue\", value_type=\"boolean\"),\n SchemaValue(name=\"anyValue\", value_type=\"any\"),\n SchemaValue(name=\"nullValue\", value_type=\"null\"),\n SchemaValue(name=\"optionalStringValue\", value_types=[\"null\", \"string\"]),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"TestClass\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"boolean_value\",\n key=\"booleanValue\",\n value_type=\"bool\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"any_value\", key=\"anyValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"null_value\", key=\"nullValue\", value_type=\"Any\", known_type=True\n ),\n 
PropertyDefinition(\n name=\"optional_string_value\",\n key=\"optionalStringValue\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n depends_on=set(),\n )", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def convert_type(self, value, schema_type, **kwargs):", "def create_student_and_isa(schema: Dict[Any, Any]) -> ISA:\n student = User.get_user_by_email(schema[\"client\"][\"email\"])\n if not student:\n student = Student.create_user(**schema[\"client\"])\n schema.pop(\"client\")\n schema[\"student_id\"] = student.id\n\n isa = ISA.create_isa(**schema)\n return isa", "def ArrowSchema(self) -> pa.Schema:" ]
[ "0.64936477", "0.64496356", "0.64394224", "0.6386912", "0.62401533", "0.6212064", "0.61844695", "0.60685897", "0.60447806", "0.60024077", "0.5941799", "0.5912843", "0.59004474", "0.5892521", "0.5882622", "0.58692586", "0.584908", "0.58294225", "0.5798403", "0.5789837", "0.57735884", "0.577175", "0.5771604", "0.575343", "0.5751487", "0.5748451", "0.57445776", "0.5721994", "0.57204753", "0.5702866", "0.5688597", "0.5675544", "0.56499714", "0.5643115", "0.5630818", "0.56189185", "0.5611931", "0.5604426", "0.55976987", "0.558607", "0.55686796", "0.5566756", "0.5562125", "0.5559258", "0.5549219", "0.5539067", "0.5534137", "0.5533938", "0.55275023", "0.5527168", "0.5526573", "0.5462745", "0.5450566", "0.54451007", "0.5429803", "0.54279053", "0.5417289", "0.5405787", "0.540095", "0.53994495", "0.5387365", "0.5380632", "0.53529406", "0.5341342", "0.5337162", "0.53201497", "0.5305773", "0.5294115", "0.5293401", "0.5292998", "0.5289904", "0.52676034", "0.5264268", "0.5262922", "0.5261521", "0.52587247", "0.52587247", "0.52587247", "0.525791", "0.5254207", "0.52504027", "0.52355534", "0.52270734", "0.52211124", "0.52183926", "0.5200303", "0.51993537", "0.51936406", "0.51929134", "0.5183909", "0.51725227", "0.5170233", "0.5169126", "0.5168656", "0.51660466", "0.51655436", "0.51609325", "0.51521647", "0.5144363", "0.51431257", "0.51393634" ]
0.0
-1
Instantiate a new SchemaTypeEnum
def __init__(self, raw_enum: Dict):

        self.name: str = raw_enum.get("name")
        self.description: str = raw_enum.get("description")
        self.is_deprecated: bool = raw_enum.get("isDeprecated")
        self.deprecation_reason: str = raw_enum.get("deprecationReason")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateEnumSchema(\n self,\n descriptor: EnumDescriptor,\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n enum_schema_obj: EnumSchema = {\n \"type\": \"string\",\n }\n\n if descriptor.values:\n enum_schema_obj[\"enum\"] = (\n tuple([enum_value.name for enum_value in descriptor.values]))\n enum_schema_obj[\"description\"] = (\"\\n\".join([\n f\"{enum_value.name} == {enum_value.number}\"\n for enum_value in descriptor.values\n ]))\n else:\n enum_schema_obj[\"enum\"] = ()\n\n self.schema_objs[_GetTypeName(descriptor)] = enum_schema_obj", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def test_type_builder_handles_enums():\n schema = [\n SchemaObject(\n name=\"ClassWithEnums\",\n properties=[\n SchemaValue(name=\"string_value\", value_type=\"string\"),\n SchemaEnum(\n name=\"enumValue\",\n value_type=\"string\",\n values=[\"first\", \"second\", \"third\"],\n ),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 2\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithEnums\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"string_value\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"enum_value\",\n key=\"enumValue\",\n value_type=\"ClassWithEnumsEnumValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithEnumsEnumValue\"},\n )\n assert build_result[1] == EnumDefinition(\n name=\"ClassWithEnumsEnumValue\",\n values=[(\"FIRST\", \"first\"), (\"SECOND\", \"second\"), (\"THIRD\", \"third\")],\n depends_on=set(),\n )", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def create_schema(self, schema: str):\n return", "def __init__(self, cls):\n super(EnumType, self).__init__()\n self._cls = cls", "def _schema_type(self) -> Optional[type]:\n pass", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def create_type(name, description, metadata, force):\n type_ = orm.DataFlagType()\n\n type_.name = name\n type_.description = description\n type_.metadata = metadata\n\n if force:\n type_.save()\n else:\n click.echo(\"Type to create:\\n\")\n click.echo(format_type(type_))\n if click.confirm(\"Create type?\"):\n type_.save()\n click.echo(\"Success.\")\n else:\n click.echo(\"Aborted.\")", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def 
_schema_type(self) -> Optional[type]:\n return SeriesSchema", "def is_enum(schema_obj):\n\n return (isinstance(schema_obj, schema.Enum) or\n (isinstance(schema_obj, schema.Field) and schema_obj.enum_type))", "def test_enum_log(self):\n dt = h5t.special_dtype(enum=('i', {'a': 1, 'b': 2}))\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeEnumID)", "def __init__(self, attr1: schema_constraints.MetricTypeEnum):\n self.attr1 = attr1", "def convert_type(self, value, schema_type, **kwargs):", "def __init__(self, enum_type, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.enum_type = enum_type\n self.__member_type = type(list(self.enum_type)[0].value)", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def __init__(\n self,\n name,\n namespace,\n symbols,\n names=None,\n doc=None,\n other_props=None,\n ):\n symbols = tuple(symbols)\n symbol_set = frozenset(symbols)\n if (len(symbol_set) != len(symbols)\n or not all(map(lambda symbol: isinstance(symbol, _str), symbols))):\n raise AvroException(\n 'Invalid symbols for enum schema: %r.' % (symbols,))\n\n super(EnumSchema, self).__init__(\n data_type=ENUM,\n name=name,\n namespace=namespace,\n names=names,\n other_props=other_props,\n )\n\n self._props['symbols'] = symbols\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, type_: Union[ConstraintTypes, str], value: Any):\n self.type = ConstraintTypes(type_)\n self.value = value\n enforce(self.check_validity(), \"ConstraintType initialization inconsistent.\")", "def _schema_type(self) -> Optional[type]:\n return None", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def __init__(self, schema_type, parameter_type, parameter_name):\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name", "def schemaInitTypes():\n libxml2mod.xmlSchemaInitTypes()", "def create_resource_types(resource_type_filenanme):\n\n print(\"Resource Types\")\n\n for i, row in enumerate(open(resource_type_filenanme)):\n row = row.rstrip()\n code, name, description, is_active = row.split(\"|\")\n\n if is_active == \"True\":\n is_active = True\n else:\n is_active = False\n\n\n resource_type = Resource_Type(code=code,\n name=name,\n description=description,\n is_active=is_active)\n\n # Add resource type to session\n db.session.add(resource_type)\n\n # Commit all resource type instances to DB\n db.session.commit()", "def get_schema_cls() -> t.Any:\n 
return SignupRequestSchema", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def get_schema_cls() -> t.Any:\n return None", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def set_schema_class(self, schema):\n self.schema_class = schema", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def _schema_type(self) -> Optional[type]:\n return ImageSchema", "def from_string(cls, name: str) -> Enum:", "def __init__(self, data_type=None):\n self.type = data_type", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def create_opinion_type(name, description, metadata, force):\n type_ = orm.DataFlagOpinionType()\n\n type_.name = name\n type_.description = description\n type_.metadata = metadata\n\n if force:\n type_.save()\n else:\n click.echo(\"Type to create:\\n\")\n click.echo(format_type(type_))\n if click.confirm(\"Create type?\"):\n type_.save()\n click.echo(\"Success.\")\n else:\n click.echo(\"Aborted.\")", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, 
GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def type(self):\n if self.__type is None:\n found_type = find_definition(\n self.__type_name, self.message_definition())\n if not (found_type is not Enum and\n isinstance(found_type, type) and\n issubclass(found_type, Enum)):\n raise FieldDefinitionError(\n 'Invalid enum type: %s' % found_type)\n\n self.__type = found_type\n return self.__type", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' % data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def __init__(self, defined_type, many=False, optional=False,\n validate=True):\n\n self._validate_type(defined_type)\n\n self._type = defined_type\n self._many = many\n self._optional = optional\n self._validate = validate", "def test_type_attribute(self):\n\n self._create_string()\n self.assertEquals(\"%s:%s\" % (\"xs\",\"string\"), self.string.schema_node.get(\"type\"))", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def test_type_builder_builds_correct_model_for_simple_class():\n schema = [\n SchemaObject(\n name=\"TestClass\",\n properties=[\n SchemaValue(name=\"stringValue\", value_type=\"string\"),\n SchemaValue(name=\"booleanValue\", value_type=\"boolean\"),\n SchemaValue(name=\"anyValue\", value_type=\"any\"),\n SchemaValue(name=\"nullValue\", value_type=\"null\"),\n SchemaValue(name=\"optionalStringValue\", value_types=[\"null\", \"string\"]),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"TestClass\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"boolean_value\",\n key=\"booleanValue\",\n value_type=\"bool\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"any_value\", key=\"anyValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"null_value\", key=\"nullValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"optional_string_value\",\n key=\"optionalStringValue\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n depends_on=set(),\n )", "def test_type_builder_raises_exception_on_invalid_schema_item_type():\n\n class UnknownSchemaItem(SchemaItem):\n pass\n\n schema = [\n SchemaObject(\n name=\"FakeObject\", properties=[UnknownSchemaItem(name=\"objectUnknown\")]\n )\n ]\n\n with pytest.raises(ValueError):\n _ = build_types(schema)", "def type(self, type):\n allowed_values = [\"CUSTOM_AUTHENTICATION\"]\n if not value_allowed_none_or_none_sentinel(type, allowed_values):\n type = 'UNKNOWN_ENUM_VALUE'\n self._type = type", "def xsd_type(dtype):\n return XSD_TYPE_MAP.get(dtype,'string')", "def set_resource_type(self, data_enum):\n self._resource_type = data_enum", "def getSchema(cls):\n pass", "def from_dict(cls, d):\n attr_cls = 
etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )", "def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SchemaExtension:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SchemaExtension()", "def __init__(self, data_type, other_props=None):\n if data_type not in PRIMITIVE_TYPES:\n raise AvroException('%r is not a valid primitive type.' % data_type)\n super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)", "def _transtype(self, systype=None):\n if systype is None:\n systype = self.get_meta(CAMSYS_TYPE, None)\n if systype == \"annotation-type\":\n newtype = AnnotationType\n elif systype == \"relation-type\":\n newtype = RelationType\n else:\n newtype = Tag\n if self.__class__ is not newtype:\n self.__class__ = newtype", "def generate_valid(schema):\n LOG.debug(\"generate_valid: %s\" % schema)\n schema_type = schema[\"type\"]\n if isinstance(schema_type, list):\n # Just choose the first one since all are valid.\n schema_type = schema_type[0]\n return type_map_valid[schema_type](schema)", "def _get_schema(self):\n self._pick()\n return Schema()", "def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> TeamsAppDefinition:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return TeamsAppDefinition()", "def schema(value: Any) -> Schema:\n raise InputTypeError(value)", "def set_typ(self, refobj, typ):\n try:\n enum = JB_ReftrackNode.types.index(typ)\n except ValueError:\n raise ValueError(\"The given type %s could not be found in available types: %\" % (typ, JB_ReftrackNode.types))\n cmds.setAttr(\"%s.type\" % refobj, enum)", "def test_create_risk_type(self):\n url = r'/api/v0/risk-types/'\n data = {\n 'type_name': 'New Risk Type',\n 'schema': TEST_SCHEMA\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(RiskType.objects.count(), 1)\n self.assertEqual(RiskType.objects.get().type_name, 'New Risk Type')", "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def create(spec):\n type = {\n 'polygon': Polygon,\n 'fusionTable': FusionTable,\n }[spec['type']]\n return type(spec)", "def __init__(self, description, flags, value):\n self.type = None\n self.converter = None\n self.name = description[0]\n\n type_code = description[1]\n scale = description[5]\n\n if type_code in MySQLdb.NUMBER:\n # Use python types first, fallback on float otherwise (e.g. 
NoneType)\n if isinstance(value, int):\n self.type = \"int\"\n else:\n # If there's a scale, use float, otherwise assume long\n self.type = \"float\" if scale else \"long\"\n\n # Check datetime and date\n if type_code in MySQLdb.DATETIME:\n self.type = \"timestamp\"\n self.converter = lambda x: x.isoformat()\n if type_code in MySQLdb.DATE:\n self.type = \"date\"\n self.converter = lambda x: x.isoformat()\n\n # Check if this is binary data and potentially unsafe for JSON\n if not self.type and flags & BINARY and type_code not in MySQLdb.TIME:\n # This needs to be checked BEFORE the next type check\n self.type = \"binary\"\n self.converter = b64encode\n elif isinstance(value, str):\n self.type = \"string\"\n\n if not self.type:\n # Just return a string and make sure to convert it to string if we don't know about\n # this type. This may include datetime.time\n self.type = \"string\"\n self.converter = str", "def complex_type_factory(name, definition, schema):\n d = dict()\n basecls = None\n basedef = definition.basedef\n if basedef and basedef != ITSELF:\n basecls = complex_type_factory(basedef.name, basedef, schema)\n if definition.content_type.is_element_only():\n model = definition.content_type.partical.term\n complex_model(model, d, schema)\n complex_attributes(definition.attributes, d, schema)\n cls = type(name, (basecls or ComplexImp,), d)\n cls.definition = definition\n return cls", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def test_enum_type():\n name = \"an_enum_field\"\n namespace = \"my_emum\"\n aliases = [\"enum\", \"first enum\"]\n default = types.Enum(\n [\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"], namespace=namespace, aliases=aliases, default=\"CLUBS\"\n )\n\n python_type = types.Enum\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"enum\",\n \"name\": name,\n \"symbols\": default.symbols,\n \"namespace\": namespace,\n \"aliases\": aliases,\n },\n \"default\": default.default,\n }\n\n assert expected == field.to_dict()\n\n default = types.Enum([\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"])\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"enum\",\n \"name\": name,\n \"symbols\": default.symbols,\n },\n }\n\n assert expected == field.to_dict()\n\n default = types.Enum([\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"], default=None)\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"enum\",\n \"name\": name,\n \"symbols\": default.symbols,\n },\n \"default\": default.default,\n }\n\n assert expected == field.to_dict()\n\n with pytest.raises(AssertionError):\n default = types.Enum([\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"], default=\"BLUE\")\n field = fields.AvroField(name, python_type, default)\n\n field.to_dict()", "def __init__ (self, enumType: typing.Type[enum_lib.Enum], default: typing.Union[enum_lib.Enum, str], *args, invalidEnums: 
typing.Tuple[typing.Union[enum_lib.Enum, str], ...] = (), **kwargs):\n\n\t\tif not isinstance(enumType, type) or not issubclass(enumType, enum_lib.Enum):\n\t\t\traise tunable_base.MalformedTuningSchemaError('Must provide a python enum type to TunablePythonEnumEntry')\n\n\t\tif isinstance(default, enum_lib.Enum):\n\t\t\tdefault = default.name\n\n\t\t# noinspection PyTypeChecker\n\t\tself.EnumType = enumType # type: typing.Type[enum_lib.Enum]\n\t\tself.InvalidEnums = invalidEnums # type: typing.Tuple[typing.Union[enum_lib.EnumMeta, str], ...]\n\n\t\tsuper().__init__(tunable_type = str, default = default, *args, **kwargs)\n\n\t\tself.cache_key = \"TunablePythonEnumEntry_{}_{}\".format(enumType.__name__, self.default)", "def __init__(self, type_):\n\n self.type = type_", "def load_schema_datatype_into_networkx(schema):\n\n G = nx.DiGraph()\n for record in schema[\"@graph\"]:\n if record[\"@id\"] in DATATYPES:\n G.add_node(\n record[\"@id\"],\n uri=record[\"@id\"],\n description=record[\"rdfs:comment\"],\n )\n if \"rdfs:subClassOf\" in record:\n parents = dict2list(record[\"rdfs:subClassOf\"])\n for _parent in parents:\n if _parent[\"@id\"] != \"rdfs:Class\":\n G.add_edge(_parent[\"@id\"], record[\"@id\"])\n elif \"@type\" in record and \"http://schema.org/DataType\" in record[\"@type\"]:\n G.add_edge(\"http://schema.org/DataType\", record[\"@id\"])\n return G", "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def build_active_schema(cls, attr):\n return cls(attr.name, values={attr.value})", "def set_type(self, value):\n self._set_one_attribute(self.AttributeNames.TYPE, value)\n return self", "def new_entity_type(name, client=default):\n data = {\"name\": name}\n return raw.create(\"entity-types\", data, client=client)", "def __init__(self, schema=None):\n self.schema = schema or {}", "def test_type_builder_handles_enumerations_with_uppercase_values():\n schema = [\n SchemaEnum(\n name=\"UppercaseEnum\",\n value_type=\"string\",\n values=[\"HELLO_WORLD\", \"UPPERCASE_VALUE\", \"SOME_VALUE\"],\n ),\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == EnumDefinition(\n name=\"UppercaseEnum\",\n values=[\n (\"HELLO_WORLD\", \"HELLO_WORLD\"),\n (\"UPPERCASE_VALUE\", \"UPPERCASE_VALUE\"),\n (\"SOME_VALUE\", \"SOME_VALUE\"),\n ],\n depends_on=set(),\n )", "def __init__(self, name):\n super(SchemaStub, self).__init__()\n self.model = SchemaStub._ModelStub()\n self.name = name", "def from_db_value(self, value, expression, connection, context):\n # can't call super. See\n # https://docs.djangoproject.com/en/1.9/ref/models/fields/#django.db.models.Field.from_db_value\n if isinstance(value, int):\n try:\n return self.enum_class(value)\n except ValueError:\n raise ValidationError(\n 'Invalid enum integer value {} for {}'.format(value, self.enum_class))\n\n assert value is None\n return None", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. 
Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def validatePredefinedType(self, type: int) -> bool:\n ...", "def __init__(self, schema ):\n self.schema = schema", "def __init__(self, node, declare):\n symbol.__init__(self, node, declare, \"enum\", \"Enumeration\")\n # check if values are required, must be true or false\n val_req = getOptionalTag(node, \"valuesRequired\", \"false\")\n if val_req == \"false\":\n self.val_req = False\n elif val_req == \"true\":\n self.val_req = True\n else:\n err = \"Enumeration field 'valueRequired' must be either 'true' or 'false'.\\n\"\n err += \"Got: %s in node:\\n %s\" % (val_req, node.toxml())\n raise Exception(err)\n\n self.entries = []\n members = getNode(node, \"members\")\n for entry in filter(lambda n: n.nodeType == n.ELEMENT_NODE, members.childNodes):\n ent = declare( entry )\n if ent.getType() != \"enumEntry\":\n raise Exception(\"Incorrect entry '\"+ent.getType()+\"' found in enumeration:\\n\"+node.toxml())\n self.entries.append(ent)", "def test_enum_lit(self):\n dt = h5t.special_dtype(enum=('i', {'a': 1, 'b': 2}))\n htype = h5t.py_create(dt)\n self.assertIsInstance(htype, h5t.TypeIntegerID)", "def test_type_builder_handles_top_level_enumerations():\n schema = [\n SchemaEnum(name=\"TestEnum\", value_type=\"string\", values=[\"a\", \"b\", \"c\"]),\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == EnumDefinition(\n name=\"TestEnum\", values=[(\"A\", \"a\"), (\"B\", \"b\"), (\"C\", \"c\")], depends_on=set(),\n )", "def def_enum(dct, name):\n return type(name, (Enum,), dct)", "def __init__(self, type, name):\n self.id = len(OFFICES)\n self.type = type\n self.name = name", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def EnumFromMojom(self, enum, mojom_type):\n assert mojom_type.tag == mojom_types_mojom.UserDefinedType.Tags.enum_type\n mojom_enum = mojom_type.enum_type\n self.PopulateUserDefinedType(enum, mojom_enum)\n enum.fields = [self.EnumFieldFromMojom(value)\n for value in mojom_enum.values]", "def create_type(name):\n\n new_type = Type(name=name)\n db.session.add(new_type)\n db.session.commit()\n return new_type", "def makeCountriesEnumerationType (self):\n\t\t# stub = getEnumerationTypeXmlStub (\"countriesType\")\n\t\t# countriesVocab = AnnotatedEnumerationType (stub)\n\t\tcountriesType = self.createEnumerationType (\"countriesType\");\n\t\t\n\t\tself.setDataHubValues (countriesType)\n\t\t\n\t\tself.setPubsValues (countriesType)\n\t\t\t\n\t\treturn countriesType", "def build_active_schema(cls, attr):\n return cls(attr.name, categories={attr.value})", "def _flag_created_omf_type(configuration_key, type_id, asset_code):\n\n payload = payload_builder.PayloadBuilder()\\\n .INSERT(configuration_key=configuration_key,\n asset_code=asset_code,\n type_id=type_id)\\\n .payload()\n\n _storage.insert_into_tbl(\"omf_created_objects\", payload)", "def local_type(verifield, type_name):\n from polyglot.pyapi.meta import retrieve_schema_table_fields\n from polyglot.pyapi.instance import create_instance_validators\n from polyglot.models.schema import Instance\n (tenant_id, schema_id, table_id) = type_name.split(\"::\")\n fields = retrieve_schema_table_fields(tenant_id, 
schema_id, table_id)\n validators = Instance._validations\n validators['instance_data'] = create_instance_validators(fields)\n instance = Instance(**instance)\n instance.validate(validators)\n instance._validations = validators\n return not((hasattr(instance, 'validation_errors') \n and instance.validation_errors) \\\n or instance.instance_data.get('validation_errors', {}))" ]
[ "0.6965221", "0.5952822", "0.5935833", "0.59076416", "0.58250195", "0.57742614", "0.5734689", "0.5734217", "0.5627075", "0.5590269", "0.5535121", "0.55136585", "0.54969114", "0.5492475", "0.5481712", "0.5452931", "0.54466367", "0.54267055", "0.5416778", "0.54110134", "0.54047006", "0.53914917", "0.53671205", "0.53331494", "0.5312529", "0.5290406", "0.52782464", "0.5277713", "0.52723545", "0.52721226", "0.5263402", "0.5242448", "0.5241859", "0.52191806", "0.5219101", "0.51940894", "0.5158488", "0.5146622", "0.514603", "0.5136606", "0.51365215", "0.51006323", "0.5099627", "0.5083154", "0.50830626", "0.5082944", "0.5071041", "0.506921", "0.5066372", "0.5064962", "0.5062905", "0.5048397", "0.5047607", "0.50474864", "0.5043925", "0.5043655", "0.50395596", "0.5026988", "0.5024057", "0.5023331", "0.50180227", "0.50144905", "0.501192", "0.50111556", "0.5009768", "0.498811", "0.49848697", "0.49842748", "0.49789134", "0.49740046", "0.4964288", "0.49586594", "0.49456698", "0.4945567", "0.49454653", "0.4938574", "0.49362576", "0.49258944", "0.49237227", "0.49235627", "0.4914641", "0.49133995", "0.49086002", "0.49048573", "0.48983356", "0.48871267", "0.48865283", "0.48796344", "0.48794445", "0.4867533", "0.48658523", "0.48616785", "0.4853384", "0.4849777", "0.48490497", "0.48397347", "0.48380035", "0.48354357", "0.48140332", "0.4810311", "0.47935903" ]
0.0
-1
Instantiate a new SchemaType
def __init__(self, raw_type: Dict):

        self.kind = raw_type.get("kind")
        self.name = raw_type.get("name")
        self.description = raw_type.get("description")
        self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get("fields") or [] if f]
        self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get("inputFields") or [] if i]
        self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get("interfaces") or [] if i]
        self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get("enumValues") or [] if e]
        self.possible_types = raw_type.get("possibleTypes")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_schema(self, schema: str):\n return", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def __init__(self, schema=None):\n self.schema = schema or {}", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def __init__(self, schema ):\n self.schema = schema", "def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and “__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. 
We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def __init__(self, schema_type, parameter_type, parameter_name):\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name", "def getSchema(cls):\n pass", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def _get_schema(self):\n self._pick()\n return Schema()", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def get_schema(self, name):\n return Schema(self, name)", "def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n schema_cls = attr_cls.get_schema_cls()\n\n name = d[\"name\"]\n exclusive = d.get(\"exclusive\", False)\n default = d.get(\"default\", None)\n return schema_cls(\n name,\n exclusive=exclusive,\n default=default,\n **schema_cls.get_kwargs(d)\n )", "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def get_schema_cls() -> t.Any:\n return None", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n 
StructField(\"visatype\",StringType(),True)])\n return schema", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def set_schema_class(self, schema):\n self.schema_class = schema", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def from_schema(cls, schema, *args, **kwargs):\r\n\r\n return cls(schema.get(u\"id\", u\"\"), schema, *args, **kwargs)", "def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' 
% data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls", "def schema(cls):\n return Schema.get_instance(cls)", "def complex_type_factory(name, definition, schema):\n d = dict()\n basecls = None\n basedef = definition.basedef\n if basedef and basedef != ITSELF:\n basecls = complex_type_factory(basedef.name, basedef, schema)\n if definition.content_type.is_element_only():\n model = definition.content_type.partical.term\n complex_model(model, d, schema)\n complex_attributes(definition.attributes, d, schema)\n cls = type(name, (basecls or ComplexImp,), d)\n cls.definition = definition\n return cls", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def _schema_type(self) -> Optional[type]:\n pass", "def set_schema(self, schema):\r\n self.__schema = schema", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. 
By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def __init__(self, name):\n super(SchemaStub, self).__init__()\n self.model = SchemaStub._ModelStub()\n self.name = name", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def __new__(metacls, name, bases, attributes, **kwargs):\n # Use both a namespaced mapping and a standard dict\n # as class-based records of our field attributes:\n field_index = Flat()\n field_names = {}\n \n # Stow both the Python name and the namespaced name\n # for each field attribute defined on the schema,\n # additionally manually calling __set_name__(…) if\n # we’re on a pre-3.6 version of Python:\n for attribute, value in attributes.items():\n if isinstance(value, FieldBase):\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n # This is the same as the above, but for the base\n # ancestor class – this enables field inheritance:\n for base in bases:\n parent = base.__mro__[0]\n for attribute, value in vars(parent).items():\n if isinstance(value, FieldBase) and attribute not in attributes:\n if NEED_NAME:\n value.__set_name__(None, attribute)\n attributes[attribute] = value\n field_names[attribute] = value\n field_index.set(attribute, value,\n namespace=value.namespace)\n \n for namespace in field_index.namespaces():\n nsfield = Namespace(field_index, namespace=namespace)\n if NEED_NAME:\n nsfield.__set_name__(None, namespace)\n attributes[namespace] = nsfield\n field_names[namespace] = nsfield\n \n # Add both the field-index and the field-names mappings\n # to the class dictionary for the new type:\n attributes['__field_index__'] = field_index\n attributes['__field_names__'] = field_names\n \n # Create and return the schema type:\n return super(MetaSchema, metacls).__new__(metacls, name,\n bases,\n attributes,\n **kwargs)", "def schema(value: Any) -> Schema:\n 
raise InputTypeError(value)", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def schema(self, name):\n return model.Schema(self, name)", "def create_schema(self, name):\n self._connection.execute_nonquery(\n \"sql\", _CREATE_DATABASE_QUERY.format(quote_identifier(name)), True\n )\n return Schema(self, name)", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def schema(self, schema):\n # type: (object) -> None\n\n if schema is not None:\n if not isinstance(schema, object):\n raise TypeError(\"Invalid type for `schema`, type has to be `object`\")\n\n self._schema = schema", "def schema(self, schema):\n self._schema = schema", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def createSchema(schema):\n return \"CREATE SCHEMA \\\"{name}\\\";\\n\".format(name = schema.name)", "def get_schema(cls):\n return cls.schema()", "def instance_schema(self):\n raise NotImplementedError", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None", "def __init__(\n self,\n data_type,\n name=None,\n namespace=None,\n names=None,\n other_props=None,\n ):\n assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)\n self._avro_name = names.get_name(name=name, namespace=namespace)\n\n super(NamedSchema, self).__init__(data_type, other_props)\n\n names.register(self)\n\n self._props['name'] = self.name\n if self.namespace:\n self._props['namespace'] = self.namespace", "def build_active_schema(cls, attrs):\n schema = cls()\n schema.add_attributes(attrs)\n return schema", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def _schema_type(self) -> Optional[type]:\n return None", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return 
SchemaStructure(constructors=constructors, methods=methods)", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def _CreateEnumSchema(\n self,\n descriptor: EnumDescriptor,\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n enum_schema_obj: EnumSchema = {\n \"type\": \"string\",\n }\n\n if descriptor.values:\n enum_schema_obj[\"enum\"] = (\n tuple([enum_value.name for enum_value in descriptor.values]))\n enum_schema_obj[\"description\"] = (\"\\n\".join([\n f\"{enum_value.name} == {enum_value.number}\"\n for enum_value in descriptor.values\n ]))\n else:\n enum_schema_obj[\"enum\"] = ()\n\n self.schema_objs[_GetTypeName(descriptor)] = enum_schema_obj", "def make_schema(obj):\n\n if not isinstance(obj, Schema):\n if isinstance(obj, dict):\n return DictStructure(obj)\n elif isinstance(obj, list):\n return ListStructure(obj)\n elif isinstance(obj, (int, float, str, bool)) or (obj is None):\n return Value(obj)\n else:\n raise ValueError(f\"object {obj} cannot be represented as a JSON Structure\")\n else:\n return obj", "def test_type_builder_builds_correct_model_for_simple_class():\n schema = [\n SchemaObject(\n name=\"TestClass\",\n properties=[\n SchemaValue(name=\"stringValue\", value_type=\"string\"),\n SchemaValue(name=\"booleanValue\", value_type=\"boolean\"),\n SchemaValue(name=\"anyValue\", value_type=\"any\"),\n SchemaValue(name=\"nullValue\", value_type=\"null\"),\n SchemaValue(name=\"optionalStringValue\", value_types=[\"null\", \"string\"]),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"TestClass\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"stringValue\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"boolean_value\",\n key=\"booleanValue\",\n value_type=\"bool\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"any_value\", key=\"anyValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"null_value\", key=\"nullValue\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"optional_string_value\",\n key=\"optionalStringValue\",\n value_type=\"Optional[str]\",\n known_type=True,\n ),\n ],\n depends_on=set(),\n )", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def schemaInitTypes():\n libxml2mod.xmlSchemaInitTypes()", "def load_schema_datatype_into_networkx(schema):\n\n G = nx.DiGraph()\n for record in schema[\"@graph\"]:\n if record[\"@id\"] in 
DATATYPES:\n G.add_node(\n record[\"@id\"],\n uri=record[\"@id\"],\n description=record[\"rdfs:comment\"],\n )\n if \"rdfs:subClassOf\" in record:\n parents = dict2list(record[\"rdfs:subClassOf\"])\n for _parent in parents:\n if _parent[\"@id\"] != \"rdfs:Class\":\n G.add_edge(_parent[\"@id\"], record[\"@id\"])\n elif \"@type\" in record and \"http://schema.org/DataType\" in record[\"@type\"]:\n G.add_edge(\"http://schema.org/DataType\", record[\"@id\"])\n return G", "def schema() -> None:\n pass", "def _CreateMessageSchema(\n self,\n descriptor: Descriptor,\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n type_name = _GetTypeName(descriptor)\n\n properties = dict()\n visiting.add(type_name)\n\n # Create schemas for the fields' types.\n for field_descriptor in descriptor.fields:\n self._CreateSchema(field_descriptor, visiting)\n field_name = casing.SnakeToCamel(field_descriptor.name)\n\n properties[field_name] = self._GetDescribedSchema(field_descriptor)\n\n visiting.remove(type_name)\n\n self.schema_objs[type_name] = cast(MessageSchema, {\n \"type\": \"object\",\n \"properties\": properties,\n })", "def registration_schema(self, ctx):\n schema = RegistrationSchema()\n schema.context['ctx'] = ctx\n return schema", "def create_schema(self, schema):\n base = '/api/storage/v1/schema'\n svc = \"%(base)s/%(prop)s\" % {'base': base, 'prop': schema['property']}\n ret = self.rclient.get(svc)\n if ret.status == restclient.Status.OK:\n LOG.warning('Property %s already exists.', schema['property'])\n return\n ret = self.rclient.post(base, schema)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error Creating '\n 'Property: %(property)s '\n 'Type: %(type)s '\n 'Description: %(description)s '\n 'Return code: %(ret.status)d '\n 'Message: %(ret.data)s.')\n % {'property': schema['property'],\n 'type': schema['type'],\n 'description': schema['description'],\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n StructField('year', IntegerType(), True)\n ]\n )\n return schema", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql", "def __init__(self, defined_type, many=False, optional=False,\n validate=True):\n\n self._validate_type(defined_type)\n\n self._type = defined_type\n self._many = many\n self._optional = optional\n self._validate = validate", "def __init__(self, py_dict=None):\n super(EdgeNATRulesSchema, self).__init__()\n self.set_data_type('xml')\n self.natRule = EdgeNATRuleSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if 
isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def _dict2schema(dct):\n attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n\n class Meta(object):\n strict = True\n\n attrs[\"Meta\"] = Meta\n return type(str(\"\"), (ma.Schema,), attrs)", "def _schema_type(self) -> Optional[type]:\n return ImageSchema", "def _schema_builder(mocker):\n return mocker.create_autospec(SchemaBuilder)", "def __init__(\n self,\n data_type,\n name,\n index,\n has_default,\n default=_NO_DEFAULT,\n order=None,\n doc=None,\n other_props=None\n ):\n if (not isinstance(name, _str)) or (not name):\n raise SchemaParseException('Invalid record field name: %r.' % name)\n if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):\n raise SchemaParseException('Invalid record field order: %r.' % order)\n\n # All properties of this record field:\n self._props = {}\n\n self._has_default = has_default\n if other_props:\n self._props.update(other_props)\n\n self._index = index\n self._type = self._props['type'] = data_type\n self._name = self._props['name'] = name\n\n if has_default:\n self._props['default'] = default\n\n if order is not None:\n self._props['order'] = order\n\n if doc is not None:\n self._props['doc'] = doc", "def to_json_schema(cls):\n return parsers.to_json_schema(cls)", "def build_schema(schema):\n annotated_schema = {\"$schema\": \"http://json-schema.org/schema#\", **schema}\n jsonschema.Draft7Validator.check_schema(annotated_schema)\n return jsonschema.Draft7Validator(annotated_schema)", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema" ]
[ "0.7132769", "0.679428", "0.6635432", "0.659345", "0.6592825", "0.6465888", "0.6453353", "0.63999885", "0.6380593", "0.63766026", "0.636462", "0.63383156", "0.6335716", "0.63211817", "0.62860376", "0.6270915", "0.625923", "0.6207403", "0.62045443", "0.62017065", "0.6196828", "0.61719465", "0.6170851", "0.61543834", "0.615004", "0.6129977", "0.61233646", "0.61187416", "0.609859", "0.6090238", "0.60901433", "0.60592496", "0.6057852", "0.60575664", "0.6046233", "0.60388905", "0.60342586", "0.60104674", "0.5987569", "0.5968984", "0.59589016", "0.5940635", "0.5932157", "0.5903554", "0.58990496", "0.5883843", "0.58627313", "0.5853304", "0.5853246", "0.5815909", "0.58064514", "0.5806319", "0.57847625", "0.5768515", "0.5764464", "0.5754946", "0.57083744", "0.57027555", "0.57002044", "0.56953245", "0.5694148", "0.56901026", "0.5680998", "0.5675549", "0.56754285", "0.5669935", "0.56692713", "0.5667784", "0.56676394", "0.56634754", "0.56634754", "0.56634754", "0.5650001", "0.56486154", "0.5645708", "0.5632948", "0.5631668", "0.5626745", "0.5582351", "0.556977", "0.55493087", "0.5548774", "0.55425984", "0.5541282", "0.5535376", "0.55322176", "0.55313456", "0.55291075", "0.55197054", "0.55188227", "0.551429", "0.55138904", "0.550902", "0.5507111", "0.54824555", "0.5476026", "0.5474507", "0.54607666", "0.5444063", "0.54381245" ]
0.63470966
11
Create a new Schema instance. First, the schema is loaded synchronously from the endpoint and stored as raw JSON for further processing. Then the request types are parsed: "Query", "Mutation" and "Subscription". After that, the schema types and directives are parsed.
def __init__(self, endpoint: str, transporter: Transporter, settings: Settings, cache: Optional[Cache]): self.endpoint = endpoint self.transport = transporter self.settings = settings self.cache = cache if self.cache is not None: schema_introspection = self.cache.retrieve(self.endpoint, SCHEMA_KEY) if schema_introspection is None: schema_introspection = self.introspect_schema(endpoint, transporter) self.cache.store(self.endpoint, SCHEMA_KEY, schema_introspection) else: schema_introspection = self.introspect_schema(endpoint, transporter) # graphql schema properties self.raw_schema = schema_introspection.get(self.settings.default_response_key, {}).get("__schema", {}) self.query_type: str = self.parse_query_type(self.raw_schema) self.mutation_type: str = self.parse_mutation_type(self.raw_schema) self.subscription_type: str = self.parse_subscription_type(self.raw_schema) self.types: Dict[str, SchemaType] = self.parse_types(self.raw_schema.get("types", [])) self.directives: Dict[str, Directive] = self.parse_directives(self.raw_schema.get("directives", [])) # custom schema properties self.queries: Tuple[Operation] = self.parse_operations(self.query_type) self.mutations: Tuple[Operation] = self.parse_operations(self.mutation_type) self.subscriptions: Tuple[Operation] = self.parse_operations(self.subscription_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema", "def introspect_schema(cls, endpoint: str, transport: Transporter) -> Dict:\n return request_schema(endpoint, transport.session)", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def _get_schema(self):\n self._pick()\n return Schema()", "def __init__(\n self,\n name,\n namespace,\n fields=None,\n make_fields=None,\n names=None,\n record_type=RECORD,\n doc=None,\n other_props=None\n ):\n if record_type == REQUEST:\n # Protocol requests are not named:\n super(RecordSchema, self).__init__(\n data_type=REQUEST,\n other_props=other_props,\n )\n elif record_type in [RECORD, ERROR]:\n # Register this record name in the tracker:\n super(RecordSchema, self).__init__(\n data_type=record_type,\n name=name,\n namespace=namespace,\n names=names,\n other_props=other_props,\n )\n else:\n raise SchemaParseException(\n 'Invalid record type: %r.' 
% record_type)\n\n if record_type in [RECORD, ERROR]:\n avro_name = names.get_name(name=name, namespace=namespace)\n nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)\n elif record_type == REQUEST:\n # Protocol request has no name: no need to change default namespace:\n nested_names = names\n\n if fields is None:\n fields = make_fields(names=nested_names)\n else:\n assert make_fields is None\n self._fields = tuple(fields)\n\n self._field_map = RecordSchema._make_field_map(self._fields)\n\n self._props['fields'] = fields\n if doc is not None:\n self._props['doc'] = doc", "def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def __init__(self, schema=None):\n self.schema = schema or {}", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def schema(self):\n # 
NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "def create_schema(self, schema: str):\n return", "def __init__(self, endpoint: str, ws_endpoint: str = None, transporter=None, settings=None, cache=None):\n if not endpoint:\n raise ValueError(\"No Endpoint specified.\")\n self.endpoint = endpoint\n\n if not ws_endpoint:\n ws_endpoint = adapt_websocket_endpoint(endpoint)\n self.ws_endpoint = ws_endpoint\n\n self.transporter: Transporter = transporter or Transporter()\n self.settings: Settings = settings or Settings()\n self.cache: Optional[Cache] = cache\n\n self._query_services: Optional[QueryServiceProxy] = None\n self._mutation_services: Optional[MutationServiceProxy] = None\n self._subscription_services: Optional[SubscriptionServiceProxy] = None\n\n self.schema = Schema(self.endpoint, self.transporter, self.settings, self.cache)", "def endpoint_schema(endpoint, extra_definitions={}):\n # load common schema template and update metadata\n schema = common.load_json(\"./templates/provider/endpoint.json\")\n schema[\"$id\"] = schema[\"$id\"].replace(\"endpoint.json\", f\"{endpoint}.json\")\n schema[\"title\"] = schema[\"title\"].replace(\"endpoint\", endpoint)\n\n # merge custom definitions with relevant common definitions\n definitions = common.load_definitions(\n \"string\",\n \"timestamp\",\n \"uuid\",\n \"version\",\n common.MDS_FEATURE_POINT\n )\n definitions.update(common.point_definition())\n definitions.update(extra_definitions)\n\n endpoint_schema = common.load_json(f\"./templates/provider/{endpoint}.json\")\n\n # for all but stops, merge standard vehicle info with items schema\n if endpoint not in [\"stops\"]:\n items = endpoint_schema[endpoint][\"items\"]\n vehicle = common.vehicle_definition()\n items[\"required\"] = vehicle[\"required\"] + items[\"required\"]\n items[\"properties\"] = { **vehicle[\"properties\"], **items[\"properties\"] }\n definitions.update(common.load_definitions(\"propulsion_type\", \"propulsion_types\", \"vehicle_type\"))\n\n # merge endpoint schema into the endpoint template\n data_schema = schema[\"properties\"][\"data\"]\n data_schema[\"required\"] = [endpoint]\n data_schema[\"properties\"] = endpoint_schema\n\n # insert definitions\n schema[\"definitions\"].update(definitions)\n\n return schema", "def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return SchemaStructure(constructors=constructors, methods=methods)", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def _CreateMessageSchema(\n self,\n descriptor: Descriptor,\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n type_name = _GetTypeName(descriptor)\n\n properties = dict()\n visiting.add(type_name)\n\n # Create schemas for the fields' types.\n for field_descriptor in descriptor.fields:\n self._CreateSchema(field_descriptor, visiting)\n field_name = casing.SnakeToCamel(field_descriptor.name)\n\n properties[field_name] = self._GetDescribedSchema(field_descriptor)\n\n visiting.remove(type_name)\n\n 
self.schema_objs[type_name] = cast(MessageSchema, {\n \"type\": \"object\",\n \"properties\": properties,\n })", "def getSchema(cls):\n pass", "def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None", "def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def __init__(self, *args):\n _snap.Schema_swiginit(self, _snap.new_Schema(*args))", "def __init__(self, raw_field: Dict):\n self.name = raw_field.get(\"name\")\n self.description = raw_field.get(\"description\")\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get(\"args\", []))\n self.type: TypeDefer = TypeDefer(raw_field.get(\"type\")) if raw_field.get(\"type\") is not None else None\n 
self.is_deprecated: bool = raw_field.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_field.get(\"deprecationReason\")", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'error_entity': 'DomainEntityRef',\n 'related_entity': 'DomainEntityRef',\n 'timestamp': 'datetime',\n 'level': 'str',\n 'category': 'str',\n 'correlation_id': 'str',\n 'event_message': 'EventMessage',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'error_entity': 'errorEntity',\n 'related_entity': 'relatedEntity',\n 'timestamp': 'timestamp',\n 'level': 'level',\n 'category': 'category',\n 'correlation_id': 'correlationId',\n 'event_message': 'eventMessage',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._error_entity = None\n self._related_entity = None\n self._timestamp = None\n self._level = None\n self._category = None\n self._correlation_id = None\n self._event_message = None\n self._self_uri = None", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema", "def schema(self):\n # TODO The schema of a container resource...\n # This is the same as the leaf.\n # However, this isn't actually the schema of the response\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "def schema(self):\n if not self._schema:\n # Inherit context from parent.\n context = getattr(self.parent, \"context\", {})\n if callable(self.nested) and not isinstance(self.nested, type):\n nested = self.nested()\n else:\n nested = self.nested\n if isinstance(nested, dict):\n # defer the import of `marshmallow.schema` to avoid circular imports\n from marshmallow.schema import Schema\n\n nested = Schema.from_dict(nested)\n\n if isinstance(nested, SchemaABC):\n self._schema = copy.copy(nested)\n self._schema.context.update(context)\n # Respect only and exclude passed from parent and re-initialize fields\n set_class = self._schema.set_class\n if self.only is not None:\n if self._schema.only is not None:\n original = self._schema.only\n else: # only=None -> all fields\n original = self._schema.fields.keys()\n self._schema.only = set_class(self.only) & set_class(original)\n if self.exclude:\n original = self._schema.exclude\n self._schema.exclude = set_class(self.exclude) | set_class(original)\n self._schema._init_fields()\n else:\n if isinstance(nested, type) and issubclass(nested, SchemaABC):\n schema_class = nested\n elif not isinstance(nested, (str, bytes)):\n raise ValueError(\n \"`Nested` fields must be passed a \"\n \"`Schema`, not {}.\".format(nested.__class__)\n )\n elif nested == \"self\":\n schema_class = self.root.__class__\n else:\n schema_class = class_registry.get_class(nested)\n self._schema = schema_class(\n many=self.many,\n only=self.only,\n exclude=self.exclude,\n context=context,\n load_only=self._nested_normalized_option(\"load_only\"),\n dump_only=self._nested_normalized_option(\"dump_only\"),\n )\n return self._schema", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = 
whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "async def introspect(\n self, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n request = GraphQLRequest(\n query=graphql.get_introspection_query(descriptions=False),\n validate=False,\n headers=headers,\n )\n introspection = await self.query(request)\n try:\n return graphql.build_client_schema(introspection=introspection.data)\n except TypeError:\n raise GraphQLIntrospectionException(\n f\"Failed to build schema from introspection data: {introspection.errors}\"\n )", "def get_parser_context(self, http_request):\n res = super().get_parser_context(http_request)\n res['json_schema'] = self.create_payload_schema\n return res", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to 
schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema", "def get_schema(self, name):\n return Schema(self, name)", "def read(self):\n schema_object = self.get_schema_object()\n if self.id is not None:\n self.response = self.request('GET', self.read_endpoint + \"/\"\n + str(self.id), \"\")\n\n else:\n self.response = self.request('GET', self.read_endpoint, \"\")\n self.log.debug(self.response.status)\n payload_schema = self.response.read()\n if payload_schema != None and payload_schema != \"\":\n schema_object.set_data(payload_schema, self.accept_type)\n else:\n return None\n return schema_object", "def __init__(self):\n self.swagger_types = {\n 'annotations': 'dict(str, str)',\n 'end_time': 'int',\n 'hosts': 'list[str]',\n 'is_ephemeral': 'bool',\n 'is_user_event': 'bool',\n 'name': 'str',\n 'start_time': 'int',\n 'summarized_events': 'int',\n 'table': 'str',\n 'tags': 'list[str]'\n }\n\n self.attribute_map = {\n 'annotations': 'annotations',\n 'end_time': 'endTime',\n 'hosts': 'hosts',\n 'is_ephemeral': 'isEphemeral',\n 'is_user_event': 'isUserEvent',\n 'name': 'name',\n 'start_time': 'startTime',\n 'summarized_events': 'summarizedEvents',\n 'table': 'table',\n 'tags': 'tags'\n }\n\n self._annotations = None\n self._end_time = None\n self._hosts = None\n self._is_ephemeral = False\n self._is_user_event = False\n self._name = None\n self._start_time = None\n self._summarized_events = None\n self._table = None\n self._tags = None", "def get_schema(cls):\n return cls.schema()", "def __new__(cls, **kwargs):\n # Call up to allocate the new instance:\n try:\n instance = super(Schema, cls).__new__(cls, **kwargs)\n except TypeError:\n instance = super(Schema, cls).__new__(cls)\n \n # Create the “__fields__” attribute and retrieve the class-based\n # field indexes, “__field_names__” and “__field_index__”:\n instance.__fields__ = Flat()\n field_names, field_index = pyattrs(cls, 'field_names',\n 'field_index')\n \n # Set each of the field-default values through a call to\n # the underlying descriptor instances’ “get_default()” method:\n for field, nsfield in zip(field_names, field_index):\n instance.__fields__[nsfield] = stattr(instance, field).get_default()\n \n # Override defaults with any instance-specific values,\n # as specfied through keywords:\n for key, value in kwargs.items():\n if key in field_names:\n setattr(instance, key, value)\n \n for namespace in instance.__fields__.namespaces():\n if namespace in field_names:\n setattr(instance, namespace, field_names[namespace])\n \n # Return the new instance:\n return instance", "def load(self, request, resource=None, **kwargs):\n schema = self.get_schema(request, resource=resource, **kwargs)\n data = yield from self.parse(request)\n resource, errors = schema.load(data, partial=resource is not None)\n if errors:\n raise RESTBadRequest(reason='Bad request', json={'errors': errors})\n return resource", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 
'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'device_token': 'str',\n 'notification_id': 'str',\n 'make': 'str',\n 'model': 'str',\n 'accept_notifications': 'bool',\n 'type': 'str',\n 'session_hash': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'device_token': 'deviceToken',\n 'notification_id': 'notificationId',\n 'make': 'make',\n 'model': 'model',\n 'accept_notifications': 'acceptNotifications',\n 'type': 'type',\n 'session_hash': 'sessionHash',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._device_token = None\n self._notification_id = None\n self._make = None\n self._model = None\n self._accept_notifications = None\n self._type = None\n self._session_hash = None\n self._self_uri = None", "def build_active_schema(cls, attrs):\n schema = cls()\n schema.add_attributes(attrs)\n return schema", "def __init__(self, schema ):\n self.schema = schema", "def _get_schema_from_object(self, data):\n if \"items\" in data:\n return self._get_schema_from_object(data[\"items\"])\n\n url_key = None\n\n if '$id' in data:\n url_key = '$id'\n\n if 'id' in data:\n url_key = 'id'\n\n if url_key:\n url = data[url_key]\n schema = Schema().build()\n schema.domain_entity = self.get_domain_entity_from_url(url)\n schema.high_level_entity = self.get_high_level_entity_from_url(url)\n schema.module = self.get_module_from_url(url)\n schema.url = url\n return schema\n\n return None", "def __init__(self, raw): #pylint: disable=super-init-not-called\n self.raw = raw\n try:\n if not isinstance(raw, dict):\n raw = self.decoder(raw) #pylint: disable=not-callable,too-many-function-args\n self.decoded = self.schema(raw) #pylint: disable=not-callable,too-many-function-args\n except voluptuous.Error:\n log.exception('Api response failed Entity schema validation: %s', self.raw)\n exc_info = sys.exc_info()\n raise InvalidEntity, exc_info[1], exc_info[2]\n except Exception:\n log.exception('Failed to parse api response using decoder %s: %s', self.decoder.__name__, self.raw)\n exc_info = sys.exc_info()\n raise MalformedEntity, exc_info[1], exc_info[2]\n if isinstance(self.schema.schema, dict):\n for key in self.schema.schema.iterkeys():\n setattr(self, str(key), self.decoded.get(key))", "def registration_schema(self, ctx):\n schema = RegistrationSchema()\n schema.context['ctx'] = ctx\n return schema", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 
'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def to_json_schema(cls):\n return parsers.to_json_schema(cls)", "def load_request_data(schema):\n\n def wrapper(func):\n @wraps(func)\n def decorated_func(*args, **kwargs):\n data = schema().load(request.get_json())\n return func(data=data, *args, **kwargs)\n\n return decorated_func\n\n return wrapper", "def __init__(\n self,\n graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],\n dsl_schema: DSLSchema,\n ):\n self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type\n self._dsl_schema = dsl_schema\n log.debug(f\"Creating {self!r})\")", "def load_schema(path, collection, readonly):\n return JSONStorage(path, collection, readonly)", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)", "def __init__(self, name):\n super(SchemaStub, self).__init__()\n self.model = SchemaStub._ModelStub()\n self.name = name", "def load_validation_schema(self) -> t.Dict[str, t.Any]:\n if self._schema is None:\n try:\n self._schema = json.loads(self.schema())\n except KeyError:\n device_type_striped = self._device_type.lower().rstrip(string.digits)\n with open(_CT_FILES[device_type_striped], encoding=\"utf-8\") as file_:\n self._schema = json.load(file_)\n return self._schema # type: ignore", "def get_schema(self) -> dict:", "def schema(self, name):\n return model.Schema(self, name)", "def __new__(cls,name,description,args_in,required=True,data_type=None,schema=None):\n mydict={\n \"name\":name,\n \"description\":description,\n \"in\":args_in,\n \"required\":required,\n \"schema\":schema,\n \"type\":data_type,\n }\n if args_in!=\"body\":\n mydict[\"type\"]=data_type\n return mydict", "def setup_schema(command, conf, vars):\n 
import ming\n import allura\n\n # turbogears has its own special magic wired up for its globals, can't use a regular Registry\n tgl = RequestLocals()\n tgl.tmpl_context = EmptyClass()\n tgl.app_globals = config['tg.app_globals']\n tg.request_local.context._push_object(tgl)\n\n REGISTRY.prepare()\n REGISTRY.register(allura.credentials, allura.lib.security.Credentials())\n\n configure_ming(conf)\n if asbool(conf.get('activitystream.recording.enabled', False)):\n activitystream.configure(**h.convert_bools(conf, prefix='activitystream.'))\n # Nothing to do\n log.info('setup_schema called')", "def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def get_schema(cls):\n sa_question = schema_fields.FieldRegistry(\n 'Short Answer Question',\n description='short answer question',\n extra_schema_dict_values={'className': 'sa-container'})\n\n sa_question.add_property(schema_fields.SchemaField(\n 'version', '', 'string', optional=True, hidden=True))\n sa_question.add_property(schema_fields.SchemaField(\n 'question', 'Question', 'html', optional=True,\n extra_schema_dict_values={'className': 'sa-question'}))\n sa_question.add_property(schema_fields.SchemaField(\n 'description', 'Description', 'string', optional=True,\n extra_schema_dict_values={'className': 'sa-description'},\n description=messages.QUESTION_DESCRIPTION))\n sa_question.add_property(schema_fields.SchemaField(\n 'hint', 'Hint', 'html', optional=True,\n extra_schema_dict_values={'className': 'sa-hint'}))\n sa_question.add_property(schema_fields.SchemaField(\n 'defaultFeedback', 'Feedback', 'html', optional=True,\n extra_schema_dict_values={'className': 'sa-feedback'},\n description=messages.INCORRECT_ANSWER_FEEDBACK))\n\n sa_question.add_property(schema_fields.SchemaField(\n 'rows', 'Rows', 'string', optional=True,\n extra_schema_dict_values={\n 'className': 'sa-rows',\n 'value': SaQuestionConstants.DEFAULT_HEIGHT_ROWS\n },\n description=messages.INPUT_FIELD_HEIGHT_DESCRIPTION))\n sa_question.add_property(schema_fields.SchemaField(\n 'columns', 'Columns', 'string', optional=True,\n extra_schema_dict_values={\n 'className': 'sa-columns',\n 'value': SaQuestionConstants.DEFAULT_WIDTH_COLUMNS\n },\n description=messages.INPUT_FIELD_WIDTH_DESCRIPTION))\n\n grader_type = schema_fields.FieldRegistry(\n 'Answer',\n extra_schema_dict_values={'className': 'sa-grader'})\n grader_type.add_property(schema_fields.SchemaField(\n 'score', 'Score', 'string', optional=True,\n extra_schema_dict_values={'className': 'sa-grader-score'}))\n grader_type.add_property(schema_fields.SchemaField(\n 'matcher', 'Grading', 'string', optional=True,\n select_data=cls.GRADER_TYPES,\n extra_schema_dict_values={'className': 'sa-grader-score'}))\n grader_type.add_property(schema_fields.SchemaField(\n 'response', 'Response', 'string', optional=True,\n extra_schema_dict_values={'className': 'sa-grader-text'}))\n grader_type.add_property(schema_fields.SchemaField(\n 'feedback', 'Feedback', 'html', optional=True,\n extra_schema_dict_values={'className': 'sa-grader-feedback'}))\n\n graders_array = schema_fields.FieldArray(\n 'graders', '', item_type=grader_type,\n extra_schema_dict_values={\n 'className': 'sa-grader-container',\n 'listAddLabel': 'Add an answer',\n 'listRemoveLabel': 'Delete this answer'})\n\n 
sa_question.add_property(graders_array)\n\n return sa_question", "def get_schema() -> Dict[str, Any]:\n\n # Schema for Dataset\n data_schema = {\n \"id\": merge(tstring, required),\n \"name\": merge(tstring, nullable, default(None)),\n \"validation_ratio\": merge(tfloat, default(0.0005)),\n \"num_proc\": merge(tinteger, default(64)),\n \"eval_num_proc\": merge(tinteger, default(4)),\n }\n\n # Schema for Model\n model_schema = {\n \"id\": merge(tstring, required),\n \"gradient_checkpointing\": merge(tboolean, default(False)),\n \"gc_checkpoint_every\": merge(tinteger, default(-1)),\n \"pretrained_tokenizer\": merge(tboolean, default(True)),\n \"seq_len\": merge(tinteger, default(1024)),\n \"reorder_attn\": merge(tboolean, default(True)),\n \"upcast_attn\": merge(tboolean, default(True)),\n \"initial_weights\": merge(tstring, nullable, default(None)),\n \"config_path\": merge(tstring, nullable, default(None)),\n }\n\n # Schema for Huggingface Trainer and Training Arguments\n trainer_schema = {\n \"output_dir\": merge(tstring, nullable, default(None)),\n \"do_train\": merge(tboolean, default(True)),\n \"evaluation_strategy\": merge(tstring, default(\"steps\")),\n \"per_device_train_batch_size\": merge(tinteger, default(2)),\n \"per_device_eval_batch_size\": merge(tinteger, default(8)),\n \"gradient_accumulation_steps\": merge(tinteger, default(1)),\n \"prediction_loss_only\": merge(tboolean, default(True)),\n \"learning_rate\": merge(tfloat, default(5.0e-5)),\n \"weight_decay\": merge(tfloat, default(0.01)),\n \"adam_beta1\": merge(tfloat, default(0.9)),\n \"adam_beta2\": merge(tfloat, default(0.999)),\n \"adam_epsilon\": merge(tfloat, default(1.0e-8)),\n \"max_grad_norm\": merge(tfloat, default(1.0)),\n \"max_steps\": merge(tinteger, default(-1)),\n \"lr_scheduler_type\": merge(tstring, default(\"cosine\")),\n \"warmup_steps\": merge(tinteger, default(1000)),\n \"run_name\": merge(tstring, nullable, default(None)),\n \"logging_dir\": merge(tstring, default(\"logs\")),\n \"logging_first_step\": merge(tboolean, default(True)),\n \"logging_steps\": merge(tinteger, default(100)),\n \"eval_steps\": merge(tinteger, default(1000)),\n \"save_steps\": merge(tinteger, default(1000)),\n \"ignore_data_skip\": merge(tboolean, default(False)),\n \"seed\": merge(tinteger, default(42)),\n \"fp16\": merge(tboolean, default(True)),\n \"fp16_backend\": merge(tstring, default(\"auto\")),\n \"sharded_ddp\": merge(tstring, nullable, default(None)),\n \"deepspeed\": merge(tstring, nullable, default(None)),\n \"dataloader_num_workers\": merge(tinteger, default(4)),\n \"local_rank\": merge(tinteger, nullable, default(None)),\n }\n\n # Schema for Online Custom Evaluation Datasets (e.g. 
LAMBADA)\n online_eval_schema = {\n \"do_wikitext\": merge(tboolean, default(True)),\n \"do_lambada\": merge(tboolean, default(True)),\n \"stride\": merge(tinteger, default(512)),\n }\n\n # Schema for Storing Training and Data Artifacts\n artifacts_schema = {\n \"cache_dir\": merge(tstring, default(\"/u/scr/nlp/mercury/mistral/artifacts\")),\n \"run_dir\": merge(tstring, default(\"/u/scr/nlp/mercury/mistral/runs\")),\n }\n\n # Combined Schema for `train.py`\n mistral_schema = {\n \"dataset\": stdict(data_schema),\n \"model\": stdict(model_schema),\n \"training_arguments\": stdict(trainer_schema),\n \"online_eval\": stdict(online_eval_schema),\n \"artifacts\": stdict(artifacts_schema),\n \"effective_bsz\": merge(tinteger, default(512)),\n \"resume\": merge(tboolean, default(False)),\n \"resume_checkpoint\": merge(tstring, nullable, default(None)),\n \"checkpoint_frequency\": merge(merge(tlist, schema(merge(tlist, schema(tinteger)))), nullable, default(None)),\n \"log_level\": merge(tinteger, default(20)),\n \"run_id\": merge(tstring, nullable, default(None)),\n \"wandb\": merge(tstring, nullable, default(None)),\n \"group\": merge(tstring, nullable, default(None)),\n \"seed\": merge(tinteger, default(42)),\n \"run_training\": merge(tboolean, default(True)),\n \"run_final_eval\": merge(tboolean, default(True)),\n \"use_gpu\": merge(tboolean, default(True)),\n # Infra Params - Passed in from `torch.distributed`\n \"local_rank\": merge(tinteger, default(-1)),\n \"nnodes\": merge(tinteger, default(-1)),\n \"nproc_per_node\": merge(tinteger, default(-1)),\n # Infra Params - Passed in from DeepSpeed\n \"num_gpus\": merge(tinteger, default(-1)),\n \"num_nodes\": merge(tinteger, default(-1)),\n \"world_size\": merge(tinteger, default(-1)),\n }\n\n return mistral_schema", "async def create_block_schema(self, block_schema: BlockSchemaCreate) -> BlockSchema:\n try:\n response = await self._client.post(\n \"/block_schemas/\",\n json=block_schema.dict(\n json_compatible=True,\n exclude_unset=True,\n exclude={\"id\", \"block_type\", \"checksum\"},\n ),\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_409_CONFLICT:\n raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e\n else:\n raise\n return BlockSchema.parse_obj(response.json())", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'ticket_id': 'str',\n 'type': 'str',\n 'from_number': 'str',\n 'from_name': 'str',\n 'to_number': 'str',\n 'to_name': 'str',\n 'via_number': 'str',\n 'date_created': 'datetime',\n 'date_answered': 'datetime',\n 'date_finished': 'datetime'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'ticket_id': 'ticketId',\n 'type': 'type',\n 'from_number': 'fromNumber',\n 'from_name': 'fromName',\n 'to_number': 'toNumber',\n 'to_name': 'toName',\n 'via_number': 'viaNumber',\n 'date_created': 'dateCreated',\n 'date_answered': 'dateAnswered',\n 'date_finished': 'dateFinished'\n }\n\n self._id = None\n self._ticket_id = None\n self._type = None\n self._from_number = None\n self._from_name = None\n self._to_number = None\n self._to_name = None\n self._via_number = None\n self._date_created = None\n self._date_answered = None\n self._date_finished = None", "def schema(cls):\n return Schema.get_instance(cls)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 
'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def create_schema(client):\n base = WOQLQuery().doctype(\"EphemeralEntity\").label(\"Ephemeral Entity\").description(\"An entity that has a lifespan\")\n base.property(\"lifespan_start\", \"dateTime\").label(\"Existed From\")\n base.property(\"lifespan_end\", \"dateTime\").label(\"Existed To\")\n \n country = WOQLQuery().add_class(\"Country\").label(\"Country\").description(\"A nation state\").parent(\"EphemeralEntity\")\n country.property(\"iso_code\", \"string\").label(\"ISO Code\")\n country.property(\"fip_code\", \"string\").label(\"FIP Code\") \n\n airline = WOQLQuery().add_class(\"Airline\").label(\"Airline\").description(\"An operator of airplane flights\").parent(\"EphemeralEntity\")\n airline.property(\"registered_in\", \"Country\").label(\"Registered In\"),\n \n airport = WOQLQuery().add_class(\"Airport\").label(\"Airport\").description(\"An airport where flights terminate\").parent(\"EphemeralEntity\")\n airport.property(\"situated_in\", \"Country\").label(\"Situated In\"),\n \n flight = WOQLQuery().add_class(\"Flight\").label(\"Flight\").description(\"A flight between airports\").parent(\"EphemeralEntity\")\n flight.property(\"departs\", \"Airport\").label(\"Departs\")\n flight.property(\"arrives\", \"Airport\").label(\"Arrives\")\n flight .property(\"operated_by\", \"Airline\").label(\"Operated By\") \n\n schema = WOQLQuery().when(True).woql_and(base, country, airline, airport, flight)\n return schema.execute(client)", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def from_schema(cls, schema, *args, **kwargs):\r\n\r\n return cls(schema.get(u\"id\", u\"\"), schema, *args, **kwargs)", "def __init__(self):\n\t\tself.parsed = False\n\t\tdir_path = 
os.path.dirname(os.path.realpath(__file__))\n\t\tself.xsdfilename = os.path.join(dir_path, 'xml', 'schema.xsd')\n\t\tself.schema = 'schema.xsd'\n\t\tself.predictors = []\n\t\tself.predictors_types = []\n\t\tself.preprocessing_methods = []", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def get_schema() -> dict:\n raise NotImplementedError()", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' % data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def __init__(self):\n self.swagger_types = {\n 'discovery': 'Discovery',\n 'groups': 'list[str]',\n 'labels': 'object'\n }\n\n self.attribute_map = {\n 'discovery': 'discovery',\n 'groups': 'groups',\n 'labels': 'labels'\n }\n\n self._discovery = None\n self._groups = None\n self._labels = None", "def schema(self, schema, in_='formData'):\n parameters = core.parameters_from_object_schema(schema, in_=in_)\n return compose(*map(self.parameter, parameters))", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def schema_helper(self, name, _, schema=None, **kwargs):\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def to_model(cls, result, additional_opts=None):\n opts = {}\n opts.update(cls.get_schema_opts())\n if additional_opts is not None:\n opts.update(additional_opts)\n schema = cls.Schema(**opts)\n model, errors = schema.load(result)\n return model", "def schema() -> None:\n pass", "def _Dynamic_GetSchema(self, req, schema, request_id=None):\n # This is not used, but it is required for the method signature.\n del request_id\n\n app_str = req.app()\n self.__ValidateAppId(app_str)\n schema.set_more_results(False)", "async def get_schema(request: Request, namespace: str, project: str):\n # endpoint to schema.databio.org/...\n # like pipelines/ProseqPEP.yaml\n\n try:\n schema = eido.read_schema(\n f\"https://schema.databio.org/{namespace}/{project}.yaml\"\n )[0]\n except IndexError:\n raise HTTPException(status_code=404, detail=\"Schema not found\")\n\n return schema", "def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, 
response_data, Operation.Query.value.default_code)", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema", "def __init__(self):\n self.swagger_types = {\n 'status': 'str',\n 'download_url': 'str',\n 'download_id': 'str',\n 'message': 'str',\n 'number_of_pages': 'int',\n 'validation_errors': 'str'\n }\n\n self.attribute_map = {\n 'status': 'status',\n 'download_url': 'download_url',\n 'download_id': 'download_id',\n 'message': 'message',\n 'number_of_pages': 'number_of_pages',\n 'validation_errors': 'validation_errors'\n }\n\n self._status = None\n self._download_url = None\n self._download_id = None\n self._message = None\n self._number_of_pages = None\n self._validation_errors = None" ]
[ "0.62221414", "0.6016679", "0.5968359", "0.5946103", "0.5910788", "0.59025574", "0.5863402", "0.58183753", "0.5814539", "0.57973915", "0.57765687", "0.5719145", "0.57047343", "0.5694497", "0.565712", "0.56565285", "0.56380844", "0.56073123", "0.55723673", "0.5570385", "0.55048895", "0.54854393", "0.5479786", "0.54777753", "0.54689896", "0.5460747", "0.5459226", "0.5449624", "0.5425599", "0.5399636", "0.5393008", "0.5391334", "0.53851616", "0.53758514", "0.53626174", "0.5355976", "0.5323404", "0.5316405", "0.5315464", "0.5312455", "0.530457", "0.530187", "0.52944905", "0.5285919", "0.528041", "0.5272388", "0.5265292", "0.52571213", "0.52503395", "0.52426374", "0.5234752", "0.5229711", "0.51956004", "0.5187567", "0.51860374", "0.5183247", "0.5180811", "0.51795727", "0.51736796", "0.5171795", "0.51575273", "0.51401573", "0.5135389", "0.51301104", "0.5127689", "0.5127253", "0.5118966", "0.5117194", "0.51169753", "0.5097252", "0.50809485", "0.50745124", "0.50683784", "0.50661474", "0.50637245", "0.5062369", "0.5047935", "0.5042027", "0.50291425", "0.50195616", "0.50158125", "0.50073344", "0.49974093", "0.4989746", "0.4986609", "0.49821594", "0.49798182", "0.49687216", "0.49685133", "0.49643275", "0.49588346", "0.4957385", "0.49568987", "0.4954345", "0.4950904", "0.4950663", "0.4949728", "0.49385536", "0.49333733", "0.4932597" ]
0.67564994
0
Make a synchronous request to the endpoint and return the response as JSON.
def introspect_schema(cls, endpoint: str, transport: Transporter) -> Dict: return request_schema(endpoint, transport.session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def request_json(self, endpoint, method='get', **kwargs):\n request = self.request(endpoint, method=method, **kwargs)\n if not request.status_code == 200:\n try:\n raise RuntimeError(\"Server responded with HTTP response code {}, with content: {}.\".format(request.status_code, json.dumps(request.json())))\n except:\n raise RuntimeError(\"Server responded with HTTP response code {}, with content: {}.\".format(request.status_code, request.content.decode('utf-8')))\n return request.json()", "def req(url):\n headers = {'Accept': 'application/json'}\n timeout = 10\n r = requests.get(url, headers=headers, timeout=timeout)\n response_json = r.text\n return response_json", "def make_request(self, url):\n try:\n response = requests.get(url)\n if response.status_code != 200:\n return None\n return response.json()\n except requests.ConnectionError:\n return None", "def _call(self, method, url, params):\n if not url.startswith('http'):\n url = self.root + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n r = self._session.request(method, url,\n headers=headers,\n proxies=self.proxies,\n params=params,\n timeout=self.requests_timeout)\n r.raise_for_status() # Check for error\n return r.json()", "def api_call(endpoint, params, headers):\n\n api_response = get(BASE_URL.format(endpoint=endpoint), params=params,\n headers=headers)\n\n api_response.raise_for_status()\n json_resp = api_response.json()\n\n api_response.close()\n return json_resp", "async def _request(self, method_name: str, params: dict) -> dict:\n url = self.api_url + method_name\n\n async with self._session() as sess:\n async with sess.get(url, params=params) as res:\n return await res.json()", "async def _get_request(self, url):\n # Request the specific URL\n async with self.session.get(url, headers=self.headers) as resp:\n # Finally return the response\n return await resp.json()", "def json_api_call(url):\n response = requests.get(url)\n return response.json()", "def request(self):\n cookies = {\n \"session\": self.session_cookie\n }\n url = self.get_request_url()\n\n r = requests.get(url, cookies=cookies)\n return r.json()", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "def make_request(url, params):\n response = requests.get(url, params=params)\n return json.loads(response.text)", "def getrequest_json(self, url, params, headers=None):\n if headers is None:\n headers = {}\n full_url = \"%s/%s\" % (self.connection_url, url)\n response = self.request.get(full_url, headers=headers, json=params)\n return response", "async def _async_request(\n self, method: str, endpoint: str, **kwargs: Dict[str, Any]\n ) -> Union[Dict[str, Any], List[str]]:\n url = f\"http://{self._ip_address}/{endpoint}\"\n\n use_running_session = self._session and not self._session.closed\n if use_running_session:\n session = self._session\n else:\n session = ClientSession(timeout=ClientTimeout(total=DEFAULT_TIMEOUT))\n\n assert session\n\n data: Dict[str, Any] = 
{}\n\n try:\n async with session.request(method, url, **kwargs) as resp:\n data = await resp.json()\n resp.raise_for_status()\n except (ClientError, json.decoder.JSONDecodeError) as err:\n raise RequestError(f\"Error while requesting {url}: {err}\") from err\n finally:\n if not use_running_session:\n await session.close()\n\n LOGGER.debug(\"Received data for %s: %s\", url, data)\n\n return data", "def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))", "def get_json(self, url, *, timeout, headers):", "async def _request(\n self, req_method: str, endpoint: str, extra_query: QueryDict = None, json=True\n ) -> Union[Dict, bytes]:\n if req_method not in (\"GET\", \"POST\"):\n raise APIError(f\"{req_method} not a known request method!\")\n\n url = await self._create_url(endpoint, extra_query=extra_query)\n\n async with aiohttp.ClientSession() as session:\n\n session_methods = {\"GET\": session.get, \"POST\": session.post}\n\n async with session_methods[req_method](url) as resp:\n self.logger.debug(\"got response: %s\", resp)\n\n if resp.status == 200:\n if json:\n data = await resp.json()\n self.logger.debug(\"got json: %s\", data)\n if data[\"subsonic-response\"][\"status\"] == \"failed\":\n raise APIError(\n data[\"subsonic-response\"][\"error\"][\"message\"]\n )\n return data\n\n data = await resp.read()\n return data\n\n raise APIError(f\"got status code {resp.status}!\")", "def get_whole_response_as_json(url, session=None):\n req = session or requests\n response = req.get(url, headers=get_headers())\n response.raise_for_status()\n if response.status_code == requests.codes.no_content:\n raise NoContent(\"204 No Content\")\n elif response.status_code == requests.codes.accepted:\n raise Accepted(\"202 Accepted. No cached data. 
Retry.\")\n return response.json()", "def get_json(self, url, params, timeout=5, retries=3, data=None):\n return self.request('GET', url, params, timeout=timeout, retries=retries, data=data)", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "async def async_query_api(self, endpoint, payload=None):\n async with RetryClient() as client:\n # The Eskom API occasionally drops incoming connections, implement reies\n async with client.get(\n url=self.base_url + endpoint,\n headers=self.headers,\n params=payload,\n ssl=self.ssl_context,\n retry_attempts=50,\n retry_exceptions={\n ClientConnectorError,\n ServerDisconnectedError,\n ConnectionError,\n OSError,\n },\n ) as res:\n return await res.json()", "async def fetch(self, session, url):\n async with session.get(url) as response:\n if response.status != 200:\n response.raise_for_status()\n response = await response.text()\n return json.loads(response)", "def request(self, endpoint, query_args={}):\n query_defaults = {\n 'clientId': settings.CSE_API_CLIENT_ID,\n 'appId': settings.CSE_API_APP_ID\n }\n\n query = dict(query_defaults.items() + query_args.items())\n result = urllib2.urlopen('%s%s?%s' % (settings.CSE_API_HOST, endpoint, urllib.urlencode(query)))\n data = result.read()\n\n return json_to_obj(data)", "async def request(\n self, method: str, path: Optional[str] = \"\", json: Optional[dict] = None\n ) -> dict:\n LOGGER.debug('Sending \"%s\" \"%s\" to \"%s %s\"', method, json, self.host, path)\n\n url = f\"http://{self.host}:{self.port}/api/{self.api_key}{path}\"\n\n try:\n async with self.session.request(method, url, json=json) as res:\n\n if res.content_type != \"application/json\":\n raise ResponseError(\n \"Invalid content type: {}\".format(res.content_type)\n )\n\n response = await res.json()\n LOGGER.debug(\"HTTP request response: %s\", pformat(response))\n\n _raise_on_error(response)\n\n return response\n\n except client_exceptions.ClientError as err:\n raise RequestError(\n \"Error requesting data from {}: {}\".format(self.host, err)\n ) from None", "async def request(\r\n self, method: str, url: str, params: dict = None, data: dict = None\r\n ):\r\n async with self._session.request(\r\n method,\r\n url,\r\n params=params,\r\n json=data,\r\n headers={\"Authorization\": \"Bearer \" + self._token},\r\n ) as resp:\r\n if resp.status == 200:\r\n return await resp.json()\r\n if resp.status in (400, 422, 429, 500):\r\n data = None\r\n try:\r\n data = await resp.json()\r\n except Exception: # pylint: disable=broad-except\r\n pass\r\n raise APIResponseError(\r\n resp.request_info,\r\n resp.history,\r\n status=resp.status,\r\n message=resp.reason,\r\n headers=resp.headers,\r\n data=data,\r\n )\r\n resp.raise_for_status()", "def call_api(self, url, method='GET', headers=None, params=None, data=None):\n r = requests.request(method=method, url=url, headers=headers, params=params, data=data)\n \n self.log.debug(f'Called endpoint {url} with result {r}')\n\n try:\n jayson = json.loads(r.text)\n return jayson\n except:\n self.log.info(f'ERROR! 
Text of response object: {r.text}')", "def make_request(url):\r\n\r\n req = urllib2.Request(url)\r\n response = urllib2.urlopen(req)\r\n data = json.loads(response.read())\r\n response.close()\r\n\r\n return data", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def json_request(self, method, uri, body=None, params=None,\n extra_headers=None):\n if extra_headers:\n extra_headers = CaseInsensitiveDict(extra_headers)\n else:\n extra_headers = CaseInsensitiveDict()\n extra_headers['Content-Type'] = 'application/json'\n extra_headers['Accept'] = 'application/json'\n if body is not None:\n body = json.dumps(body, cls=self.JsonEncoder)\n r = self._request(method, uri, body, params, extra_headers)\n if r.status_code == 204 or len(r.content) == 0:\n return None # no data\n return r.json()", "def get_json(self, url, params=None, headers=None, timeout=10):\r\n headers = headers or self.headers\r\n try:\r\n return self.request(url=url, method='GET', params=params, extra_headers=headers, timeout=timeout).json()\r\n except ValueError:\r\n return None\r\n except requests.exceptions.ProxyError:\r\n return None\r\n except requests.RequestException as error:\r\n print(error)\r\n if self._debug:\r\n logging.exception(\r\n ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))\r\n return None", "def request_json(url):\n return json.loads(requests.get(url).content.decode('utf-8'))", "async def send_request(self, url: str, params: dict) -> dict:\n if self.session is None:\n # Create a session if one doesn't exist\n await self.create_session()\n\n async with self.session.get(url, params=params, headers=self.headers) as resp:\n # Make sure that the response of the request\n # returns code 200. 
Something wrong happened if it doesn't\n if not (300 > resp.status >= 200):\n # Raise an error if the status code isn't 200\n raise ParsingError(f\"Library error parsing request from API: {str(resp.status)}\")\n\n try:\n # We attempt to return the contents\n # of the request in JSON format\n response = await resp.json()\n if resp.status >= 400:\n # This is a validation error from the API\n # Likely has to do with missing/improper params\n missing_params = list()\n for param in response[\"detail\"]:\n missing_params.append(f\"{param['msg']} - {param['loc'][0]}\")\n raise InvalidParams(f\"Impropert params in given request: {missing_params}\")\n # If that fails, simply return the contents of the request\n # without ant kind of formatting (aka just read it)\n except aiohttp.ClientResponseError:\n raise ParsingError(\"Could not return contents from the request\")\n\n # Return the respose from the request, if any\n return response", "def get(self):\n self.finish(json.dumps(self.build_response_dict()))", "async def fetch(session, url: str, params: dict = None) -> dict:\n async with session.post(url, data=params) as response:\n return await response.json(content_type=None)", "def do(self, method, **kwargs):\n d = {}\n if kwargs:\n d['data'] = kwargs\n\n res = getattr(requests, method)(\n self.uri,\n headers=self.HEADERS,\n **d\n )\n res.raise_for_status()\n return json.loads(res.content)", "async def request_api(url):\n\theaders = {\"User-Agent\": f\"Mozilla/5.0 aiotfm/{__version__}\"}\n\n\ttry:\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url, headers=headers) as resp:\n\t\t\t\treturn await resp.json()\n\texcept aiohttp.ClientError:\n\t\treturn {}", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def httpRequest(self, method, url='', data='', params={}, headers={}):\n\n headers[Connection.kHeaderContentType] = Connection.kContentJson\n\n absUrl = Connection.urljoin(self.url, url)\n result = self._getHttpSession().request(method,\n absUrl,\n data=str(data),\n params=params,\n headers=headers)\n return result", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def _api_request(self, endpoint, params=None):\n \n if params:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header},\n params=params)\n else:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header})\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return 
json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API call: {self.api_url}/{endpoint} | {code}\")", "def __call(method, resource, headers=None, json=None):\n result = requests.request(method, resource, headers=headers, json=json)\n\n if result:\n try:\n return result.json()\n except ValueError:\n pass\n\n _LOGGER.debug(\"Erroneous response (%s)\", result)\n return result", "def request_get(self, path, params=None):\n\tif params is None:\n\t\tparams = {}\n\t\trequest_url = self.host_url + path\n\t\ttry:\n\t\t\tresponse = self.session.get(request_url, auth=self.api_key, params=params)\n\t\texcept requests.RequestException as e:\n\t\t\traise self.DataUnavailable(\"Network exception\") from e\n\n\tif response.status_code != 200:\n\t\traise self.DataUnavailable(\n\t\t\t\"Unexpected response status (%s)\" % response.status_code\n\t\t)\n\n\treturn response.json()", "def send_request(url, params=None):\n try:\n prepped_request = requests.Request(\n \"GET\", url, params=params, auth=(USERNAME, TOKEN)\n ).prepare()\n r = fetch_url(prepped_request, session=SESSION)\n if isinstance(r, requests.Response):\n return r.json()\n except Exception as e:\n logger.warning(str(e))\n return None", "def _request_get(self, url):\n try:\n r = requests.get(url)\n except Exception:\n raise Exception('Cannot connect')\n if (r.status_code != 200):\n raise Exception('%d %s' % (r.status_code, r.text))\n if (not r.text) or (not r.text.strip()):\n raise Exception('Empty answer')\n try:\n response = json.loads(r.text)\n except Exception:\n raise Exception('Cannot parse response')\n return response", "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. 
HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def synchronous_call_function(function_url, request_data_body_dict, custom_header_dict = {}):\n \n logging.info(\"Sync Call FaaS function: {}\".format(function_url))\n\n headers = dict()\n\n for key, val in custom_header_dict.items():\n headers[key] = val\n \n response = requests.get(function_url, data = json.dumps(request_data_body_dict), headers = headers)\n\n logging.info(\"Sync Call Status: {}\".format(response.status_code))\n \n return response", "async def json(request):\n requester = request.headers.get('X-FORWARDED-FOR', None)\n print(\"Serving JSON requested by\", requester)\n try:\n component = request.match_info['component']\n except:\n component = None\n json_data = await data.get_data(component=component)\n return web.json_response(json_data)", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "async def _request(method, url, session=None, **kwargs):\n\n loop = asyncio.get_event_loop()\n\n client = session or aiohttp.ClientSession(loop=loop)\n try:\n resp = await client.request(method, url, **kwargs)\n status = resp.status\n content = await resp.read()\n await resp.release()\n finally:\n await client.close()\n\n r = Response(status, content)\n if r.status >= 400:\n raise HTTPRequestError(r.status, r.text)\n return r", "def _get(self, url, **queryparams):\n url = urljoin(self.base_url, url)\n if len(queryparams):\n url += '?' + urlencode(queryparams)\n try:\n r = self._make_request(**dict(\n method='GET',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()", "def get_json(self, url):\n json_response = self.testapp.get(url)\n self.assertEqual(json_response.status_int, 200)\n return self._parse_json_response(json_response, expect_errors=False)", "def _doRequest(self, httpClientMethod, *args):\n try:\n resp = httpClientMethod(*args)\n return resp.json()\n except RequestException as e:\n raise checkedError(e)", "def call(self, params):\n return json.loads(self._fetch_http(self._api_url, params))", "def __exec_request(self, URL) -> Any:\n headers = {\n \"X-ELS-APIKey\": self.config['apikey'],\n \"Accept\": 'application/json'\n }\n\n request = requests.get(\n URL,\n headers=headers\n )\n self._status_code = request.status_code\n\n if request.status_code == 200:\n return json.loads(request.text, strict=False)\n else:\n return \"failed\"", "def request_json_from_url(url, params={}):\n params[\"format\"] = \"json\"\n r = requests.get(url=url, params=params, headers=get_headers())\n r.raise_for_status()\n return r.json()", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "async def _api_call(self, url, payload={}, retry=False):\n timeout = aiohttp.ClientTimeout(total=self.api_timeout)\n try:\n async with self._client_session.get(\n API_URL + url, headers=self.headers, timeout=timeout, data=payload\n ) as resp:\n if not retry and resp.status == 401:\n await self.renew_auth()\n return await self._api_call(url, payload, True)\n\n # 4xx represents unauthenticated\n if resp.status == 401 or resp.status == 403 or 
resp.status == 404:\n raise SenseAuthenticationException(f\"API Return Code: {resp.status}\")\n\n if resp.status != 200:\n raise SenseAPIException(f\"API Return Code: {resp.status}\")\n\n return await resp.json()\n except asyncio.TimeoutError as ex:\n # timed out\n raise SenseAPITimeoutException(\"API call timed out\") from ex", "async def get_records_from_api(url: str, session: ClientSession):\n try:\n response = await session.request(method='GET', url=url)\n response.raise_for_status()\n log.info(f\"Response status ({url}): {response.status}\")\n return await response.json()\n except HttpProcessingError as http_err:\n log.info('An error occurred during the request. Error: ', http_err)\n raise http_err\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def api_call():\n\n json_str = load_input()\n output = {\n 'inputs': json_str,\n 'results': 'cool results'}\n\n return json.dumps(output), 200, {'Content-Type': 'text/plain;charset=utf-8'}", "def _request(self, endpoint, params=dict(), data=None):\n client_value = \"Python Netinfo\"\n headers = {'X-Request-Client': client_value}\n url = '/'.join([self.url, endpoint])\n kwargs = {'url': url, 'headers': headers, 'timeout': 30,\n 'params': params, 'data': data}\n response = requests.get(**kwargs)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded", "def request_routine(self, url, request_method, json_data=None):\n response_obj = requests.request(request_method,\n url=url,\n headers=self.header,\n data=json.dumps(json_data),\n verify=self.verify)\n\n LOG.debug('JovianDSS: Response code: %s', response_obj.status_code)\n LOG.debug('JovianDSS: Response data: %s', response_obj.text)\n\n ret = dict()\n ret['code'] = response_obj.status_code\n\n if '{' in response_obj.text and '}' in response_obj.text:\n if \"error\" in response_obj.text:\n ret[\"error\"] = json.loads(response_obj.text)[\"error\"]\n else:\n ret[\"error\"] = None\n if \"data\" in response_obj.text:\n ret[\"data\"] = json.loads(response_obj.text)[\"data\"]\n else:\n ret[\"data\"] = None\n\n return ret", "async def async_get(\n self, endpoint: str | None = None, entry: str | None = None\n ) -> Any:\n url = f\"{self.url}/api/\"\n if endpoint:\n url = self.endpoints[endpoint]\n if entry:\n url = f\"{url}{entry}\"\n with async_timeout.timeout(10):\n resp = await self.session.get(\n url=url,\n headers=self.headers,\n raise_for_status=True,\n )\n\n return await resp.json()", "def _request(self, endpoint, params=dict(), data=None):\n client_value = \"pyGreyNoise v%s\" % (str(self.CLIENT_VERSION))\n headers = {'X-Request-Client': 'pyGreyNoise', 'key': self.api_key}\n url = '/'.join([self.BASE_URL, self.API_VERSION, endpoint])\n self._log.debug('Requesting: %s', url)\n response = requests.get(url, headers=headers, timeout=7, params=params,\n data=data)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded", "def _fetch_json(self, url, payload):\n params = {\n 'data': json.dumps(payload),\n 'headers': {'content-type': 'application/json'},\n 'params': {'sid': self.sma_sid} if self.sma_sid else None,\n }\n for _ in range(3):\n try:\n with async_timeout.timeout(3):\n res = yield from 
self._aio_session.post(\n self._url + url, **params)\n return (yield from res.json()) or {}\n except asyncio.TimeoutError:\n continue\n return {'err': \"Could not connect to SMA at {} (timeout)\"\n .format(self._url)}", "def getJson(self,url):\n r = req.get(str(url),\"GET\")\n jsonResponse = json.loads(r.text)\n return jsonResponse", "def send_api_request(self, url, **kwargs):\n\n params = self._params.copy()\n dct = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}\n params.update(dct)\n\n res = requests.get(url, params=params)\n if res.status_code != 200:\n try:\n error = res.json()['error']\n except ValueError:\n error = None\n raise SwrveApiException(error, res.status_code, url, params)\n\n return res.json()", "def _get_json(self, url: str) -> dict:\n r = self._req_get(url)\n return r.json() if r else None", "def fetch_json(uri):\n data = requests.get(uri)\n # Raise an exception if the fetch failed.\n data.raise_for_status()\n return data.json()", "def _get(url, *, verbose=False): \n r = get_from_api(url, verbose=verbose)\n return json.loads(r.content)", "def _get_json_response(self, url, data, headers):\n if data:\n data = json.dumps(data)\n req = urllib2.Request(url, data, headers)\n response = urllib2.urlopen(req)\n raw_response = response.read()\n return raw_response", "def make_get_request(client, endpoint):\n return client.get(endpoint)", "def auth_getrequest_json(self, url, token, params):\n headers = {\n \"Authorization\": token,\n \"Content-Type\": \"application/json\"\n }\n\n response = self.getrequest_json(url, params, headers)\n return response", "def call(self):\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_response(status_code, response)", "async def request(self) -> Any:\n raise NotImplementedError()", "def get_json(self, *args, **kwargs):\r\n resp = self.request_with_auth(\"get\", *args, **kwargs)\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp[\"Content-Type\"].startswith(\"application/json\"))\r\n return json.loads(resp.content)", "def get_request(query_url):\n\n stream = urlopen(query_url)\n result = json.loads(stream.read().decode())\n return result", "def get_response(request_url):\n response = requests.get(request_url)\n return json.loads(response.text)", "def Access_URL(url): \n r = requests.get(url) \n json = r.json() \n return json", "def api_request(method, url, **kwargs):\n if not settings.BLOCKSTORE_API_AUTH_TOKEN:\n raise ImproperlyConfigured(\"Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.\")\n kwargs.setdefault('headers', {})['Authorization'] = f\"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}\"\n response = requests.request(method, url, **kwargs)\n if response.status_code == 404:\n raise NotFound\n response.raise_for_status()\n if response.status_code == 204:\n return None # No content\n return response.json()", "def get_response(endpoint, method, json_data=None, auth=None):\n request = requested_method(endpoint, method=method, json_data=json_data, auth=auth)\n return request.json()", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def _get(self, endpoint):\n res = self._request(\"get\", endpoint)\n if not res.content:\n return {}\n try:\n res = res.json()\n except ValueError:\n raise ValueError(\"Cannot parse {} as JSON\".format(res))\n if \"error\" in res:\n raise AirthingsError(res[\"error\"])\n return res", "def get_json(url):\n r = 
requests.get(url)\n return r.json()", "def getRequest(endpoint):\n\n headers = {\n \"Accept\": \"application/json\",\n \"Authorization\": \"bearer 7TUFM5BG2ikyTr5RQzNvFm3ALKC_7PUzGNcybO91WWny93bEzmmeeze0iXJNSisEno42aN2CKJIQOBr52ZeUeyFABfo1lq0aQDhCMJfsMFmC_l-FRCTeIN1DpqOHvkIgLmq5hlvUBk-q11VzNQZzA8L1QY4JoAGXEnXa-SojXw_2elYfzgpstXxtph9FcStOPX4YX0L1uDNPQAbO7gYLcbiD1d7qIoIA6wK71TO_WoDa10v6z-m1tNmYmhw98odThrSbkxhRJD9ktSAGOX8IgelS6TnkrsUGkzgxZjtGcMu_YnW_391vR7zJWwIPY-8LFc8Ueg\"\n }\n\n response = requests.get(endpoint, headers=headers)\n\n # print(response.json()) # use for testing and debugging\n\n return response.json()", "def send_request(self, request):\n json_results = requests.get(request).json()\n\n status = json_results['status']\n\n if status == const.STATUS_OK:\n return json_results['results']\n\n self.log.warning(self.get_status_code(status))", "def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp", "def _call(self, request_method, endpoint, params=None, data=None):\n response = request_method(\n self._construct_url(endpoint),\n params=params,\n data=data,\n auth=BsdApiAuth(self.api_id, self.api_secret)\n )\n\n if response.status_code == 202 and endpoint != \"get_deferred_results\":\n return self._resolve_deferred_response(response, self.deferred_result_max_attempts)\n else:\n return response", "def request_json(\n url, parameters=None, body=None, headers=None, cache=True, agent=None\n):\n assert url\n session = get_session()\n\n log.info(\"-\" * 80)\n log.info(\"url: %s\", url)\n\n if isinstance(headers, dict):\n headers = clean_dict(headers)\n else:\n headers = dict()\n if isinstance(parameters, dict):\n parameters = d2l(clean_dict(parameters))\n if body:\n method = \"POST\"\n headers[\"content-type\"] = \"application/json\"\n headers[\"user-agent\"] = get_user_agent(agent)\n headers[\"content-length\"] = ustr(len(body))\n else:\n method = \"GET\"\n headers[\"user-agent\"] = get_user_agent(agent)\n\n initial_cache_state = session._is_cache_disabled # yes, i'm a bad person\n try:\n session._is_cache_disabled = not cache\n response = session.request(\n url=url,\n params=parameters,\n json=body,\n headers=headers,\n method=method,\n timeout=1,\n )\n status = response.status_code\n content = response.json() if status // 100 == 2 else None\n cache = getattr(response, \"from_cache\", False)\n except Exception as e:\n content = None\n status = 500\n log.debug(e, exc_info=True)\n else:\n log.debug(\"method: %s\", method)\n log.debug(\"headers: %r\", headers)\n log.debug(\"parameters: %r\", parameters)\n log.debug(\"cache: %r\", cache)\n log.info(\"status: %d\", status)\n log.debug(\"content: %s\", content)\n finally:\n session._is_cache_disabled = initial_cache_state\n return status, content", "def get(self, url):\n headers = {\"Authorization\": \"Bearer \" + self.token}\n full_url = self.api_url + starts_slash(url)\n logging.info(\"GET url: \" + str(full_url))\n logging.info(\"GET header: \" + str(headers))\n try:\n result = requests.get(full_url, headers=headers).json()\n except json.decoder.JSONDecodeError:\n result = \"error parsing JSON response\"\n logging.info(\"GET result: \" + str(result))\n return result", "def _request(self, method, url, payload=None, **params):\n kwargs = dict(params=params)\n kwargs[\"timeout\"] = self._timeout\n if not url.startswith('http'):\n url = self.prefix + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n if payload:\n kwargs[\"data\"] = 
json.dumps(payload)\n gs = self._gpool.spawn if self._gpool else gevent.spawn\n r = gs(self.session.request, method, url, headers=headers, **kwargs)\n r.fetch = partial(self.join, r)\n update_wrapper(r.fetch, self.join)\n #gevent.sleep(0.05)\n return r", "def _request(self, opts, query, query_key='q'):\n params = opts['params']\n params[query_key] = query\n resp = requests.get(opts['url'], params=params, headers=self._headers)\n if not resp.ok:\n raise Exception(\"Server threw an error for: {}\".format(resp.url))\n return resp.json()", "def __getJson(self, _uri):\n\n #-------------------- \n # Add explicit format=json if not already there\n #-------------------- \n if 'format=json' not in _uri:\n if '?' in _uri:\n _uri += '&format=json'\n else:\n _uri += '?format=json'\n\n\n #-------------------- \n # Get the response from httpRequest\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _uri)\n r = self.__httpsRequest('GET', xnatUrl)\n\n #-------------------- \n # Try to load the response as a JSON...\n #-------------------- \n try:\n return r.json()['ResultSet']['Result']\n except Exception as e:\n self.exceptionPopup.setText(str(e))\n self.runEventCallbacks('jsonError', self.host.encode(),\n self.username.encode(), r)", "def __GetJson(self, url, auth, responseProcessor = None):\n\n conn = self.__GetConnection()\n conn.request(\"GET\", url, \"\", self.__MakeHeaders(auth))\n response = conn.getresponse()\n if (responseProcessor != None):\n if (responseProcessor(response) == False):\n return None\n\n self.__CheckResponse(response)\n data = response.read()\n return cjson.decode(data)", "async def get(url, session=None, **kwargs):\n\n method = 'GET'\n resp = await _request(method, url, session=session, **kwargs)\n return resp", "def getjson(url, **kwargs):\n json = fetch_resource(url, **kwargs)\n return simplejson.loads(json)", "def makeApiCall(url, endpointParams, type):\r\n\r\n if type == 'POST': # post request\r\n data = requests.post(url, endpointParams)\r\n else: # get request\r\n data = requests.get(url, endpointParams)\r\n\r\n response = dict() # hold response info\r\n response['url'] = url # url we are hitting\r\n response['endpoint_params'] = endpointParams # parameters for the endpoint\r\n response['endpoint_params_pretty'] = json.dumps(endpointParams, indent=4) # pretty print for cli\r\n response['json_data'] = json.loads(data.content) # response data from the api\r\n response['json_data_pretty'] = json.dumps(response['json_data'], indent=4) # pretty print for cli\r\n\r\n return response # get and return content\r", "def req(url, headers=None):\n if headers is None:\n headers = {}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n response = json.loads(response.text)\n return response\n return None", "def _request(self, path, method='GET', body=None, headers=None):\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n response, content = super(DSBaseService, self)._request(url,\n method=method,\n body=str(body).replace(\"'\", '\"'),\n headers=headers)\n if int(response['status']) == 200:\n return json.loads(content)\n else:\n raise RuntimeError('{} responded with status code {}'.format(url, response['status']))", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. 
jsonify is important, as it sets response headers that indicate the respose is in JSON as well" ]
[ "0.69455296", "0.69230396", "0.6754933", "0.67506415", "0.673607", "0.66864365", "0.6583713", "0.6581421", "0.653614", "0.6509615", "0.6475695", "0.64537466", "0.643002", "0.6425151", "0.64179856", "0.636375", "0.6342285", "0.63297737", "0.6329244", "0.6312979", "0.63046056", "0.62933487", "0.6261643", "0.62614584", "0.6251446", "0.6246688", "0.6238003", "0.6230471", "0.6220187", "0.6187422", "0.6163945", "0.6157011", "0.61373734", "0.61320424", "0.6126515", "0.6124516", "0.611421", "0.610937", "0.6103774", "0.6088864", "0.60752004", "0.6071114", "0.6066358", "0.606313", "0.60492754", "0.6045947", "0.6005929", "0.60058945", "0.6005714", "0.5989711", "0.59890074", "0.59729", "0.5967818", "0.59674907", "0.59653974", "0.59629536", "0.5961452", "0.5961452", "0.59564936", "0.5949152", "0.5934217", "0.5933905", "0.59242034", "0.59194344", "0.5917667", "0.5904134", "0.589903", "0.5889547", "0.58867234", "0.58791876", "0.5872478", "0.5861162", "0.5860149", "0.5837926", "0.58336174", "0.58321184", "0.58253306", "0.58244073", "0.5820727", "0.5809153", "0.5804759", "0.5803551", "0.58033115", "0.58021194", "0.57956296", "0.57942736", "0.5793927", "0.5781587", "0.57637084", "0.5763523", "0.5761248", "0.57585937", "0.57541186", "0.5749929", "0.57337964", "0.57261413", "0.5719041", "0.5713095", "0.57119346", "0.57077914", "0.570053" ]
0.0
-1
Parse the query type from the root schema. This can return either a string or None; the latter occurs when the endpoint does not support queries.
def parse_query_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "queryType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def _schema_type(self) -> Optional[type]:\n return None", "def parse_query_spec(self, query_spec):\n try:\n return self.QUERY_TYPE_MAP[query_spec['type']](query_spec)\n except KeyError:\n raise exceptions.QueryError('invalid query spec')\n except TypeError:\n raise exceptions.QueryError('Query must be a dictionary specifyng type and value of the query')", "def _schema_type(self) -> Optional[type]:\n pass", "def query(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"query\")", "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def typ(self) -> Optional[str]:\n return self.get(\"/Type\")", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def parse_query(self, query_dict):\n if query_dict is None:\n return xapian.Query('') # Match everything\n elif query_dict == {}:\n return xapian.Query() # Match nothing\n\n query_tree = self.build_query_tree(query_dict)\n\n return query_tree.to_query(self.schema, self.database)", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def result_type(self) -> Optional[str]:\n if hasattr(self, \"_result_type\"):\n return self._result_type\n _args: list[Arg] = []\n _ctx = self._select(\"resultType\", _args)\n return _ctx.execute_sync(Optional[str])", "def base_query(self) -> Optional[str]:\n return pulumi.get(self, \"base_query\")", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def query_schema(self, name, param):\n\n alias, name, need_list = self.parse_entry(name)\n\n if not 
name:\n result = self.process_multiple_query(need_list, param)\n else:\n result = self.process_single_query(name, need_list, param)\n return alias, result", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def simd_type(self):\n for node in self.query_nodes:\n return node.get('infos').get('system_configurations').get('simd_type')\n raise Exception(\"No query node found\")", "def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if (int(value) < self.INTEGER_MIN_VALUE\n or self.INTEGER_MAX_VALUE < int(value)):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception(\n f'Unsupported node type: {type(value)} (should not happen)'\n )", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", "def wql_istype_query(self, node, nodetype):\n self.tr_id = get_tr_id()\n if isinstance(node, Literal) or isinstance(type, Literal):\n return None # No literals allowed here\n xml_msg = self._create_wql_istype_msg(self.tr_id,\n node, type)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n if response[\"results\"] == \"TRUE\":\n return True\n else:\n return False\n else:\n raise SIBError(M3_SIB_ERROR)", "def query(self, name, python_type, optional=False, **kwargs):\n return self.simple_param('query', name, python_type, optional=optional,\n **kwargs)", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def _parsed_query(self, query_string):\r\n return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query", "def getType_(self, ctx):\n # type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type]\n\n if ctx is None:\n return None\n\n return self.visit(ctx)", "def convert_raw_type_to_xdm_type(schema_type: str) -> str:\n converting_dict = {\n \"string\": SCHEMA_TYPE_STRING,\n \"int\": SCHEMA_TYPE_NUMBER,\n \"boolean\": SCHEMA_TYPE_BOOLEAN,\n }\n\n return 
converting_dict.get(schema_type, SCHEMA_TYPE_STRING)", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def infer_display_type(stream):\n\n type_=None\n\n if len(stream)==1:\n dquery=stream[0]\n type_=_infer_from_dquery(dquery)\n\n if type_ is None:\n type_=sdconst.SA_TYPE_DATASET # default\n\n return type_", "def get_query(self, minimal: bool = False) -> Optional[str]:\n if minimal:\n return self.minimal_query\n return self.query", "def get_query(self):\n return self.query_class(self)", "def resolve_type(value: t.Any) -> t.Any:\n value = str(value).strip()\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n return False\n elif value.lower() == \"none\":\n return None\n else:\n # attempt to cast\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n # attempt to parse\n try:\n return literal_eval(value)\n except ValueError:\n pass\n except SyntaxError: # happens with single topics starting with '/'\n pass\n # unparseable, return as str\n return value", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def document_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"document_type\")", "def get_schema_cls() -> t.Any:\n return None", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def 
endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def extract_query(pkt):\n request = DNSRecord.parse(pkt)\n\n # Extract the hostname\n qname = request.q.qname\n hostname = str(qname)\n # Remove tailing period (artifact of the dnslib)\n if hostname[-1] == '.':\n hostname = hostname[:-1]\n\n # Record type\n qtype = QTYPE.get(request.q.qtype)\n\n return (hostname, qtype)", "def endpoint_type(self) -> Optional[pulumi.Input[Union[str, 'EndpointType']]]:\n return pulumi.get(self, \"endpoint_type\")", "def get_type_from_str(type_str: str) -> str:\n query = [x\n for x in PRIMITIVE_TYPES\n if type_str.lower() in PRIMITIVE_TYPES[x]]\n return query[0] if len(query) > 0 else 'None'", "def endpoint_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def field_type(self) -> Optional[NameObject]:\n return self.get(\"/FT\")", "def get_elasticsearch_query(self) -> dict:\n body = self.json\n\n if body is not None and isinstance(body, dict) and \"query\" in body:\n return body\n else:\n # Raise InvalidUsage (400) and log error\n # Import logger here to prevent circular dependency on module import\n message = \"Invalid request body whilst trying to parse for Elasticsearch query\"\n logger.error(self.request_id, message, extra={\"body\": body})\n raise InvalidUsage(message)", "def _get_query_parser(self):\n return whoosh.qparser.MultifieldParser(\n ('title', 'content'),\n self._index.schema,\n plugins=[whoosh.qparser.PrefixPlugin],\n group=whoosh.qparser.OrGroup.factory(0.9)\n )", "def start_type(self):\n return self._query_config()['start_type']", "def get_hierarchy_query_record(self, hierarchy_record_type):\n return # osid.hierarchy.records.HierarchyQueryRecord", "def get_function_result_type(\n self,\n function: str,\n ) -> Optional[str]:\n if function in constants.TREND_FUNCTION_TYPE_MAP:\n # HACK: Don't invalid query here if we don't recognize the function\n # this is cause non-snql tests still need to run and will check here\n # TODO: once non-snql is removed and trends has its own builder this\n # can be removed\n return constants.TREND_FUNCTION_TYPE_MAP.get(function)\n\n resolved_function = self.resolve_function(function, resolve_only=True)\n\n if not isinstance(resolved_function, Function) or resolved_function.alias is None:\n return None\n\n function_details = self.function_alias_map.get(resolved_function.alias)\n if function_details is None:\n return None\n\n result_type: Optional[str] = function_details.instance.get_result_type(\n function_details.field, function_details.arguments\n )\n return result_type", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def _parsed_query(self, query_string):\r\n return urlparse(handler_url(self.block, 'handler', query=query_string)).query", "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\n raise GraphQLError(\"\".join([e[\"message\"] for e in response[\"errors\"]]))\n else:\n raise requests.exceptions.RequestException(\n f\"HTTP Status: {r.status_code}, Response Body: {r.text}\"\n )", "def get_query_result(query_string: str) -> Any:\n table = get_template_attribute(\"_query_table.html\", \"querytable\")\n contents, types, rows = 
g.ledger.query_shell.execute_query(\n g.filtered.entries, query_string\n )\n if contents and \"ERROR\" in contents:\n raise FavaAPIError(contents)\n table = table(g.ledger, contents, types, rows)\n\n if types and g.ledger.charts.can_plot_query(types):\n return QueryResult(table, g.ledger.charts.query(types, rows))\n return QueryResult(table)", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def _get_resource_type(self, resource_path):\n remove_query = resource_path.split('?')[0] # remove query parameters\n remove_slashes = remove_query.strip('/') # strip leading and trailing slashes\n return remove_slashes.rstrip('s') # remove trailing 's'", "def type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_name\")", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def _schema_type(self) -> Optional[type]:\n return ImageSchema", "def test_type_as_query_arg(app, lang_type, example_records, client, h, prefix):\n # Ensure we cannot pass invalid query parameters\n res = client.get(f'{prefix}?invalid=test', headers=h)\n assert res.status_code == 200\n assert 'invalid' not in res.json['links']['self']\n\n # It should not be possible to pass 'type' in query args (because it's in\n # the URL route instead)\n res = client.get(f'{prefix}?type={lang_type.id}', headers=h)\n assert res.status_code == 200\n assert lang_type.id not in res.json['links']['self']\n assert 'type=' not in res.json['links']['self']\n assert 'resourcetypes' in res.json['links']['self']", "def root_type(self) -> BaseXsdType:\n if getattr(self, 'attributes', None):\n return cast('XsdComplexType', self.maps.types[XSD_ANY_TYPE])\n elif self.base_type is None:\n if self.is_simple():\n return cast('XsdSimpleType', self)\n return cast('XsdComplexType', self.maps.types[XSD_ANY_TYPE])\n\n primitive_type: BaseXsdType\n try:\n if self.base_type.is_simple():\n primitive_type = self.base_type.primitive_type # type: ignore[union-attr]\n else:\n primitive_type = self.base_type.content.primitive_type # type: ignore[union-attr]\n except AttributeError:\n # The type has complex or XsdList content\n return self.base_type.root_type\n else:\n return primitive_type", "def doQueryString(self, query) :\n\t\tqr = self.doQuery(query)['results']['bindings']\n\t\tif qr :\n\t\t\treturn qr[0].values()[0]['value']\n\t\telse :\n\t\t\treturn None", "def version_query_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_query_name\")", "def inspect_query(query):\n return _parse_query(query)", "def prefix_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix_type\")", "def engine_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_type\")" ]
[ "0.70593387", "0.5939255", "0.5924762", "0.5911261", "0.5840816", "0.57200265", "0.57065624", "0.5640995", "0.5613772", "0.5587588", "0.55517954", "0.54139596", "0.53744495", "0.5370533", "0.53518116", "0.5295712", "0.52832526", "0.5279426", "0.52663785", "0.52663785", "0.52650476", "0.52557695", "0.52557695", "0.52380216", "0.5224146", "0.5205046", "0.518808", "0.51812875", "0.5163579", "0.51416224", "0.51391196", "0.5135188", "0.51046664", "0.50995934", "0.50995934", "0.50822127", "0.5072854", "0.50606936", "0.50604194", "0.50318897", "0.50315297", "0.50231427", "0.50174934", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.5012386", "0.50041866", "0.4993457", "0.49888644", "0.4962042", "0.4957742", "0.4957742", "0.4957742", "0.4957742", "0.4957742", "0.49306285", "0.493003", "0.49206215", "0.4919867", "0.4913792", "0.49106106", "0.4904179", "0.48989993", "0.4894654", "0.48877034", "0.48815677", "0.48810935", "0.48765212", "0.48357123", "0.48312178", "0.4800316", "0.47959447", "0.47813427", "0.4779026", "0.4769659", "0.4769442", "0.4768482", "0.47544506", "0.47361112", "0.4732873", "0.47169", "0.47040948" ]
0.8196511
0
Parse the mutation type from the root schema. This can return either a string or None; the latter occurs when the endpoint does not support mutations.
def parse_mutation_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "mutationType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def set_mutation_type(self, mut_type=''):\n if mut_type:\n # specified mutation type\n self.mutation_type = mut_type\n else:\n # interpret mutation type from attributes\n if not self.is_valid:\n # does not correctly fall into a category\n self.mutation_type = 'not valid'\n elif self.unknown_effect:\n self.mutation_type = 'unknown effect'\n elif self.is_missing_info:\n self.mutation_type = 'missing'\n elif self.is_substitution:\n self.mutation_type = 'substitution'\n elif self.is_deletion:\n self.mutation_type = 'deletion'\n elif self.is_insertion:\n self.mutation_type = 'insertion'\n\n # check if mutation at splice site\n self.__set_splice_mutation()", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def _is_mutation_type(data):\n try:\n QuiverMutationType(data)\n return True\n except Exception:\n return False", "def _mutation_type_error(data):\n if data[2] is None:\n del data[2]\n return_str = str(data) + ' is not a valid quiver mutation type'\n return_str += '\\n Finite types have the form [ \\'?\\', n ] for type ? and rank n'\n return_str += '\\n Affine type A has the form [ \\'A\\', [ i, j ], 1 ] for rank i+j'\n return_str += '\\n Affine type ? has the form [ \\'?\\', k, \\pm 1 ] for rank k+1'\n return_str += '\\n Elliptic type ? has the form [ \\'?\\', k, [i, j] ] (1 <= i,j <= 3) for rank k+2'\n return_str += '\\n For correct syntax in other types, please consult the documentation.'\n\n raise ValueError(return_str)", "def get_random_mutation_type(self):\n return self.random_state.choice(\n self.mutation_types, p=self.mutation_probabilities)", "def _schema_type(self) -> Optional[type]:\n return None", "def _schema_type(self) -> Optional[type]:\n pass", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def token_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_type\")", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def Type(self):\n if self.currtok[1].name in {\"INT\", \"FLOAT\", \"BOOLEAN\"}:\n type = self.currtok[0]\n self.currtok = next(self.tg)\n return type\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def typ(self) -> Optional[str]:\n return self.get(\"/Type\")", "def mutations_node(self):\n return self._mutations_node", "def _repr_(self):\n return \"QuiverMutationType\"", "def write_operation_type(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")", "def event_data_content_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"event_data_content_type\")", "def gen_type_string(self, node):\n return self._gen_table[node.node_type()](self, node)", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def mutation_types(self):\n return list(self.mutation_pool.keys())", "def expected_form(self) -> str:\n return self.get_type()", "def _get_data_type_by_parse_type_proto(self, type_proto, node):\n data_type_name = self._get_data_type_name_by_value(type_proto, type_proto.data_type, field_name='data_type')\n if type_proto.data_type == DataType.DT_TENSOR:\n tensor_type_proto = type_proto.tensor_type\n value = type_proto.tensor_type.elem_type\n elem_type_name = self._get_data_type_name_by_value(tensor_type_proto, value, field_name='elem_type')\n node.elem_types.append(elem_type_name)\n return f'{data_type_name}[{elem_type_name}]'\n\n if 
type_proto.data_type == DataType.DT_TUPLE:\n data_types = []\n for elem_type in type_proto.sequence_type.elem_types:\n data_types.append(self._get_data_type_by_parse_type_proto(elem_type, node))\n return f'{data_type_name}{str(data_types)}'\n\n node.elem_types.append(data_type_name)\n\n return data_type_name", "def type(self) -> str:\n return self._event.get('type')", "def parse_mutation_data(mutation_data):\n data = copy.deepcopy(EMPTY_MUT_DATA)\n jsonschema.validate(mutation_data, MUT_DATA_SCHEMA)\n for k in data:\n data[k] = mutation_data[k]\n\n return data", "def node_type(self) -> Optional[str]:\n return pulumi.get(self, \"node_type\")", "def type(self):\n return self.raw.get(\"type\")", "def token_type(self) -> str:\n return self._token_type", "def token_type(self) -> str:\n return self._token_type", "def read_type(self):\n return self.node.get_attr(Type).read()", "def parse_statement(self, stmt):\r\n if 'type' not in stmt:\r\n raise TypeError('Type field required')\r\n\r\n if stmt['type'] == 'property':\r\n return self.parse_property(stmt)\r\n elif stmt['type'] == 'edge':\r\n return self.parse_edge(stmt)\r\n elif stmt['type'] == 'key_index':\r\n return self.parse_key_index(stmt)\r\n elif stmt['type'] == 'defaults':\r\n return self.parse_defaults(stmt)\r\n else:\r\n raise ValueError('Invalid `type` value {}'.format(stmt['type']))", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def ParseType(ast):\n type_node = ast[0]\n type_name = type_node[0]\n if type_name == 'number_type':\n type = NumberType(type_node[1])\n elif type_name == 'vec_type':\n type = VecType(type_node[1])\n elif type_name == 'string_type':\n type = StringType(type_node[1])\n elif type_name == 'binary_type':\n type = BinaryType(type_node[1])\n elif type_name == 'array_type':\n type = ArrayType(type_node[1])\n elif type_name == 'map_type':\n type = MapType(type_node[1])\n elif type_name == 'custom_type':\n type = CustomType(type_node[1])\n else:\n logging.info('Unknown AST type: %s' % (type_name))\n return None\n for node in ast[1:]:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'type_opt':\n type.is_optional = True\n return type", "def transaction_type(self) -> str:\n return self.chunks[2].decode(\"ascii\")", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def payload_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"payload_type\")", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def getType_(self, ctx):\n # type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type]\n\n if ctx is None:\n return None\n\n return self.visit(ctx)", "def action_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action_type\")", "def type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_name\")", "def result_type(self):\n\n anc = self.find_ancestor(ASTDeclarationNode) or self.find_ancestor(ASTAssignmentNode)\n if anc:\n return anc.type()\n return get_expression_type(self)", "def action_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"action_type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def 
type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def event_data_content_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"event_data_content_type\")", "def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if (int(value) < self.INTEGER_MIN_VALUE\n or self.INTEGER_MAX_VALUE < int(value)):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception(\n f'Unsupported node type: {type(value)} (should not happen)'\n )", "def get_metadata_type(output_type: Any) -> str:\n if output_type is str or output_type is AnyStr:\n return \"String\"\n if output_type is datetime.datetime or output_type is datetime.date:\n return \"Date\"\n if output_type is int or output_type is float:\n return \"Number\"\n if output_type is bool:\n return \"Boolean\"\n return \"Unknown\"", "def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"", "def get_parsed_declaration(self) -> str:\n parent_type = self.node.underlying_typedef_type.spelling\n\n # Function prototypes need to be handled different. When clang can't\n # successfully parse the file it falls back to naming the return type\n # as the display name.\n # Unfortunately some versions of clang behave a little differently, some\n # will return a `POINTER` while others will return `FUNCITONNOPROTO`. 
The\n # `POINTER`s are easy to derive the real type from, but the test\n # environment doesn't use that version of clang.\n type_ = self.node.underlying_typedef_type\n if type_.kind == cindex.TypeKind.POINTER: # pragma: no cover\n type_ = type_.get_pointee()\n\n if type_.kind in (\n cindex.TypeKind.FUNCTIONPROTO,\n cindex.TypeKind.FUNCTIONNOPROTO,\n ):\n ret_value, paren, signature = parent_type.partition(\")\")\n signature = \"\".join((ret_value, self.name, paren, signature))\n\n return f\"typedef {signature}\"\n\n return f\"typedef {parent_type} {self.name}\"", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"type\")" ]
[ "0.6133898", "0.5771993", "0.55856854", "0.55424494", "0.55416554", "0.5471137", "0.52651036", "0.52617633", "0.5190931", "0.5140541", "0.50858825", "0.5052965", "0.5041517", "0.5041517", "0.50238866", "0.50197643", "0.49824637", "0.4972244", "0.4961925", "0.49438107", "0.49402195", "0.49166182", "0.48954162", "0.48913866", "0.48913866", "0.48913866", "0.48913866", "0.48912144", "0.48700345", "0.48580256", "0.48408493", "0.48299444", "0.48283544", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.48046717", "0.4795219", "0.47855863", "0.47643587", "0.4752278", "0.47390446", "0.47367674", "0.47158614", "0.47015208", "0.4700641", "0.4700641", "0.46839485", "0.4674436", "0.46717903", "0.4669574", "0.46571174", "0.4649381", "0.4646675", "0.46300656", "0.46294343", "0.46274915", "0.46273258", "0.46160713", "0.46081343", "0.4595649", "0.45956308", "0.45928445", "0.45928445", "0.45928445", "0.45928445", "0.45928445", "0.45928445", "0.45928445", "0.45878214", "0.45740747", "0.45642996", "0.4561314", "0.45602125", "0.45489353", "0.45274484", "0.45274484", "0.45274484", "0.45274484", "0.45274484", "0.45274484", "0.45274484", "0.45274484" ]
0.80457693
0
Parse the subscription type from the root schema. This can either return a string or None. The latter when the endpoint does not support subscriptions.
def parse_subscription_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "subscriptionType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscription_type(self) -> str:\n return pulumi.get(self, \"subscription_type\")", "def typ(self) -> Optional[str]:\n return self.get(\"/Type\")", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def _schema_type(self) -> Optional[type]:\n return None", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def get_xsd_type(self, item):\n if not self.xsd_types or isinstance(self.xsd_types, AbstractSchemaProxy):\n return\n elif isinstance(item, str):\n xsd_type = self.xsd_types.get(item)\n elif isinstance(item, AttributeNode):\n xsd_type = self.xsd_types.get(item[0])\n else:\n xsd_type = self.xsd_types.get(item.tag)\n\n if not xsd_type:\n return\n elif not isinstance(xsd_type, list):\n return xsd_type\n elif isinstance(item, AttributeNode):\n for x in xsd_type:\n if x.is_valid(item[1]):\n return x\n elif not isinstance(item, str):\n for x in xsd_type:\n if x.is_simple():\n if x.is_valid(item.text):\n return x\n elif x.is_valid(item):\n return x\n\n return xsd_type[0]", "def _schema_type(self) -> Optional[type]:\n pass", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def prefix_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix_type\")", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def get_resource_type(self):\n category = self.get_first_category(DATA_KIND_SCHEME)\n if category is not None:\n return category.label\n else:\n return None", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def _get_resource_type(self, resource_path):\n remove_query = resource_path.split('?')[0] # remove query parameters\n remove_slashes = remove_query.strip('/') # strip leading and trailing slashes\n return remove_slashes.rstrip('s') # remove trailing 's'", "def _schema_type(self) -> Optional[type]:\n return EpisodeSchema", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def discover_schema(node):\n xmlns = node.get('__xmlns__', None)\n\n if xmlns:\n node['Schema'] = 'Unknown'\n if xmlns.startswith('smpte_stereo'):\n node['Schema'] = 'SMPTE Stereoscopic'\n elif xmlns.startswith('smpte'):\n node['Schema'] = 'SMPTE'\n elif xmlns.startswith('interop'):\n node['Schema'] = 'Interop'\n elif xmlns.startswith('atmos'):\n node['Schema'] = 'Atmos'", "def get_unsubscription_type(self):\n unsubscription_types = dict(UNSUBSCRIPTION_TYPE_CHOICES)\n return unsubscription_types.get(self.unsubscription_type, \"N/A\")", "def service_type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_type_name\")", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def type(self) -> Optional[str]:\n return 
pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def subscribe_wql_istype(self, node, type,\n msg_handler):\n if isinstance(node, Literal) or isinstance(type, Literal):\n return None # No literals allowed here\n self.msg_handler = msg_handler\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_istype_msg(self.tr_id, node,\n type)\n self.conn.connect()\n self.conn.send(xml_msg)\n cnf = self.conn.receive()\n self._check_error(cnf)\n\n self.sub_id = cnf[\"subscription_id\"]\n # self.msg_handler.handle(initial_result)\n sub_h = WQLBooleanSubscribeHandler(self.node_id, self.tr_id,\n self.conn, msg_handler)\n sub_h.start()\n if cnf[\"results\"] == \"TRUE\":\n return True\n else:\n return False", "def type_name(self) -> Optional[str]:\n return pulumi.get(self, \"type_name\")", "def render_subtypes(spec_catalog, data_type, prefix=None):\n subtypes = spec_catalog.get_subtypes(data_type)\n if len(subtypes) == 0:\n return None\n re = prefix if prefix is not None else \"\"\n re += \", \".join([RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(ct), ct)\n for ct in subtypes])\n return re", "def get_type_s(self, type):\r\n\r\n return HTTP2_NAMES.get(type, None)", "def endpoint_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def root_type(self) -> BaseXsdType:\n if getattr(self, 'attributes', None):\n return cast('XsdComplexType', self.maps.types[XSD_ANY_TYPE])\n elif self.base_type is None:\n if self.is_simple():\n return cast('XsdSimpleType', self)\n return cast('XsdComplexType', self.maps.types[XSD_ANY_TYPE])\n\n primitive_type: BaseXsdType\n try:\n if self.base_type.is_simple():\n primitive_type = self.base_type.primitive_type # type: ignore[union-attr]\n else:\n primitive_type = self.base_type.content.primitive_type # type: ignore[union-attr]\n except AttributeError:\n # The type has complex or XsdList content\n return self.base_type.root_type\n else:\n return primitive_type", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def service_type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_type_name\")", "def subscribe_wql_nodetypes(self, node, msg_handler):\n if isinstance(node, Literal):\n raise KPError(M3_KP_ERROR)\n self.msg_handler = msg_handler\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_nodetypes_msg(self.tr_id, node)\n self.conn.connect()\n self.conn.send(xml_msg)\n cnf = self.conn.receive()\n self._check_error(cnf)\n\n self.sub_id = cnf[\"subscription_id\"]\n initial_result = parse_URI_list(cnf[\"results\"])\n sub_h = WQLNodeSubscribeHandler(self.node_id, self.tr_id,\n self.conn, msg_handler)\n sub_h.start()\n return initial_result", "def _get_service_type(service):\n\n return service.split(':')[3]", "def get_valid_subtypes(trigger_type: str) -> Optional[Sequence[str]]:\n for trigger_info in TRIGGER_CAPABILITIES.values():\n if trigger_info.conf == trigger_type:\n return trigger_info.subconfs\n return None", "def subscription_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, 
\"subscription_id\")", "def saas_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"saas_subscription_id\")", "def pubsub_topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pubsub_topic\")", "def get_type_from_str(type_str: str) -> str:\n query = [x\n for x in PRIMITIVE_TYPES\n if type_str.lower() in PRIMITIVE_TYPES[x]]\n return query[0] if len(query) > 0 else 'None'", "def XmlTypeNamespace(self) -> str:", "def parse_typename(typename):\n if typename is None:\n raise ValueError(\"function type must be provided\")\n idx = typename.rfind(\"/\")\n if idx < 0:\n raise ValueError(\"function type must be of the from namespace/name\")\n namespace = typename[:idx]\n if not namespace:\n raise ValueError(\"function type's namespace must not be empty\")\n type = typename[idx + 1:]\n if not type:\n raise ValueError(\"function type's name must not be empty\")\n return namespace, type", "def payload_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"payload_type\")", "def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def get_schema(self, name, namespace=None):\n avro_name = self.get_name(name=name, namespace=namespace)\n return self._names.get(avro_name.fullname, None)", "def simple_type(self) -> Optional['XsdSimpleType']:\n raise NotImplementedError()", "def endpoint_type(self) -> Optional[pulumi.Input[Union[str, 'EndpointType']]]:\n return pulumi.get(self, \"endpoint_type\")", "def gettype(self, failobj=None):\n missing = []\n value = self.get('content-type', missing)\n if value is missing:\n return failobj\n return re.split(r';\\s*', value.strip())[0].lower()", "def resolve_type(value: t.Any) -> t.Any:\n value = str(value).strip()\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n return False\n elif value.lower() == \"none\":\n return None\n else:\n # attempt to cast\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n # attempt to parse\n try:\n return literal_eval(value)\n except ValueError:\n pass\n except SyntaxError: # happens with single topics starting with '/'\n pass\n # unparseable, return as str\n return value", "def resource_type(self) -> Optional[str]:\n return pulumi.get(self, \"resource_type\")", "def resource_type(self) -> Optional[str]:\n return pulumi.get(self, \"resource_type\")", "def get_schema_url(self):\n return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)", "def get_subtype(object_dictionary):\n type = object_dictionary['type']\n\n if type == 'CUSTOM_AUTHENTICATION':\n return 'CustomAuthenticationPolicy'\n else:\n return 'AuthenticationPolicy'", "def _get_xml_namespace(root_tag):\n m = re.match(r\"\\{.*\\}\", root_tag)\n return m.group(0) if m else \"\"", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def root_namespace(self) -> str:\n return self.name_components[0]", "def _schema_type(self) -> Optional[type]:\n return ImageContainerSchema", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def get_stream_type(self) -> str:", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n 
return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def _get_short_type_name(cls, type_name: str) -> str:\n import re\n match = re.match('(typing\\.)?(?P<type>\\w+)(?:\\[.+\\])?', type_name)\n return match.group('type') if match else type_name", "def schema_item_type_link(self) -> Optional[str]:\n return pulumi.get(self, \"schema_item_type_link\")", "def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))", "def type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_name\")", "def resolver(schema):\n name = schema.__name__\n if name.endswith(\"Schema\"):\n return name[:-6] or name\n return name", "def _get_type(self, obj):\n typever = obj['Type']\n typesplit = typever.split('.')\n return typesplit[0] + '.' + typesplit[1]", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def type_as_string(self):\n return self.properties.get('TypeAsString', None)", "def document_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"document_type\")", "def install_type(self):\r\n result = libxml2.parseDoc(self.xml).xpathEval('/template/os/install')[0]\r\n if result:\r\n return result.prop('type')\r\n else:\r\n return None", "def type(self):\n return self.raw.get(\"type\")", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"", "def storage_account_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"storage_account_subscription_id\")", "def prefix_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"prefix_type\")", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def scheme_type(self) -> Union[str, None]:\n if self.scheme is None:\n return None\n\n if \"contact_1\" in self.scheme and \"contact_2\" in self.scheme:\n return \"pairs\"\n elif \"contact\" in self.scheme:\n return \"contacts\"\n else:\n raise KeyError(\n \"The passed scheme appears to be neither contacts \" \"nor pairs\"\n )", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")" ]
[ "0.660987", "0.5427683", "0.5366075", "0.5344622", "0.531378", "0.5235341", "0.52084655", "0.514429", "0.514082", "0.5107135", "0.5022559", "0.4995361", "0.4984606", "0.49678668", "0.49628568", "0.49607036", "0.49584213", "0.4956243", "0.49261507", "0.49177787", "0.49068826", "0.49068826", "0.48645335", "0.48645335", "0.48605448", "0.48605448", "0.48605448", "0.48605448", "0.48605448", "0.48605448", "0.48605448", "0.48563135", "0.48516834", "0.4848163", "0.48362228", "0.48270196", "0.4817951", "0.48064372", "0.47964534", "0.47756234", "0.4773857", "0.47545052", "0.4734536", "0.47339493", "0.47300246", "0.47081956", "0.47060484", "0.46909893", "0.46900544", "0.46862423", "0.46836638", "0.46818033", "0.4653932", "0.46352628", "0.46348962", "0.46283206", "0.4628158", "0.46218443", "0.46218443", "0.46093705", "0.46045545", "0.4602807", "0.45977607", "0.45910022", "0.45882878", "0.4581583", "0.45815632", "0.45801383", "0.45801383", "0.45801383", "0.45801383", "0.45801383", "0.45782265", "0.45648038", "0.45546943", "0.45540285", "0.45504868", "0.45488527", "0.4542485", "0.45406803", "0.45386416", "0.45382", "0.45288855", "0.45287895", "0.4522942", "0.45203814", "0.4519574", "0.4514216", "0.45094913", "0.45087546", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826", "0.45069826" ]
0.8100664
0
Parse an operation type from the root schema. This can either return a string or None. The latter when the endpoint does not support the passed by operation.
def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]: query_type = raw_schema.get(op_type, {}) if not query_type: return None return query_type.get("name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def get_operation_type(self, operation_name):\n # type: (Optional[str]) -> Optional[str]\n operations_map = self.operations_map\n if not operation_name and len(operations_map) == 1:\n return next(iter(operations_map.values()))\n return operations_map.get(operation_name)", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def parse_operations(self, operation_type: str) -> Tuple[Operation]:\n if operation_type is None:\n return tuple()\n query_type: SchemaType = self.types.get(operation_type)\n if query_type is None:\n return tuple()\n return tuple([Operation(f, self.settings) for f in query_type.fields])", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def get_op_type(self):\n return self.op_type", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def _get_singa_op_type(cls, op):\n return type(op).__name__", "def typ(self) -> Optional[str]:\n return self.get(\"/Type\")", "def unaryop_type(cls, op):\n return None", "def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")", "def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")", "def get_op(self, op_complete_url):\n url_parsed = urlsplit(op_complete_url)\n op_url = url_parsed.path\n\n conf, op = self.best_match(op_url)\n if op is not None:\n return Operation(\n op_complete_url,\n op,\n conf[\"conf\"][op],\n conf[\"tp\"],\n conf[\"sparql_http_method\"],\n conf[\"addon\"],\n )\n else:\n sc = 404\n return (\n sc,\n \"HTTP status code %s: the operation requested does not exist\" % sc,\n \"text/plain\",\n )", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", "def operation_id(self) -> Optional[str]:\n return pulumi.get(self, \"operation_id\")", "def get_operation_id(self):\n operation_id = self.yaml_parser.object.get('operationId', None)\n if not operation_id:\n operation_id = self.method + \"-\" + self.path.strip(\"/\").replace(\"/\", \"-\")\n\n return operation_id", "def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']", "def _schema_type(self) -> Optional[type]:\n return None", "def _OpTypeByName(op_name):\n op_name_to_type = {\n 'REPLACE': common.OpType.REPLACE,\n 'REPLACE_BZ': common.OpType.REPLACE_BZ,\n 'MOVE': common.OpType.MOVE,\n 'BSDIFF': common.OpType.BSDIFF,\n 'SOURCE_COPY': common.OpType.SOURCE_COPY,\n 'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,\n 'ZERO': common.OpType.ZERO,\n 'DISCARD': common.OpType.DISCARD,\n 'REPLACE_XZ': common.OpType.REPLACE_XZ,\n 'PUFFDIFF': common.OpType.PUFFDIFF,\n 'BROTLI_BSDIFF': common.OpType.BROTLI_BSDIFF,\n }\n 
return op_name_to_type[op_name]", "def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def operation2string(self, operation):\n op = \"Custom\"\n if operation == QNetworkAccessManager.HeadOperation:\n op = \"HEAD\"\n elif operation == QNetworkAccessManager.GetOperation:\n op = \"GET\"\n elif operation == QNetworkAccessManager.PutOperation:\n op = \"PUT\"\n elif operation == QNetworkAccessManager.PostOperation:\n op = \"POST\"\n elif operation == QNetworkAccessManager.DeleteOperation:\n op = \"DELETE\"\n return op", "def _schema_type(self) -> Optional[type]:\n pass", "def get_operation(operation):\n if operation == 'query':\n return banking_pb2.QUERY\n if operation == 'deposit':\n return banking_pb2.DEPOSIT\n if operation == 'withdraw':\n return banking_pb2.WITHDRAW", "def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)", "def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")", "def _normalize_operation(operation):\n try:\n operation = operation.upper()\n except AttributeError:\n pass\n\n try:\n operation = ops_map[operation]()\n except KeyError:\n pass\n\n return operation", "def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"", "def offending_op(self):\r\n return type(self.r.owner.op)", "def find_type(token_string: str):\n if re.compile('\\d+').match(token_string):\n return 'number'\n elif re.compile('[a-zA-Z]').match(token_string):\n return 'id'\n elif re.compile('\\*|\\+|-|/').match(token_string):\n return 'op'\n else:\n return 'undefined'", "def parse_action(element: Element) -> ActionType:\n # edit is a special type of action for strings\n if \"edit\" in element.attributes:\n if element.attributes[\"edit\"] == \"append\":\n return edit_append\n raise error_at(element)(f'invalid edit=\"{element.attributes[\"edit\"]}\"')\n # default action is replace\n action = 
element.attributes.get(\"action\", \"replace\")\n if action == \"replace\":\n return replace\n if action == \"append\":\n return append\n if action == \"delete\":\n return delete\n if action == \"merge\":\n return merge\n raise error_at(element)('invalid action=\"{:s}\".'.format(action))", "def get_op(ring_size: int, op_str: str) -> Callable[..., Any]:\n ops = RING_SIZE_TO_OP.get(ring_size, None)\n\n if ops is None:\n raise ValueError(f\"Do not have operations for ring size {ring_size}\")\n\n op = ops.get(op_str, None)\n if op is None:\n raise ValueError(\n f\"Operator {op_str} does not exist for ring size {ring_size}\"\n )\n\n return op", "def parse_dispatch_type(dispatch_string: str):\n if not dispatch_string:\n return None\n\n dispatch_string = dispatch_string.lower().strip()\n\n if dispatch_string == \"load\":\n return DispatchType.LOAD\n\n if dispatch_string == \"generating\":\n return DispatchType.GENERATOR\n\n if dispatch_string == \"generator\":\n return DispatchType.GENERATOR\n\n raise Exception(\"Unknown dispatch type: {}\".format(dispatch_string))", "def opstring(op_string_name, op_type=None):\n \n name_list = op_string_name.strip().split()\n\n if name_list == [] or (len(name_list) == 1 and name_list[0] in ['1', '1.0', 'I']):\n if op_type is None:\n raise ValueError('When specifying an identity operator, you need to provide an op_type.')\n else:\n return OperatorString([], [], op_type)\n \n if name_list[0] in VALID_OPS:\n orbital_operators = name_list[0::2]\n orbital_labels = np.array(name_list[1::2], dtype=int)\n prefactor = 1.0\n elif name_list[0] in ['1', '1.0', '1j'] and name_list[1] in VALID_OPS:\n orbital_operators = name_list[1::2]\n orbital_labels = np.array(name_list[2::2], dtype=int)\n prefactor = complex(name_list[0])\n else:\n raise ValueError('Invalid name for an operator string: {}'.format(op_string_name))\n\n if orbital_operators[-1] in PAULI_OPS:\n deduced_op_type = 'Pauli'\n elif orbital_operators[-1] in MAJORANA_OPS:\n deduced_op_type = 'Majorana'\n elif orbital_operators[-1] in FERMION_OPS:\n deduced_op_type = 'Fermion'\n else:\n raise ValueError('Invalid name for an operator string: {}'.format(orbital_operators[-1]))\n\n if op_type is not None and deduced_op_type != op_type:\n raise ValueError('The input op_type and the deduced op_type of the OperatorString do not agree: {} {}'.format(op_type, deduced_op_type))\n \n return OperatorString(orbital_operators, orbital_labels, deduced_op_type, prefactor=prefactor)", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def endpoint_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:\n return pulumi.get(self, \"os_type\")", "def _GetStatusFromOp(op):\n for prop in op.response.additionalProperties:\n if prop.key == 'status':\n return prop.value.string_value\n return 'UNKNOWN'", "def command_type(self):\n t = self.current_command.split(' ')[0]\n if t in commands.get('arithmetic'):\n return 'C_ARITHMETIC'\n\n if t not in commands:\n raise ValueError('{} is an invalid command type.'.format(t))\n\n return commands.get(t)", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> 
pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def endpoint_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"endpoint_type\")", "def _GetTpuOperationRef(self, operation):\n return resources.REGISTRY.ParseRelativeName(\n operation.name, collection='tpu.projects.locations.operations')", "def path_to_operation(path, verb):\n # type: (unicode, unicode) -> unicode\n character_map = {\n ord(\"{\"): None,\n ord(\"}\"): None,\n ord(\"_\"): u\"/\"\n }\n if path == u\"/\":\n operation = ROOT_OPERATION\n else:\n sanitised = path.translate(character_map)\n operation = u\"_\".join(p for p in sanitised.split(\"/\"))\n\n return \"{}_{}\".format(verb, operation)", "def endpoint_type(self) -> Optional[pulumi.Input[Union[str, 'EndpointType']]]:\n return pulumi.get(self, \"endpoint_type\")", "def command_type(self):\n if self._is_push_command():\n command_type = Parser.C_PUSH\n elif self._is_pop_command():\n command_type = Parser.C_POP\n elif self._is_arithmetic_command():\n command_type = Parser.C_ARITHMETIC\n elif self._is_comment_line() or self._is_blank_line():\n command_type = Parser.IGNORE\n else:\n command_type = Parser.INVALID\n return command_type", "def _get_operation_from_ajax_url(ajax_url):\n return ajax_url.split('/')[1]", "def os_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"os_type\")", "def get_operation_name(operation):\n if operation == banking_pb2.QUERY:\n return 'QUERY'\n if operation == banking_pb2.DEPOSIT:\n return 'DEPOSIT'\n if operation == banking_pb2.WITHDRAW:\n return 'WITHDRAW'", "def process_type(self, swagger_type, context):\n pass", "def operation_definition(servicename, operationname):\n with open(service_definition_file(servicename), encoding=\"UTF-8\") as definition_file:\n service_definition = json.loads(definition_file.read())\n return service_definition['operations'][operationname]", "def get_target_object_type(self, data):\n if data.get(\"jsonmodel_type\") == \"archival_object\":\n if self.aspace_helper.has_children(data[\"uri\"]):\n return \"archival_object_collection\"\n return data.get(\"jsonmodel_type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type\")", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")", "def _extract_operation(self, words):\n operation = self.client\n\n for word in words:\n attr = getattr(operation, word, None)\n if attr is None:\n return operation, words[-1]\n\n operation = attr\n\n return operation, \"\"", "def odata_type(self) -> str:\n return pulumi.get(self, \"odata_type\")", "def odata_type(self) -> str:\n return pulumi.get(self, \"odata_type\")", "def odata_type(self) -> str:\n return pulumi.get(self, \"odata_type\")", "def odata_type(self) -> str:\n return pulumi.get(self, \"odata_type\")", "def odata_type(self) -> str:\n return pulumi.get(self, \"odata_type\")", "def _process_operation(operation_pb):\n match = _OPERATION_NAME_RE.match(operation_pb.name)\n if match is None:\n raise ValueError('Operation name was not in the expected '\n 'format after instance creation.',\n operation_pb.name)\n location_id = match.group('location_id')\n operation_id = int(match.group('operation_id'))\n\n request_metadata = _parse_pb_any_to_native(operation_pb.metadata)\n operation_begin = _pb_timestamp_to_datetime(\n request_metadata.request_time)\n\n return operation_id, location_id, operation_begin", "def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)", "def operation(self) -> str:\n return self._operation", "def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def _get_op_str(self):\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n\n if type(self.operation) is str:\n op_name_str = self.operation\n else:\n op_name_str = self.operation.__name__\n\n try:\n getattr(cca_trans, op_name_str)\n op_str = f\"cca_trans.{op_name_str}\"\n except AttributeError:\n try:\n getattr(cca_out, op_name_str)\n op_str = f\"cca_out.{op_name_str}\"\n except AttributeError:\n op_str = op_name_str\n\n return op_str", "def _get_resource_type(self, resource_path):\n remove_query = resource_path.split('?')[0] # remove query parameters\n remove_slashes = remove_query.strip('/') # strip leading and trailing slashes\n return remove_slashes.rstrip('s') # remove trailing 's'", "def _get_service_type(service):\n\n return service.split(':')[3]", "def resolve_operation_id(self, operation):\n oid = operation.operation_id\n if \".\" in oid:\n oid = oid.split(\".\")[-1]\n # Append the operation function to this module.\n setattr(self.me, oid, noop)\n return self.me.__name__ + \".\" + oid", "def Type(self):\n if self.currtok[1].name in {\"INT\", \"FLOAT\", \"BOOLEAN\"}:\n type = self.currtok[0]\n self.currtok = next(self.tg)\n return type\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))", "def op(self) -> str:\n return self._node.get(\"op\")", "def resolve_type(value: t.Any) -> t.Any:\n value = str(value).strip()\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n 
return False\n elif value.lower() == \"none\":\n return None\n else:\n # attempt to cast\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n # attempt to parse\n try:\n return literal_eval(value)\n except ValueError:\n pass\n except SyntaxError: # happens with single topics starting with '/'\n pass\n # unparseable, return as str\n return value", "def find_label_operator(query):\n # If you apply any changes into these regex patterns, please update the JSON schema consequently at:\n # depc/schemas/v1_config.json\n # Rule\n regex = r\"^rule.(.+|'.+')$\"\n match = re.search(regex, query)\n if match:\n rule = match.group(1)\n if rule.startswith(\"'\"):\n rule = rule[1:-1]\n return RuleOperator, {\"rule\": rule}\n\n # Operation AND, OR (no argument)\n regex = (\n r\"^operation.(AND|OR)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n )\n match = re.search(regex, query)\n if match:\n # Transform '[Foo, Bar]' into a Python list\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation ATLEAST (integer argument)\n regex = r\"^operation.(ATLEAST\\([0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation RATIO (float integer less than 0)\n regex = r\"^operation.(RATIO\\(0.[0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Aggregation AVERAGE, MIN, MAX\n regex = r\"^aggregation.(AVERAGE|MIN|MAX)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return AggregationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # We validate the schema before save it in database,\n # it's not possible to go here.\n return None, None", "def action_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action_type\")" ]
[ "0.67735726", "0.6728999", "0.6351823", "0.63367105", "0.59829354", "0.59829354", "0.59829354", "0.59829354", "0.5836189", "0.5831232", "0.5829081", "0.5590442", "0.5587668", "0.55367893", "0.5521317", "0.55096656", "0.5479498", "0.5452105", "0.5383543", "0.5341075", "0.530389", "0.5299449", "0.5298689", "0.52676916", "0.52676916", "0.52222395", "0.52138656", "0.520446", "0.520236", "0.51969385", "0.518336", "0.512934", "0.5128193", "0.51269054", "0.5120843", "0.5091152", "0.5068211", "0.5055467", "0.5033564", "0.5029983", "0.5007074", "0.49862167", "0.4985389", "0.49771482", "0.49771482", "0.49771482", "0.49771482", "0.49771482", "0.49722472", "0.49704602", "0.49639165", "0.496386", "0.4957306", "0.49352437", "0.49279067", "0.4917995", "0.49116734", "0.4887451", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48580796", "0.48499364", "0.48499364", "0.48458177", "0.4843337", "0.4843337", "0.4843337", "0.4843337", "0.4843337", "0.48383614", "0.48205996", "0.48135525", "0.4810422", "0.48081034", "0.4806997", "0.48020306", "0.47971472", "0.4796066", "0.47936144", "0.47886378", "0.47849038", "0.47707582" ]
0.79709095
0
Parse all operations for a given operation type.
def parse_operations(self, operation_type: str) -> Tuple[Operation]: if operation_type is None: return tuple() query_type: SchemaType = self.types.get(operation_type) if query_type is None: return tuple() return tuple([Operation(f, self.settings) for f in query_type.fields])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")", "def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s <em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops", "def get_op_types(self):\n return self.cur_config['ops']", "def _pull_argops(op_dict):\n import inspect\n out = []\n keys = op_dict.keys()\n keys.sort() # Not necessary, but makes scanning the printout easier\n for k in keys:\n # Create a dictionary that will be used to fill the 'code' template\n d = {}\n d[\"enum_name\"] = enum_name = op_dict[k][3:] # <NAME>\n d[\"funct_name\"] = \"%s\" % enum_name.lower() # <name>\n class_name = \"%s4args\" % enum_name\n klass = getattr(_type, class_name, None)\n if klass is None:\n # This operation takes no arguments\n d[\"funct_args\"] = d[\"create_args\"] = d[\"set_args\"] = \"\"\n else:\n if type(klass) is dict:\n arg_list = \"enum_value\"\n d[\"create_args\"] = \"args = enum_value\"\n else:\n arg_list = \", \".join(inspect.getargspec(klass.__init__)[0][1:])\n d[\"create_args\"] = \"args = _type.%s(%s)\" % (class_name, arg_list)\n d[\"funct_args\"] = arg_list\n if enum_name.startswith(\"CB_\"):\n d[\"set_args\"] = \"opcb%s=args\" % enum_name.lower()[3:]\n else:\n d[\"set_args\"] = \"op%s=args\" % enum_name.lower()\n if enum_name.startswith(\"CB_\"):\n d[\"argop\"] = \"nfs_cb_argop4\"\n else:\n d[\"argop\"] = \"nfs_argop4\"\n out.append(d)\n return out", "def operations_map(self):\n # type: () -> Dict[Union[str, None], str]\n document_ast = self.document_ast\n operations = {} # type: Dict[Union[str, None], str]\n for definition in document_ast.definitions:\n if isinstance(definition, ast.OperationDefinition):\n if definition.name:\n operations[definition.name.value] = definition.operation\n else:\n operations[None] = definition.operation\n\n return operations", "def 
list_operations():", "def _extract_operation(self, words):\n operation = self.client\n\n for word in words:\n attr = getattr(operation, word, None)\n if attr is None:\n return operation, words[-1]\n\n operation = attr\n\n return operation, \"\"", "def parse(self, data):\n val = data.get(self.name, missing)\n if not isinstance(val, dict):\n return (self.operators['$eq'], self.field.deserialize(val)),\n\n return tuple(\n (\n self.operators[op],\n (self.field.deserialize(val)) if op not in self.list_ops else [\n self.field.deserialize(v) for v in val])\n for (op, val) in val.items() if op in self.operators\n )", "def split_terms(self, operation):\n return [self.format_number(t) for t in operation.split('/')]", "def find_operations(self, span_kind: str, service: str) -> List[Operation]:\n match_query = [{\"process.serviceName\": service}]\n if span_kind != \"\":\n tag_query = {\"tags\": {\"$elemMatch\": {\"key\": SPAN_KIND_NAME, \"vStr\": span_kind}}}\n match_query.append(tag_query)\n match_stage = {\"$and\": match_query}\n aggregation = [\n {\"$match\": match_stage},\n {\"$unwind\": {\"path\": \"$tags\"}},\n {\"$match\": {\"tags.key\": \"span.kind\"}},\n {\"$group\": {\"_id\": {\"operationName\": \"$operationName\", \"tags\": \"$tags\"}}},\n {\"$replaceRoot\": {\"newRoot\": \"$_id\"}},\n ]\n results = self.collection.aggregate(aggregation)\n return [\n Operation(name=result[\"operationName\"], span_kind=result[\"tags\"][\"vStr\"])\n for result in results\n ]", "def parse_command(self, command):\n \n #chcek operation type\n mod_type = re.findall('.*(rotate|translate|zoom|make|time).*',command)[0]\n \n #for each operation type recover necessary parameters\n if mod_type == 'rotate':\n angle = int(re.findall('.*rotate by (\\d+).*', command)[0])\n axis = list(map(int,re.findall('.*around \\((\\d+)\\,(\\d+)\\,(\\d+).*', command)[0]))\n\n #if the rotation angle is large split it into 3 to ensure the rotation is accomplished fully\n if angle >= 180:\n new_q = self.q.create_from_axis_angle(angle/3*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = [(mod_type, new_q),(mod_type, new_q),(mod_type, new_q)]\n else:\n new_q = self.q.create_from_axis_angle(angle*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = (mod_type, new_q)\n\n elif mod_type == 'zoom':\n factor = float(re.findall('.*factor of (\\d*\\.*\\d+).*', command)[0])\n result = (mod_type, factor)\n\n elif mod_type == 'translate':\n translate = np.array(list(map(int,re.findall('.*by \\((\\-*\\d+)\\,(\\-*\\d+)\\,(\\-*\\d+).*', command)[0])))\n result = (mod_type, translate)\n\n elif mod_type == 'make':\n layer = int(re.findall('.*make layer (\\d+).*', command)[0])\n vis_status = command.split()[-1]\n if vis_status == 'invisible':\n result = ('vis', layer, False)\n else:\n result = ('vis', layer, True)\n \n elif mod_type == 'time':\n time_shift = int(re.findall('.*by (\\-*\\d+).*', command)[0])\n result = (mod_type, time_shift)\n return result", "def parse(seq):\n\tdef eval_expr(z, list):\n\t\treturn reduce(lambda s, (f, x): f(s, x), list, z)\n\tunarg = lambda f: lambda x: f(*x)\n\tconst = lambda x: lambda _: x # like ^^^ in Scala\n\n\ttokval = lambda x: x.value # returns the value of a token\n\top = lambda s: a(Token('Op', s)) >> tokval # return the value if token is Op\n\top_ = lambda s: skip(op(s)) # checks if token is Op and ignores it\n\ttoktype = lambda t: some(lambda x: x.type == t) >> tokval # checks type of token\n\tdef lst(h,t):\n\t\treturn [h,] + t\n\tcall = lambda x: Call(x[0], x[1])\n\n\tmakeop = lambda s, 
f: op(s) >> const(f)\n\n\tadd = makeop('+', Plus)\n\tsub = makeop('-', Minus)\n\tmul = makeop('*', Times)\n\tdiv = makeop('/', Div)\n\n\tdef make_const(i):\n\t\treturn const(int(i))\n\n\tnumber = toktype('Number') >> Const\n\n\tmul_op = mul | div\n\tadd_op = add | sub\n\n\tfactor = with_forward_decls(lambda:\n\t\tnumber | op_('(') + exp + op_(')') | call)\n\tterm = factor + many(mul_op + factor) >> unarg(eval_expr)\n\texp = term + many(add_op + term) >> unarg(eval_expr)\n\texp_lst = with_forward_decls(lambda:\n\t\texp + many(op_(',') + exp) >> unarg(lst))\n\tcall = toktype('Name') + op_('(') + exp_lst + op_(')') >> call\n\n\treturn exp.parse(seq)", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def find_node_by_op_type(self, op_type: str) -> List[Operator]:\n return list(self.__op_type_list[op_type])", "def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")", "def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")", "def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")", "def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")", "def parse(self, commands):\n raise NotImplementedError()", "def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)", "def group_operations(text: str) -> list:\n \n parts = text_to_parts(text)\n \n def 
modify_list_group_by(operators):\n i = 0\n len_parts = len(parts)\n while i < len_parts:\n part = parts[i]\n if part[0] in operators:\n part0 = parts.pop(i-1)\n operation = parts.pop(i-1)\n part1 = parts.pop(i-1)\n parts.insert(i-1, \"(\" + part0 + operation + part1 + \")\")\n i -= 1\n len_parts -= 2\n i += 1\n \n for i, part in enumerate(parts):\n if part[0] == \"(\":\n parts[i] = \"\".join(group_operations(part[1:-1]))\n \n modify_list_group_by(\"*/\")\n modify_list_group_by(\"+-\")\n\n return parts", "def parse(self, ins):\n if type(ins)!=Instr:\n raise Exception(\"You are parsing object that isn't a instruction\")\n self.type = ins.instr\n if ins.instr in control_instructions:\n self.parse_control(ins)\n elif ins.instr in loadstore_instructions:\n self.parse_ls(ins) \n elif ins.instr in intarithm_instructions :\n self.parse_int(ins)\n elif ins.instr in floatarithm_instructions:\n self.parse_float(ins)\n elif ins.instr in misc_instructions:\n self.parse_misc(ins)\n else:\n self.parse_unknown(ins)", "def parse_operand(binary, module, kind):\n if kind == 'Id':\n return [parse_id(binary, module)]\n elif kind == 'LiteralNumber':\n return [binary.get_next_word()]\n elif kind == 'LiteralString':\n return [parse_literal_string(binary)]\n elif kind == 'OptionalLiteralString':\n word = binary.get_next_word(peek=True, accept_eol=True)\n if word is None:\n return []\n return [parse_literal_string(binary)]\n elif kind == 'VariableLiteralNumber' or kind == 'OptionalLiteralNumber':\n operands = []\n while True:\n word = binary.get_next_word(accept_eol=True)\n if word is None:\n return operands\n operands.append(word)\n elif kind in ['VariableId', 'OptionalId']:\n operands = []\n while True:\n tmp_id = parse_id(binary, module, accept_eol=True)\n if tmp_id is None:\n return operands\n operands.append(tmp_id)\n elif kind == 'VariableIdLiteralPair':\n operands = []\n while True:\n tmp_id = parse_id(binary, module, accept_eol=True)\n if tmp_id is None:\n return operands\n operands.append(tmp_id)\n word = binary.get_next_word()\n operands.append(word)\n elif kind == 'VariableLiteralIdPair':\n operands = []\n while True:\n word = binary.get_next_word(accept_eol=True)\n if word is None:\n return operands\n operands.append(word)\n tmp_id = parse_id(binary, module)\n operands.append(tmp_id)\n elif kind == 'OptionalMemoryAccessMask':\n val = binary.get_next_word(accept_eol=True)\n if val is None:\n return []\n result = expand_mask(kind[8:], val)\n try:\n aligned_idx = result.index('Aligned')\n except ValueError:\n pass\n else:\n result[aligned_idx] = (\n 'Aligned', binary.get_next_word(accept_eol=False))\n return [result]\n\n elif kind[:8] == 'Optional' and kind[-4:] == 'Mask':\n val = binary.get_next_word(accept_eol=True)\n if val is None:\n return []\n return [expand_mask(kind[8:], val)]\n elif kind in ir.MASKS:\n val = binary.get_next_word()\n return [expand_mask(kind, val)]\n elif kind in spirv.spv:\n val = binary.get_next_word()\n constants = spirv.spv[kind]\n for name in constants:\n if constants[name] == val:\n return [name]\n raise ParseError('Unknown \"' + kind + '\" value' + str(val))\n\n raise ParseError('Unknown kind \"' + kind + '\"')", "def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = 
float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]", "def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))", "def print_operation(operations):\n for operation in operations:\n print ' ',\n change_color_by_tag(operation)\n if operation['ExtAttributes']:\n print_extattributes_of_member(operation['ExtAttributes'])\n print operation['Type'],\n if operation['Arguments']:\n print operation['Name'],\n print_argument(operation['Arguments'])\n else:\n print operation['Name']", "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def operation_list(self):\n return OPERATION_LIST", "def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)", "def _op(op):\n def _process(self, ty, args=None, result=None, **metadata):\n if args is None:\n args = []\n assert ty is not None\n assert isinstance(args, list), args\n assert not any(arg is None for arg in flatten(args)), args\n result = Op(op, ty, args, result)\n if metadata:\n result.add_metadata(metadata)\n self._insert_op(result)\n return result\n\n def _process_void(self, *args, **kwds):\n result = kwds.pop('result', None)\n op = _process(self, types.Void, list(args), result)\n if kwds:\n op.add_metadata(kwds)\n return op\n\n if ops.is_void(op):\n build_op = _process_void\n else:\n build_op = _process\n\n if config.op_verify:\n build_op = op_verifier(build_op)\n\n return build_op", "def _parse_operator(self) -> Tuple:\n string = \"\".join(self.buffer)\n negated = string.endswith(\"not\")\n if not string.strip(\"\\t\\n\\r \"):\n params = False, 1, 1\n elif string.strip() == \"not\":\n params = True, 1, 1\n elif OPTION_RE.match(string):\n params = negated, 0, 1\n elif ZERO_PLUS_RE.match(string):\n params = negated, 0, self.inf\n elif ONE_PLUS_RE.match(string):\n params = negated, 1, self.inf\n elif AT_LEAST_RE.match(string):\n m = AT_LEAST_RE.match(string)\n params = negated, 
int(m.group(\"min\")), self.inf # type: ignore\n elif AT_MOST_RE.match(string):\n m = AT_MOST_RE.match(string)\n params = negated, 0, int(m.group(\"max\")) # type: ignore\n elif RANGE_RE.match(string):\n m = RANGE_RE.match(string)\n min_ = int(m.group(\"min\")) # type: ignore\n max_ = int(m.group(\"max\")) # type: ignore\n params = negated, min_, max_\n else:\n tail_lines = 0\n while string[-(tail_lines + 1)] == \"\\n\":\n tail_lines += 1\n raise ValueError(\n f'Can\\'t parse \"{string}\" as an operator'\n f\"at line {self.line_number - tail_lines}.\"\n )\n return params", "def clean_input(operation):\r\n num = ''\r\n statement = []\r\n \r\n for element in operation:\r\n if element.isnumeric():\r\n num += element\r\n elif element in OPERATORS:\r\n statement.append(float(num))\r\n statement.append(element)\r\n num = ''\r\n statement.append(float(num))\r\n\r\n return statement", "def find_label_operator(query):\n # If you apply any changes into these regex patterns, please update the JSON schema consequently at:\n # depc/schemas/v1_config.json\n # Rule\n regex = r\"^rule.(.+|'.+')$\"\n match = re.search(regex, query)\n if match:\n rule = match.group(1)\n if rule.startswith(\"'\"):\n rule = rule[1:-1]\n return RuleOperator, {\"rule\": rule}\n\n # Operation AND, OR (no argument)\n regex = (\n r\"^operation.(AND|OR)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n )\n match = re.search(regex, query)\n if match:\n # Transform '[Foo, Bar]' into a Python list\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation ATLEAST (integer argument)\n regex = r\"^operation.(ATLEAST\\([0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation RATIO (float integer less than 0)\n regex = r\"^operation.(RATIO\\(0.[0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Aggregation AVERAGE, MIN, MAX\n regex = r\"^aggregation.(AVERAGE|MIN|MAX)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return AggregationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # We validate the schema before save it in database,\n # it's not possible to go here.\n return None, None", "def parse_equations(eqs, ops):\n eeqs = []\n prop_list = ['unit of', 'commutative', 'associative', 'distributes over', 'inverse of', \n 'annihilates', 'idempotent', 'absorbs', 'absorptive', 'involutive']\n props = []\n for eq in eqs:\n if not any_in(prop_list, eq):\n eeqs.append(Eq.parse_eq(eq, ops))\n else:\n if 'unit of' in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*unit of\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Unit(unit, op, side))\n elif \"annihilates\" in eq: \n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*annihilates\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Annih(unit, op, side))\n elif \"distributes over\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*distributes over\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Dist(op1, op2, side))\n elif \"absorbs\" in eq:\n m = 
re.search(\"^'(\\w+)'\\s+(left|right)?\\s*absorbs\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Absorb(op1, op2, side))\n elif \"inverse of\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*inverse of\\s+'(\\w+)'\\s+with\\s+'(\\w+)'$\", eq)\n uop, side, op, unit = m.groups()\n props.append(Inverse(uop, op, unit, side))\n elif \"absorptive\" in eq:\n m = re.search(\"^'(\\w+)'\\s+and\\s+'(\\w+)'\\s+absorptive$\", eq)\n op1, op2 = m.groups()\n props.append(Absorb(op1, op2, None))\n props.append(Absorb(op2, op1, None))\n else:\n m = re.search(\"^'(\\w+)'\\s+(.*)$\", eq)\n op = m.group(1)\n kws = splitstrip(m.group(2), \",\")\n if 'associative' in kws:\n props.append(Assoc(op))\n if 'commutative' in kws:\n props.append(Comm(op))\n if 'idempotent' in kws:\n props.append(Idemp(op))\n if 'involutive' in kws:\n props.append(Invol(op))\n\n return eeqs, props", "def calc_all_myOperations():\n while len(myOperations)>1:\n f_nb = float(myOperations[0])\n operator = myOperations[1]\n s_nb = float(myOperations[2])\n \n if operator == \"+\": \n result = f_nb + s_nb\n elif operator== \"-\":\n result = f_nb - s_nb\n elif operator== \"*\":\n result = f_nb * s_nb\n elif operator== \"/\":\n result = f_nb / s_nb\n elif operator== \"% of \":\n print(myOperations)\n result = f_nb/100*s_nb\n \n round_result=round(result,5)\n del myOperations[:2]\n myOperations[0]=round_result\n \n clear_myOperations() \n return round_result", "def get_op_types_by_precision(self, precision):\n assert precision in list(self.cur_config['ops'].keys())\n\n return self.cur_config['ops'][precision]", "def gen_parse_op_text(conversation):\n ret_text = \"\"\n conv_parse_ops = conversation.parse_operation\n for i in range(1, len(conv_parse_ops)):\n ret_text += conv_parse_ops[i] + \" \"\n ret_text = ret_text[:-1]\n return ret_text", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def generate_operations(self):\n combinations = self.COMBINATIONS.items()[:self.limit]\n for (term1, term2), type in combinations:\n yield (term1, term2, type)", "def add_operations_from(self, obj):\n\n for name in dir(obj):\n op = getattr(obj, name)\n if isinstance(op, Operation):\n self.add_operation(op)", "def parse_single_op_string(opstring) :\n ops = {'+' : \"plus\",\n '?' : \"opt\" , \n '*' : \"star\"}\n return '('.join(ops[c] for c in reversed(opstring)) + '('", "def find_ops(optype):\n gd = tf.get_default_graph()\n return [var for var in gd.get_operations() if var.type == optype]", "def get_current_operations_for_gui(cls, operation_types=None):\n db = cls._core.get_db()\n #TODO CHECK HOW LISTS ARE HANDLED IN FDB\n if operation_types is not None and type(operation_types) == list:\n stmnt = \"SELECT OPE_ID, OPE_OPE_PARENT, OPE_INVOKED, OPE_TYPE, OPE_STATUS FROM OPERATIONS WHERE OPE_TYPE IN (?) 
ORDER BY OPE_INVOKED ;\"\n cur = db.query(cls._core,stmnt,(operation_types))\n else:\n stmnt = \"SELECT OPE_ID, OPE_OPE_PARENT, OPE_INVOKED, OPE_TYPE, OPE_STATUS FROM OPERATIONS ORDER BY OPE_INVOKED ;\"\n cur = db.query(cls._core,stmnt)\n ret = {}\n for row in cur.fetchallmap():\n operation = cls.restore_operation(row)\n custom_values = operation.get_values()\n\n ret[row[\"OPE_ID\"]] = {\"id\":row[\"OPE_ID\"],\n \"parent\":row[\"OPE_OPE_PARENT\"],\n \"invoked\":str(row[\"OPE_INVOKED\"]),\n \"type\":row[\"OPE_TYPE\"],\n \"status\":row[\"OPE_STATUS\"],\n \"data\":custom_values}\n return ret", "def parseAction(self, action):\n action = self.AGENT_TYPES[action]\n\n\n full_action = {}\n full_action[\"action\"] = action\n if action == \"eli-kw\":\n keywords = self.dataset.getSuggestedKeywords()\n full_action[\"keywords\"] = keywords[:self.N]\n elif action == \"info\" or action == \"info-all\":\n full_action[\"function\"] = self.current_function\n\n elif action == \"sugg\" or action == \"sugg-info-all\":\n top_hit = self.dataset.getTopHits(1)\n if not top_hit:\n full_action[\"action\"] = \"eli-query\"\n else:\n functions = self.dataset.getTopHits(1, self.result_index)\n if functions:\n full_action[\"function\"] = functions[0]\n else:\n full_action[\"function\"] = \"\"\n\n self.result_index += 1\n\n elif action == \"sugg-all\":\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n\n elif action == \"change-page\":\n self.result_index += self.K\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n return full_action", "def ExtractOperations(toolF):\n return [o[\"uri\"] for o in toolF[\"operation\"]]", "def parse_action(element: Element) -> ActionType:\n # edit is a special type of action for strings\n if \"edit\" in element.attributes:\n if element.attributes[\"edit\"] == \"append\":\n return edit_append\n raise error_at(element)(f'invalid edit=\"{element.attributes[\"edit\"]}\"')\n # default action is replace\n action = element.attributes.get(\"action\", \"replace\")\n if action == \"replace\":\n return replace\n if action == \"append\":\n return append\n if action == \"delete\":\n return delete\n if action == \"merge\":\n return merge\n raise error_at(element)('invalid action=\"{:s}\".'.format(action))", "def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields 
equivalent mutations\n pass\n else:\n yield to_op", "def _parse_op_nodes(self, node_protos):\n logger.debug(\"Start to parse op nodes from proto.\")\n for topological_index, node_proto in enumerate(node_protos):\n if not node_proto.name:\n logger.warning(\"Finding a node with an empty name will not save it.\")\n continue\n\n self._parse_op_node(topological_index, node_proto)", "def calculator_ops(calc_str):\n return re.findall(r\"\\(([+-*/]\\s+\\d+\\s+\\d+)\\)\", calc_str)", "def _process_op_fetches(self, op_fetches):\n if op_fetches is None:\n return []\n\n if not isinstance(op_fetches, (list, tuple)):\n op_fetches = [op_fetches]\n\n fetches = []\n for fetch in op_fetches:\n if isinstance(fetch, ops.Operation):\n fetches.append(fetch)\n elif isinstance(fetch, tensor_lib.Tensor):\n fetches.append(fetch.op)\n else:\n logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %\n fetch)\n return fetches", "def restore_operation(cls, operation_record):\n classname = operation_record[\"OPE_TYPE\"]\n module = \"\" #TODO Implement modulename from database if Operation belongs to Module\n is_operation_of_module = False\n exec \"\"\"\ntry:\n type(%(class)s)\nexcept NameError,e:\n is_operation_of_module = True\"\"\"%{'class':classname}\n\n if is_operation_of_module:\n exec \"\"\"\nfrom %(module)s import %(class)s\noperation = %(class)s(cls._core)\"\"\"%{'class':classname,'module':module}\n else:\n exec \"\"\"\noperation = %(class)s(cls._core)\"\"\"%{'class':classname}\n\n operation.set_id(operation_record['OPE_ID'])\n db = cls._core.get_db()\n stmnt = \"SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation_record[\"OPE_ID\"],))\n for row in cur.fetchallmap():\n val = row[\"OPD_VALUE\"]\n exec \"\"\"val = %s(val)\"\"\"%row[\"OPD_TYPE\"]\n operation.set_value(row[\"OPD_KEY\"], val)\n return operation", "def __init__(self, operations = []):\n self.operations = operations", "def all_operations():\n return OperationHandler().get_all_classes()", "def get_operation_obect(self, method):\n pass", "def _parse_and_build_commands(self):\n for root in self.roots:\n for commands in root.iter('commands'):\n for command_element in commands.iter('command'):\n try:\n self._collect_command(command_element)\n\n except Exception as exception:\n command_name = GLGenerator.get_command_name(command_element)\n print('Error processing command {}: {}'.format(command_name, str(exception)))\n raise\n\n extension_name_max_len = 0\n for extension in self.extensions:\n extension_name_max_len = max(extension_name_max_len, len(extension))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n\n for extension in sorted(set(self.extensions)):\n quoted_extension = '\"' + extension + '\"'\n declaration = f' Extension_{extension:{extension_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_extension_map.insert(std::pair<std::string, Extension>({0:{1}}, Extension::Extension_{2:{3}}));'.format(\n quoted_extension, extension_name_max_len + 2, extension, extension_name_max_len\n )\n case_entry = ' case Extension::Extension_{0:{1}}: return \"{0}\";'.format(\n extension, extension_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(f' Extension_Count = {enum_value:>6}')\n self.extension_enum_declarations = ',\\n'.join(declarations)\n self.extension_map_entries = '\\n'.join(map_entries)\n self.extension_case_entries = 
'\\n'.join(case_entries)\n\n commands = set(self.command_list)\n\n commands = sorted(commands)\n\n command_name_max_len = 0\n for command in commands:\n command_name_max_len = max(command_name_max_len, len(command))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n for command in commands:\n declaration = f' Command_{command:{command_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_command_map.insert(std::pair<std::string, Command>({0:{1}}, Command::Command_{2:{1}}));'.format(\n '\"' + command + '\"', command_name_max_len, command\n )\n case_entry = ' case Command::Command_{0:{1}}: return \"{0}\";'.format(\n command, command_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(' Command_Count = {:>6}'.format(enum_value))\n self.command_enum_declarations = ',\\n'.join(declarations)\n self.command_map_entries = '\\n'.join(map_entries)\n self.command_case_entries = '\\n'.join(case_entries)", "def get_file_operations() -> dict:\n\n from FileWrangler.fileops.CompletelyReplace import CompletelyReplaceUIOperation\n from FileWrangler.fileops.Separator import SeparatorUIOperation\n from FileWrangler.fileops.PatternFinding import PatternExtractingUIOperation\n from FileWrangler.fileops.PathComponents import PathComponentsUIOperation\n operations = [\n CompletelyReplaceUIOperation(),\n SeparatorUIOperation(),\n PatternExtractingUIOperation(),\n PathComponentsUIOperation()\n ]\n return {x.name: x for x in operations}", "def test_operation_mode_expressions(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int m = 4\\nMZgate(0, 1) | [m*2, -1+m]\"\n )\n assert bb.operations == [{'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [8, 3]}]", "def convert_to_model(self, *args):\n operation_types_data, *_ = args\n return [OperationType(**operation_type) for operation_type in operation_types_data]", "def _execute_op(self, op):\n if op.op_type == Operation.Type.GET:\n if op.key in self._store:\n return Result.OK, self._store[op.key]\n else:\n return Result.NOT_FOUND, \"\"\n elif op.op_type == Operation.Type.PUT:\n self._store[op.key] = op.value\n return Result.OK, \"\"\n elif op.op_type == Operation.Type.DEL:\n self._store.pop(op.key, None)\n return Result.OK, \"\"\n else:\n raise ValueError(\"Invalid operation type\")", "def _execute_op(self, op):\n if op.op_type == Operation.Type.GET:\n if op.key in self._store:\n return Result.OK, self._store[op.key]\n else:\n return Result.NOT_FOUND, \"\"\n elif op.op_type == Operation.Type.PUT:\n self._store[op.key] = op.value\n return Result.OK, \"\"\n elif op.op_type == Operation.Type.DEL:\n self._store.pop(op.key, None)\n return Result.OK, \"\"\n else:\n raise ValueError(\"Invalid operation type\")", "def parse_options(type):\n # TODO: conflict_handler='resolve' is really required ??\n parser = ArgumentParser(conflict_handler='resolve')\n if type == 'backup':\n for name, description in _get_parameters_backup().items():\n parser.add_argument('--{}'.format(name),\n help=description, required=True)\n elif type == 'restore':\n for name, description in _get_parameters_restore().items():\n if name in _get_parameters_restore_optional().keys():\n \tparser.add_argument('--{}'.format(name), help=description, required=False)\n else:\n parser.add_argument('--{}'.format(name), help=description, required=True)\n elif type == 'blob_operation':\n for name, description in _get_parameters_blob_operation().items():\n 
parser.add_argument('--{}'.format(name),\n help=description, required=True)\n else:\n raise Exception('Use either \\'backup\\' or \\'restore\\' as type.')\n\n for key, credentials in _get_parameters_credentials().items():\n for name, description in credentials.items():\n parser.add_argument('--{}'.format(name), help=description)\n configuration = vars(parser.parse_args())\n assert configuration['type'] == 'online' or configuration['type'] == 'offline', \\\n '--type must be \\'online\\' or \\'offline\\''\n return configuration", "def _lex_operators(self):\n try:\n val = self._current\n type = Lexer._OPERATORS[self._current]\n self._advance()\n return Token(val, type)\n except KeyError:\n raise ParserError(self._expr,\n \"Encountered invalid token '{t}' at {i}\".format(\n t=self._current, i=self._index))", "def _parse_args(\n self,\n tokens: List[int],\n operator: type,\n num_args: int) -> Tuple[op.Expression, int]:\n if len(tokens[1:]) < num_args:\n return None, 1\n args = [\n self.token_op_table[tok]\n for tok in tokens[1:1+num_args]\n ]\n return operator(*args), 1+num_args", "def _process_operation(operation_pb):\n match = _OPERATION_NAME_RE.match(operation_pb.name)\n if match is None:\n raise ValueError('Operation name was not in the expected '\n 'format after instance creation.',\n operation_pb.name)\n location_id = match.group('location_id')\n operation_id = int(match.group('operation_id'))\n\n request_metadata = _parse_pb_any_to_native(operation_pb.metadata)\n operation_begin = _pb_timestamp_to_datetime(\n request_metadata.request_time)\n\n return operation_id, location_id, operation_begin", "def command_type(self):\n if self._is_push_command():\n command_type = Parser.C_PUSH\n elif self._is_pop_command():\n command_type = Parser.C_POP\n elif self._is_arithmetic_command():\n command_type = Parser.C_ARITHMETIC\n elif self._is_comment_line() or self._is_blank_line():\n command_type = Parser.IGNORE\n else:\n command_type = Parser.INVALID\n return command_type", "def operate(\n self, op: OperatorType, *other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def operations(self):\n return set(self._operation_map.keys())", "def operations(self):\n return set(self._operation_map.keys())", "def parse(operators, *term_strs):\n scope = Scope()\n rvs = []\n for ts in term_strs:\n p = Parser(operators, ts, scope)\n try:\n term = p.parse()\n except SyntaxError:\n print 'While parsing: %s' % ts\n raise\n rvs.append(term)\n rvs.append(scope)\n return tuple(rvs)", "def parse(self, tokens: List[str]) -> List:\r\n self._check_brackets(tokens)\r\n\r\n objs = self._parse_individual_tokens(tokens)\r\n objs = self._parse_functions(objs)\r\n objs = self._parse_matrices(objs)\r\n\r\n return objs", "def _calculate_operation_math(self, rule, left, right):\n\n # Attempt to keep integer data type for the result, when possible.\n if isinstance(left, IntegerRule) and isinstance(right, IntegerRule):\n result = self.evaluate_binop_math(rule.operation, left.value, right.value)\n if isinstance(result, list):\n return ListRule([IntegerRule(r) for r in result])\n return IntegerRule(result)\n\n # Otherwise the result is float.\n if isinstance(left, NumberRule) and isinstance(right, NumberRule):\n result = self.evaluate_binop_math(rule.operation, left.value, right.value)\n if isinstance(result, list):\n return ListRule([FloatRule(r) for r in result])\n return FloatRule(result)\n\n # This point should never be reached.\n raise Exception()", "def list_operations(\n self,\n ) -> Callable[\n 
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_operations\" not in self._stubs:\n self._stubs[\"list_operations\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/ListOperations\",\n request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,\n response_deserializer=operations_pb2.ListOperationsResponse.FromString,\n )\n return self._stubs[\"list_operations\"]", "def list_operations(\n self,\n ) -> Callable[\n [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_operations\" not in self._stubs:\n self._stubs[\"list_operations\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/ListOperations\",\n request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,\n response_deserializer=operations_pb2.ListOperationsResponse.FromString,\n )\n return self._stubs[\"list_operations\"]", "def list_operations(\n self,\n ) -> Callable[\n [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_operations\" not in self._stubs:\n self._stubs[\"list_operations\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/ListOperations\",\n request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,\n response_deserializer=operations_pb2.ListOperationsResponse.FromString,\n )\n return self._stubs[\"list_operations\"]", "def orderOp(self, stream):\n ops = []\n if stream.op(OP_ORDER):\n for op in stream.ops['ops']:\n if op.startswith(OP_ORDER):\n ops.append( (self.leftStream(stream, op), op) )\n return ops", "def create() -> 'Tokenizer':\n token_op_table = [\n EOS,\n op.Concat,\n op.ConstStr,\n op.SubStr,\n op.GetSpan,\n op.Trim,\n ]\n\n # Nesting operators and their args get \"compacted\" into\n # \"primitive\" tokens\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetToken, type_, index))\n\n for case in op.Case:\n token_op_table.append((op.ToCase, case))\n\n for delim1 in op.DELIMITER:\n for delim2 in op.DELIMITER:\n token_op_table.append((op.Replace, delim1, delim2))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetUpto, dsl_regex))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetFrom, dsl_regex))\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetFirst, type_, index))\n\n for type_ in op.Type:\n token_op_table.append((op.GetAll, type_))\n\n # Primitive types\n\n for type_ in op.Type:\n token_op_table.append(type_)\n\n for boundary in op.Boundary:\n token_op_table.append(boundary)\n\n # Covers op.INDEX\n for position in range(op.POSITION[0], op.POSITION[1]+1):\n token_op_table.append(position)\n\n # This covers op.DELIMITER\n for character in op.CHARACTER:\n token_op_table.append(character)\n\n token_op_table = {\n token: op\n for token, op in enumerate(token_op_table)\n }\n\n 
op_token_table = {\n op: token\n for token, op in token_op_table.items()\n }\n\n assert len(token_op_table) == len(op_token_table)\n\n string_token_table = {\n char: token\n for token, char in enumerate(op.CHARACTER)\n }\n\n return Tokenizer(\n token_op_table=token_op_table,\n op_token_table=op_token_table,\n string_token_table=string_token_table,\n )", "def _parse(cls, tokens, *, get_params=False):\n\n\t\tif get_params:\n\t\t\tresult = []\n\t\telse:\n\t\t\tresult = None\n\n\t\tfor t in tokens:\n\t\t\tnew = None\n\t\t\tdone = False\n\n\t\t\tif t.kind == 'OPEN':\n\t\t\t\tnew = cls._parse(tokens)\n\t\t\telif t.kind in {'CLOSE', 'DOT'}:\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'LAMBDA':\n\t\t\t\tparams = cls._parse(tokens, get_params=True)\n\n\t\t\t\tif not params:\n\t\t\t\t\traise LambdaError('No parameters in lambda', t.line, t.pos)\n\n\t\t\t\tbody = cls._parse(tokens)\n\n\t\t\t\tif not body:\n\t\t\t\t\traise LambdaError('No body in lambda', t.line, t.pos)\n\n\t\t\t\tnew = Abs(params[-1], body, line=t.line, pos=t.pos)\n\n\t\t\t\tfor param in params[-2::-1]:\n\t\t\t\t\tnew = Abs(param, new, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'EQUAL':\n\t\t\t\tvar = cls._parse(tokens)\n\n\t\t\t\tif not var:\n\t\t\t\t\traise LambdaError('No variable to assign to', t.line, t.pos)\n\n\t\t\t\tvalue = cls._parse(tokens)\n\n\t\t\t\tif not value:\n\t\t\t\t\traise LambdaError('No value to assign: ' + var.name, t.line, t.pos)\n\n\t\t\t\tnew = Ass(var, value, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'QUERY':\n\t\t\t\tvalue = cls._parse(tokens)\n\n\t\t\t\tif not value:\n\t\t\t\t\traise LambdaError('No value to query', t.line, t.pos)\n\n\t\t\t\tnew = Que(value, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'SYMBOL':\n\t\t\t\tnew = Var(t.value, line=t.line, pos=t.pos)\n\n\t\t\tif new is not None:\n\t\t\t\tif get_params:\n\t\t\t\t\tresult.append(new)\n\t\t\t\telif result is None:\n\t\t\t\t\tresult = new\n\t\t\t\telse:\n\t\t\t\t\t# Ensure that when the function and argument are output,\n\t\t\t\t\t# they are correctly parenthesized.\n\t\t\t\t\tif isinstance(result, (Abs, Ass, Que)):\n\t\t\t\t\t\tresult.surround_on_str = True\n\n\t\t\t\t\tif isinstance(new, App):\n\t\t\t\t\t\tnew.surround_on_str = True\n\n\t\t\t\t\tresult = App(result, new, line=new.line, pos=new.pos)\n\n\t\t\tif done:\n\t\t\t\tbreak\n\n\t\treturn result", "def get_op_type(self):\n return self.op_type", "def repair_operators(self) -> List[Tuple[str, _OperatorType]]:\n return list(self._r_ops.items())", "def list_operations(\n self,\n orderby=None, # type: Optional[List[Union[str, \"models.Enum29\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum30\"]]]\n expand=None, # type: Optional[List[str]]\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[\"models.CollectionOfPrintOperation\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrintOperation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_operations.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrintOperation', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def separate_operations(document_ast: DocumentNode) -> Dict[str, DocumentNode]:\n operations: List[OperationDefinitionNode] = []\n dep_graph: DepGraph = {}\n\n # Populate metadata and build a dependency graph.\n for definition_node in document_ast.definitions:\n if isinstance(definition_node, OperationDefinitionNode):\n operations.append(definition_node)\n elif isinstance(\n definition_node, FragmentDefinitionNode\n ): # pragma: no cover else\n dep_graph[definition_node.name.value] = collect_dependencies(\n definition_node.selection_set\n )\n\n # For each operation, produce a new synthesized AST which includes only what is\n # necessary for completing that operation.\n separated_document_asts: Dict[str, 
DocumentNode] = {}\n for operation in operations:\n dependencies: Set[str] = set()\n\n for fragment_name in collect_dependencies(operation.selection_set):\n collect_transitive_dependencies(dependencies, dep_graph, fragment_name)\n\n # Provides the empty string for anonymous operations.\n operation_name = operation.name.value if operation.name else \"\"\n\n # The list of definition nodes to be included for this operation, sorted\n # to retain the same order as the original document.\n separated_document_asts[operation_name] = DocumentNode(\n definitions=[\n node\n for node in document_ast.definitions\n if node is operation\n or (\n isinstance(node, FragmentDefinitionNode)\n and node.name.value in dependencies\n )\n ]\n )\n\n return separated_document_asts", "def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks", "def parse_statement(self, stmt):\r\n if 'type' not in stmt:\r\n raise TypeError('Type field required')\r\n\r\n if stmt['type'] == 'property':\r\n return self.parse_property(stmt)\r\n elif stmt['type'] == 'edge':\r\n return self.parse_edge(stmt)\r\n elif stmt['type'] == 'key_index':\r\n return self.parse_key_index(stmt)\r\n elif stmt['type'] == 'defaults':\r\n return self.parse_defaults(stmt)\r\n else:\r\n raise ValueError('Invalid `type` value {}'.format(stmt['type']))", "def parse_next_instruction(self) -> None:\n instruction = self.program[self.pointer]\n opcode = instruction % 100\n if opcode == 99:\n self.halt = True\n\n self.modes = instruction // 100\n\n if opcode == 1:\n self.op_sum()\n if opcode == 2:\n self.op_multiply()\n if opcode == 3:\n self.op_input()\n if opcode == 4:\n self.op_output()\n if opcode == 5:\n self.op_jump_if_true()\n if opcode == 6:\n self.op_jump_if_false()\n if opcode == 7:\n self.op_less_than()\n if opcode == 8:\n self.op_equal_to()\n if opcode == 9:\n self.op_adjust_relative()", "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? 
;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)", "def parse(self, commands):\n \n # Get rid of dummy objects that represented deleted objects in\n # the last parsing round.\n to_delete = []\n for id_, val in self._objects.items():\n if val == JUST_DELETED:\n to_delete.append(id_)\n for id_ in to_delete:\n self._objects.pop(id_)\n \n for command in commands:\n self._parse(command)", "def _extract_ops_from_onnx_graph(graph, operators, domain_opset_map):\n\n for operator in graph.node:\n # empty domain is used as an alias for 'ai.onnx'\n domain = operator.domain if operator.domain else \"ai.onnx\"\n\n if domain not in operators or domain not in domain_opset_map:\n continue\n\n operators[domain][domain_opset_map[domain]].add(operator.op_type)\n\n for attr in operator.attribute:\n if attr.type == onnx.AttributeProto.GRAPH: # process subgraph\n _extract_ops_from_onnx_graph(attr.g, operators, domain_opset_map)\n elif attr.type == onnx.AttributeProto.GRAPHS:\n # Currently no ONNX operators use GRAPHS.\n # Fail noisily if we encounter this so we can implement support\n raise RuntimeError(\"Unexpected attribute proto of GRAPHS\")", "def _parse_op_and_rate(op, rate, **kw):\n if not isinstance(op, Qobj):\n raise ValueError(\"NonMarkovianMCSolver ops must be of type Qobj\")\n if isinstance(rate, numbers.Number):\n rate = ConstantCoefficient(rate)\n else:\n rate = coefficient(rate, **kw)\n return op, rate", "def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)", "def process_line(line):\n action = {'+': operator.add, '-': operator.sub}\n nums = [int(num) for num in re.findall(r'-?\\d+', line)[1:]] # extracts numbers\n ops = re.findall(r' ([+-]) ', line) # extracts operators\n total = nums[0]\n for op, num in zip(ops, nums[1:]):\n total = action[op](total, num)\n return total", "def parse_input(self, instructions):\r\n\r\n input_ = instructions\r\n input_list = input_.strip().split()\r\n\r\n if input_list[0] == 'push':\r\n self.push(input_list[1])\r\n\r\n elif input_list[0] == 'pop':\r\n self.pop()\r\n\r\n elif input_list[0] == 'top':\r\n self.top()\r\n\r\n elif input_list[0] == 'replace':\r\n self.replace(input_list[1], input_list[2])\r\n\r\n else:\r\n pass", "def behave(self, type: str):\n return {\n '+': lambda x: self.plus(),\n '-': lambda x: self.minus(),\n '<': lambda x: self.shift_left(),\n '>': lambda x: self.shift_right(),\n ',': lambda x: self.comma(x),\n '.': lambda x: self.dot(),\n '[': lambda x: self.left_bracket(),\n ']': lambda x: self.right_bracket(),\n }.get(type)", "def __init__(self, operations=None):\n\n if operations is None:\n operations = self.default_operations\n\n if None in operations:\n operations.update(self.default_operations)\n\n self.operations = operations\n self.special = [\"(\", \")\", \",\"]", "def oopFirstPass(tokens):\r\n oopList = \"\"\r\n num1 = None\r\n num2 = None\r\n operation = None\r\n for val in tokens:\r\n if (num1 is None):\r\n num1 = float(val)\r\n continue\r\n if (val == \"+\" or val == \"-\"):\r\n #add to oopList\r\n oopList += str(num1) + \" \" + val + \" \"\r\n num1 = None\r\n continue\r\n if (operation is None):\r\n operation = val\r\n continue\r\n if (num2 is None):\r\n num2 = float(val)\r\n \r\n # do multiply/division\r\n if (operation == \"*\"):\r\n num1 = multiply(num1, num2)\r\n elif (operation == \"/\"):\r\n num1 = 
divide(num1, num2)\r\n else:\r\n return \"Incorrect Input Formula\"\r\n num2 = None\r\n operation = None\r\n oopList += str(num1) \r\n return oopList", "def ListOperations(\n self,\n request: google.longrunning.operations_pb2.ListOperationsRequest,\n context: grpc.ServicerContext,\n ) -> google.longrunning.operations_pb2.ListOperationsResponse:" ]
[ "0.6407225", "0.5742777", "0.56923646", "0.5603622", "0.55325913", "0.54617524", "0.5408668", "0.5370978", "0.5358051", "0.5347028", "0.5345967", "0.53198713", "0.52810514", "0.5279077", "0.5233153", "0.5233153", "0.5233153", "0.5233153", "0.52281266", "0.52163815", "0.520678", "0.5190903", "0.5181302", "0.513552", "0.512595", "0.5117479", "0.51017326", "0.50932896", "0.5056284", "0.50495887", "0.5036482", "0.50254977", "0.5014885", "0.5001341", "0.49963734", "0.4992806", "0.4988119", "0.49875423", "0.49875423", "0.49875423", "0.49875423", "0.49810657", "0.49788684", "0.49772558", "0.4976979", "0.49650076", "0.49549323", "0.49498358", "0.49457967", "0.49443945", "0.49256766", "0.4920145", "0.4917845", "0.49096906", "0.48924327", "0.48904362", "0.48863935", "0.48780602", "0.48468864", "0.48443666", "0.4843511", "0.48365843", "0.4804825", "0.4804825", "0.48042354", "0.4803146", "0.47954834", "0.4794148", "0.479168", "0.47837186", "0.47747186", "0.47747186", "0.47626132", "0.47620064", "0.47458732", "0.47386098", "0.47386098", "0.47386098", "0.4733615", "0.47304457", "0.47245997", "0.4716929", "0.47080112", "0.4707526", "0.46984047", "0.46961555", "0.4696063", "0.46957996", "0.46822655", "0.4675082", "0.46655843", "0.4665298", "0.46643987", "0.46436408", "0.46423194", "0.463872", "0.46296334", "0.46276945", "0.46227044", "0.46196133" ]
0.7862727
0
Parse all types from the raw schema response.
def parse_types(schema_types: List[Dict]) -> Dict[str, SchemaType]:
    result = {}
    for schema_type in schema_types:
        new_type = SchemaType(schema_type)
        result[new_type.name] = new_type
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)", "def _parse_types(self, die):\n if die.offset in self._visited_die_offset:\n return\n else:\n self._visited_die_offset.append(die.offset)\n\n if die.tag == \"DW_TAG_base_type\":\n self._parse_base_type(die)\n\n elif die.tag == \"DW_TAG_const_type\":\n self._parse_const_type(die)\n\n elif die.tag == \"DW_TAG_volatile_type\":\n self._parse_volatile_type(die)\n\n elif die.tag == \"DW_TAG_typedef\":\n self._parse_typedef(die)\n\n elif die.tag == \"DW_TAG_pointer_type\":\n self._parse_pointer_type(die)\n\n elif die.tag == \"DW_TAG_array_type\":\n self._parse_array_type(die)\n\n elif die.tag == \"DW_TAG_enumeration_type\":\n self._parse_enums_type(die)\n\n # union and class are not implemented yet, use structure.\n elif die.tag == \"DW_TAG_structure_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_union_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_class_type\":\n self._parse_structure_type(die)\n\n elif die.tag == \"DW_TAG_subroutine_type\":\n self._parse_subroutine_type(die)\n\n else:\n ...\n\n if die.tag == \"DW_TAG_compile_unit\":\n return\n\n # if has children, iter them, except DW_TAG_compile_unit.\n for child_die in die.iter_children():\n self._parse_types(child_die)", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def parse_types(self, messageSchema, element):\n for child_element in element:\n if child_element.tag not in self.VALID_TYPES_ELEMENTS:\n raise ValueError(\n f\"invalid types child element {repr(child_element.tag)}\"\n )\n\n parser = getattr(self, f\"parse_types_{child_element.tag}\", None)\n if not parser:\n raise RuntimeError(\n f\"unsupported types parser {repr(child_element.tag)}\"\n )\n\n parser(messageSchema, child_element)", "def parse(schema):\n if six.PY3:\n return avro.schema.Parse(schema)\n else:\n return avro.schema.parse(schema)", "def parse_response(self):\n pass", "def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n 
return types", "def gather_types(self):\n\n def gather_subfields(field: Field) -> List[Field]:\n fields = [field]\n\n if isinstance(field, CompositeField):\n for f in field.fields:\n fields.extend(gather_subfields(f))\n elif isinstance(field, ArrayField):\n fields = []\n fields.extend(gather_subfields(field.itemtype))\n\n return fields\n\n types = []\n for method in self.methods:\n types.extend([method.request, method.response])\n for field in method.request.fields:\n types.extend(gather_subfields(field))\n for field in method.response.fields:\n types.extend(gather_subfields(field))\n return types", "def _parse(self):\n pass", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable, memoryview]:\n for data_type in params.expected:\n try:\n return data_type.parse(buf, params)\n except NotParseable:\n pass\n raise UnexpectedType(buf)", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def GetParsedTypes(cls):\n return cls._parser_clases.keys()", "def parse(self, response):", "def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")", "def parse(self, response):\n yield from self.all_forms(response)", "def describeSchemaTypes(self):\n if not self.__isSchemaCached():\n self.__cacheSchema()\n queryFile = self.__cacheLocation + \"/SELECT TYPES.json\"\n selectTypesReply = json.load(open(queryFile))\n for result in selectTypesReply[\"results\"]:\n if float(result[\"number\"]) < 1.1: \n continue # TEMP - ignore under 1.1\n queryFile = self.__cacheLocation + \"/DESCRIBE TYPE \" + re.sub(r'\\.', '_', result[\"number\"]) + \".json\"\n if not os.path.isfile(queryFile):\n raise Exception(\"Expected Schema for %s to be in Cache but it wasn't - exiting\" % result[\"number\"])\n jreply = json.load(open(queryFile))\n if \"count\" in result:\n jreply[\"count\"] = result[\"count\"]\n yield jreply", "def parse(self, data):\n raise NotImplementedError", "def _parse_result(self, responses, verbose=False):\n\n # loading the columns config\n colConfig = None\n if self._current_service:\n colConfig = self._column_configs.get(self._current_service)\n self._current_service = None # clearing current service\n\n resultList = []\n\n for resp in responses:\n result = resp.json()\n\n # check for error message\n if result['status'] == \"ERROR\":\n raise RemoteServiceError(result.get('msg', \"There was an error with your request.\"))\n\n resTable = _mashup_json_to_table(result, colConfig)\n resultList.append(resTable)\n\n allResults = vstack(resultList)\n\n # Check for no results\n if not allResults:\n warnings.warn(\"Query returned no results.\", NoResultsWarning)\n return allResults", "def parse_snmp_response(response, type):\n values = []\n root = etree.fromstring(response)\n body = root.findall('{%s}Body'%'http://schemas.xmlsoap.org/soap/envelope/')\n for b in body:\n message = b.findall('{%s}message'%'http://ggf.org/ns/nmwg/base/2.0/')\n for m in message:\n data = m.findall('{%s}data'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d in data:\n datum = d.findall('{%s}datum'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d2 in datum:\n #to check this is not an error message\n if d2.text != '':\n if d2.attrib['value'] != '' and d2.attrib['value'] != None and d2.attrib['value'] != 'nan':\n v = {}\n v['timeValue'] = datetime.fromtimestamp(float(d2.attrib['timeValue']))\n v['value']=d2.attrib['value']\n if type!=\"lamp\":\n v['valueUnits'] = d2.attrib['valueUnits']\n 
values.append(v)\n\n return values", "def test_custom_raw_row_results_all_types(self):\n # Connect using a custom protocol handler that tracks the various types the result message is used with.\n session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace=\"custserdes\")\n session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked\n session.row_factory = tuple_factory\n\n colnames = create_table_with_all_types(\"alltypes\", session, 1)\n columns_string = \", \".join(colnames)\n\n # verify data\n params = get_all_primitive_params(0)\n results = session.execute(\"SELECT {0} FROM alltypes WHERE primkey=0\".format(columns_string))[0]\n for expected, actual in zip(params, results):\n self.assertEqual(actual, expected)\n # Ensure we have covered the various primitive types\n self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1)\n session.shutdown()", "def parser(self, answer):\n result = {}\n for rrsets in answer.response.answer:\n for item in rrsets.items:\n rdtype = self.get_type_name(item.rdtype)\n\n if item.rdtype == self.get_type_id('A'):\n if result.has_key(rdtype):\n result[rdtype].append(item.address)\n else:\n result[rdtype] = [item.address]\n return result", "def response_helper(self, response, **kwargs):\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n return response", "def _parse_response(self, response, return_type):\n\n return self._parse_response_body_from_xml_text(response=response, return_type=return_type)", "def parseType(self, line):\n\n\t\tcols = string.split(line, \",\")\n\n\t\tfor col in cols:\n\t\t\tself.type.append(col)", "def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types", "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def parse(self) -> None:\n pass", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def _parse_results(self, handle):\n result_reader = ResultsReader(handle)\n for result in result_reader:\n\n # Diagnostic messages may be returned in the results\n if isinstance(result, Message):\n logger.debug('[{}] {}'.format(result.type, result.message))\n\n # Normal events are returned as dicts\n elif isinstance(result, dict):\n result = dict(result)\n if '_time' in result:\n result['_time'] = SplunkAbstraction._to_datetime(result['_time'])\n yield {\n 'time': result['_time'] if '_time' in result else '',\n 'metadata': {k: v for k, v in result.items() if k.startswith('_')},\n 'state': {k: v for k, v in result.items() if not k.startswith('_')}\n }\n\n else:\n logger.warning('Unknown result type in _parse_results: {}'.format(result))\n\n assert result_reader.is_preview is False", "def get_user_defined_types(self):\n query = mssqlqueries.get_user_defined_types()\n logger.info(u'UDTs query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def parse(cls, data):\n raise NotImplementedError", "def parse(self, raw_or_file):\n errors = {}\n self._raw = yaml.load(raw_or_file)\n for 
name in self._raw:\n datum = self._raw[name]\n n = len(datum)\n if (not name == 'frodo') and not n == 1:\n errors[name] = {'too_many_types': datum.keys()}\n continue\n typ = datum.keys()[0]\n conf = datum[typ]\n if typ == 'env':\n self.environs[name] = FrodoEnv(name, self, **conf)\n elif typ == 'config':\n self.configs[name] = XCToolConfig(name, self, **conf)\n elif typ == 'test':\n self.tests[name] = FrodoTest(name, self, **conf)\n elif typ == 'precondition':\n self.preconditions[name] = FrodoPrecondition(name, self, **conf)\n elif name == 'frodo':\n frodo_errs = self.parse_system_conf(datum)\n if frodo_errs:\n errors[name] = frodo_errs\n else:\n errors[typ] = 'unknown type'\n return errors", "def parse(self):\n raise NotImplementedError", "def parse_schemas_17(parser, xnat_serssion, extension_types=True):\n if extension_types:\n schemas_uri = '/xapi/schemas'\n try:\n schema_list = xnat_serssion.get_json(schemas_uri)\n except exceptions.XNATResponseError as exception:\n message = 'Problem retrieving schemas list: {}'.format(exception)\n xnat_serssion.logger.critical(message)\n raise ValueError(message)\n else:\n schema_list = DEFAULT_SCHEMAS\n\n for schema in schema_list:\n if extension_types or schema in ['xdat', 'xnat']:\n parser.parse_schema_uri(xnat_session=xnat_serssion,\n schema_uri='/xapi/schemas/{schema}'.format(schema=schema))", "def parse(self, response):\n\n yield from self.parse_as_df(response)", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def _unpack_body(self, buff):\n\n # Unpack <return_code> and <count> (how many records affected or selected)\n self._return_code = struct_L.unpack_from(buff, offset=0)[0]\n\n # Separate return_code and completion_code\n self._completion_status = self._return_code & 0x00ff\n self._return_code >>= 8\n\n # In case of an error unpack the body as an error message\n if self._return_code != 0:\n self._return_message = unicode(buff[4:-1], self.charset, self.errors)\n if self._completion_status == 2:\n raise TarantoolError(self._return_code, self._return_message)\n\n # Unpack <count> (how many records affected or selected)\n self._rowcount = struct_L.unpack_from(buff, offset=4)[0]\n\n # If the response doesn't contain any tuple - there is nothing to unpack\n if self._body_length == 8:\n return\n\n # Parse response tuples (<fq_tuple>)\n if self._rowcount > 0:\n offset = 8 # The first 4 bytes in the response body is the <count> we have already read\n while offset < self._body_length:\n # In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).\n # Attribute <size> takes into account only size of tuple's <field> payload,\n # but does not include 4-byte of <cardinality> field.\n #Therefore the actual size of the <tuple> is greater to 4 bytes.\n tuple_size = struct.unpack_from(\"<L\", buff, offset)[0] + 4\n tuple_data = struct.unpack_from(\"<%ds\" % (tuple_size), buff, offset+4)[0]\n tuple_value = self._unpack_tuple(tuple_data)\n if self.field_types:\n self.append(self._cast_tuple(tuple_value))\n else:\n self.append(tuple_value)\n\n offset = offset + tuple_size + 4 # This '4' is a size of <size> attribute", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = 
data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def parse_statement(self, stmt):\r\n if 'type' not in stmt:\r\n raise TypeError('Type field required')\r\n\r\n if stmt['type'] == 'property':\r\n return self.parse_property(stmt)\r\n elif stmt['type'] == 'edge':\r\n return self.parse_edge(stmt)\r\n elif stmt['type'] == 'key_index':\r\n return self.parse_key_index(stmt)\r\n elif stmt['type'] == 'defaults':\r\n return self.parse_defaults(stmt)\r\n else:\r\n raise ValueError('Invalid `type` value {}'.format(stmt['type']))", "def parse_response(self, response: Any) -> Any:\n return response", "def parse(stream):\n return xsd_models.parseString(stream, silence=True)", "async def parse(self, raw: str) -> dict:", "def _build_parsed_values(self):\n\n self.final_result = []\n retrieved_data_types = set() # keep track of data type ID's unpacked from record\n\n # Get the file time from the file name\n if self._file_time:\n self.final_result.append(self._encode_value(\n AdcptMWVSParticleKey.FILE_TIME, self._file_time, str))\n else:\n self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.FILE_TIME,\n DataParticleKey.VALUE: None})\n\n # Get the sequence number from the file name\n if self._sequence_number:\n self.final_result.append(self._encode_value(\n AdcptMWVSParticleKey.SEQUENCE_NUMBER, self._sequence_number, int))\n else:\n self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.SEQUENCE_NUMBER,\n DataParticleKey.VALUE: None})\n\n # Get the number of data types from the Header\n num_data_types = struct.unpack_from('<B', self.raw_data, HEADER_NUM_DATA_TYPES_OFFSET)\n # Get the list of offsets from the Header\n offsets = struct.unpack_from('<%sI' % num_data_types, self.raw_data, HEADER_OFFSETS_OFFSET)\n\n # Unpack Type IDs from the offsets\n for offset in offsets:\n data_type_id, = struct.unpack_from('<h', self.raw_data, offset)\n # keep track of retrieved data types\n retrieved_data_types.add(data_type_id)\n\n # Feed the data through the corresponding encoding function and unpacking rules\n try:\n self.encoding_func_dict[data_type_id][ENCODE_FUNC](\n offset + ID_TYPE_SIZE, self.encoding_func_dict[data_type_id][UNPACK_RULES])\n except KeyError:\n log.debug(\"Skipping unsupported data type ID: %s at offset: %s\",\n data_type_id, offset)\n\n # go through the list of expected data type ID's, fill in None for missing data type ID's\n missing_data = EXPECTED_PARTICLE_IDS_SET.difference(retrieved_data_types)\n for data_type_id in missing_data:\n if data_type_id is VARIABLE_LEADER:\n # timestamp is essential for a data particle - no timestamp, no particle\n message = \"Variable Leader Data Type is required for internal timestamp, \" \\\n \"particle ignored.\"\n log.warn(message)\n raise RecoverableSampleException(message)\n\n self.final_result.extend(self.encoding_func_dict[data_type_id][ENCODE_NULL])\n\n log.trace(\"FINAL RESULT: %s\\n\", self.final_result)\n\n return self.final_result", "def getEmbeddedTypes(self):\n # TODO need to clarify how we operate on Unions here. 
The current\n # code will break when we move to schema version 0.6 as we are\n # no longer assured that the first element of the union is null.\n # This would be a good opportunity to tidy this up.\n ret = []\n if isinstance(self.schema, avro.schema.RecordSchema):\n for field in self.getFields():\n if isinstance(field.type, avro.schema.ArraySchema):\n if isinstance(field.type.items, avro.schema.RecordSchema):\n ret.append((field.name, field.type.items.name))\n elif isinstance(field.type, avro.schema.RecordSchema):\n ret.append((field.name, field.type.name))\n elif isinstance(field.type, avro.schema.UnionSchema):\n t0 = field.type.schemas[0]\n t1 = field.type.schemas[1]\n if (isinstance(t0, avro.schema.PrimitiveSchema) and\n t0.type == \"null\"):\n if isinstance(t1, avro.schema.RecordSchema):\n ret.append((field.name, t1.name))\n else:\n raise Exception(\"Schema union assumptions violated\")\n return ret", "def get_all_typedefs(self):\n results = None\n atlas_endpoint = self.endpoint_url + \"/types/typedefs\"\n\n getTypeDefs = requests.get(\n atlas_endpoint,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(getTypeDefs)\n\n return results", "def type_fields(self, res, op_item):\n result = []\n cast_func = {}\n header = res[0]\n for heading in header:\n cast_func[heading] = DataType.str\n\n if \"field_type\" in op_item:\n for f, p in findall(FIELD_TYPE_RE, op_item[\"field_type\"]):\n cast_func[p] = self.dt.get_func(f)\n first = True\n for row in res[1:]:\n new_row = []\n for idx in range(len(header)):\n\n heading = header[idx]\n cur_value = row[idx]\n if type(cur_value) is tuple:\n cur_value = cur_value[1]\n if heading == \"timespan\" and first:\n first = False\n new_row.append((cast_func[heading](cur_value), cur_value))\n\n result.append(new_row)\n\n return [header] + result", "def readSimpleTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.SIMPLETYPES:\n types[typename] = typetype\n \n return types", "def parse(self, content):\n pass", "def get_types(example_row):\n types = []\n for v in example_row:\n value_type = ctype_text[v.ctype]\n if value_type == 'text':\n types.append(text_type)\n elif value_type == 'number':\n types.append(number_type)\n elif value_type == 'xldate':\n types.append(date_type)\n else:\n types.append(text_type)\n return types", "def parse(self):", "def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))", "def parse(self, stream, media_type=None, parser_context=None):\n raise NotImplementedError(\".parse() must be overridden.\")", "def _parse_response_body_from_xml_text(self, response, return_type):\n respbody = response.body\n\n doc = minidom.parseString(respbody)\n return_obj = return_type()\n for node in self._get_child_nodes(doc, return_type.__name__):\n self._fill_data_to_return_object(node, return_obj)\n\n # Note: We always explicitly assign status code to the custom return\n # type object\n 
return_obj.status = response.status\n\n return return_obj", "def _parse_typed_columns(typed_columns: Sequence[str]) \\\n -> Tuple[Tuple[str], Tuple[str]]:\n\n splitted = [x.split(\":\") for x in typed_columns]\n\n # assert correct split\n invalid = [x for x in splitted if len(x) != 2]\n if invalid:\n raise ValueError(\"Invalid typed column format encountered: {}. \"\n \"Typed columns should be formulated like \"\n \"'col_name:type_name', e.g. 'col1:int'. Please \"\n \"be aware that this error may occur if you omit \"\n \"dtypes when instantiating `PlainFrame`.\"\n .format(invalid))\n\n # get column names and corresponding types\n cols, types = zip(*splitted)\n\n # complete type abbreviations\n types = tuple([TYPE_ABBR.get(x, x) for x in types])\n\n # check valid types\n invalid_types = set(types).difference(TYPE_ABBR.values())\n if invalid_types:\n raise ValueError(\"Invalid types encountered: {}. Valid types \"\n \"are: {}.\"\n .format(invalid_types, TYPE_ABBR.items()))\n\n return cols, types", "def assertResponseTypes(self, response_data: dict, types: tuple):\n for field, type_ in types:\n with self.subTest(field=field, type=type_):\n self.assertIsInstance(response_data[field], type_)", "def _parse_proto(raw: bytes) -> list[google_protobuf.GoogleProtobuf.Pair]:\n buf = google_protobuf.GoogleProtobuf(KaitaiStream(io.BytesIO(raw)))\n for pair in buf.pairs:\n if not isinstance(\n pair.wire_type, google_protobuf.GoogleProtobuf.Pair.WireTypes\n ):\n raise ValueError(\"Not a protobuf.\")\n return buf.pairs", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def parse_response(self, resp):\n p, u = self.getparser()\n\n if hasattr(resp, 'text'):\n # modern requests will do this for us\n text = resp.text # this is unicode(py2)/str(py3)\n else:\n\n encoding = requests.utils.get_encoding_from_headers(resp.headers)\n if encoding is None:\n encoding = 'utf-8' # FIXME: what to do here?\n\n if sys.version_info[0] == 2:\n text = unicode( # noqa: F821\n resp.content, encoding, errors='replace')\n else:\n assert sys.version_info[0] == 3\n text = str(resp.content, encoding, errors='replace')\n p.feed(text)\n p.close()\n return u.close()", "def _parse_reply(self, msg_list): #{\n logger = self.logger\n\n if len(msg_list) < 4 or msg_list[0] != b'|':\n logger.error('bad reply: %r' % msg_list)\n return None\n\n msg_type = msg_list[2]\n data = msg_list[3:]\n result = None\n srv_id = None\n\n if msg_type == b'ACK':\n srv_id = data[0]\n elif msg_type in (b'OK', b'YIELD'):\n try:\n result = self._serializer.deserialize_result(data)\n except Exception, e:\n msg_type = b'FAIL'\n result = e\n elif msg_type == b'FAIL':\n try:\n error = jsonapi.loads(msg_list[3])\n if error['ename'] == 'StopIteration':\n result = StopIteration()\n elif error['ename'] == 'GeneratorExit':\n result = GeneratorExit()\n else:\n result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])\n except Exception, e:\n logger.error('unexpected error while decoding FAIL', exc_info=True)\n result = RPCError('unexpected error while decoding FAIL: %s' % e)\n else:\n result = RPCError('bad message type: %r' % msg_type)\n\n return dict(\n type = msg_type,\n req_id = msg_list[1],\n srv_id = srv_id,\n result = result,\n )", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def parse(self, payload):\n payload = json.loads(payload)\n \n if payload['response'] in self.possible_responses:\n return self.possible_responses[payload['response']](payload)\n else:\n print 'Response 
not valid'", "def _consume_type(self):\n try:\n self._consume(self.VARIABLE_TYPES)\n except CompilationEngineError:\n self._consume(TokenTypes.IDENTIFIER) # Class name", "def readDefinedTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.types.keys():\n types[typename] = typetype\n \n return types", "def parse(self):\n if not self.header_parsed:\n self.parse_header()", "def _get_types(self):\n\n db = Database()\n self.c_built_ins = list(map(lambda tup: tup[0], db.select_built_types()))\n self.c_built_in_array_types = r'^(' + '|'.join(self.escaped(self.c_built_ins)) + ')\\[[0-9]*\\]'\n self.c_types = list(map(lambda tup: tup[0], db.select_types()))\n self.c_array_types = r'^(' + '|'.join(self.escaped(self.c_types)) + ')\\[[0-9]*\\]'\n db.close_connection()", "def Parse(self, ctx:Context, resp:Response)->Generator[Any,None,None]:\n yield Error(msg=\"Parse function not implemented\", code=Unimplemented)", "def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n decoded_stream = codecs.getreader(encoding)(stream)\n parse_constant = strict_constant if self.strict else None\n return ujson.load(decoded_stream, parse_constant=parse_constant)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % str(exc))", "def parse(self):\n return []", "def parse_response(self, response):\n\n return json.loads(response.text)", "def deduce_schema(self, input_data, *, schema_map=None):\n\n if self.input_format == 'csv':\n if self.csv_dialect:\n reader = csv.DictReader(input_data, dialect=self.csv_dialect)\n else:\n reader = csv.DictReader(input_data)\n elif self.input_format == 'json' or self.input_format is None:\n reader = json_reader(input_data)\n elif self.input_format == 'dict':\n reader = input_data\n else:\n raise Exception(f\"Unknown input_format '{self.input_format}'\")\n\n if schema_map is None:\n schema_map = OrderedDict()\n\n try:\n for json_object in reader:\n\n # Print a progress message periodically.\n self.line_number += 1\n if self.line_number % self.debugging_interval == 0:\n logging.info(f'Processing line {self.line_number}')\n\n # Deduce the schema from this given data record.\n if isinstance(json_object, dict):\n self.deduce_schema_for_record(\n json_object=json_object,\n schema_map=schema_map,\n )\n elif isinstance(json_object, Exception):\n self.log_error(\n f'Record could not be parsed: Exception: {json_object}'\n )\n if not self.ignore_invalid_lines:\n raise json_object\n else:\n self.log_error(\n 'Record should be a JSON Object '\n f'but was a {type(json_object)}'\n )\n if not self.ignore_invalid_lines:\n raise Exception(f'Record must be a JSON Object '\n f'but was a {type(json_object)}')\n finally:\n logging.info(f'Processed {self.line_number} lines')\n\n return schema_map, self.error_logs", "def validate(self):\n\n for entry in self.body:\n e_type = list(entry.keys())[0]\n if e_type not in self.type_checks:\n raise exceptions.BadInputError(f\"invalid input type {e_type}\")\n\n body = entry[e_type]\n self.type_checks[e_type](body)", "def parse(self) -> None:\n self._parse_zone_files()\n self._process_rules()\n self._process_zones()\n self._process_links()", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == 
\"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))", "def decode_input_data(self, rawdata):\n return self.get_content_type().loads(rawdata, self)", "def parse(self, message):\n resp = json.loads((self.send_api_request(message)).decode('utf-8'))\n\n nlu_response = NLUResponse()\n nlu_response.text = message\n intent_schema = IntentSchema()\n if resp[\"result\"][\"metadata\"]:\n intent_schema.name = resp[\"result\"][\"metadata\"][\"intentName\"]\n intent_schema.confidence = resp[\"result\"][\"score\"]\n else: # fallback if no intent is given by the nlu\n intent_schema.name = \"greet\"\n intent_schema.confidence = 0.0\n nlu_response.intent = intent_schema\n print(\"Recognized Intent by Dialogflow {}\".format(intent_schema.name ))\n\n pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(resp)\n\n try:\n nlu_response.entities = []\n entities = resp[\"result\"][\"parameters\"]\n resolved_query = resp[\"result\"][\"resolvedQuery\"]\n\n for key, value in entities.items():\n if value:\n entity_schema = EntitiesSchema()\n entity_schema.start = resolved_query.find(value)\n entity_schema.end = resolved_query.find(value) + len(value)\n entity_schema.entity = key\n entity_schema.value = value\n nlu_response.entities.append(entity_schema)\n #print(\"Key: {}, Value: {}\".format(key, value))\n except Exception as err:\n logging.warning('No Entites extracted {}'.format(err))\n\n schema = RasaNLUSchema()\n data, error = schema.dump(nlu_response)\n\n return data", "def _load_type_tables(self):\n logger.info(\"Reading content of type tables...\")\n for table_name in self.type_tables:\n logger.info(f\"Reading JSONL dump of type table '{table_name}'...\")\n table_jsonl = resource_stream('sotorrent_pipeline',\n f'type_tables/{table_name}.jsonl').read().decode()\n self.type_tables_jsonl[table_name] = table_jsonl\n logger.info(f\"Read {len(self.type_tables_jsonl)} type table(s).\")", "def _parse_response(resp):\n for header in resp['payload']['headers']:\n if header['name'] == 'From':\n email = _parse_email_value(header['value'])\n sender_user_id = EMAIL_TO_USER_ID.get(email)\n if not sender_user_id:\n print(\"sender_user_id not found {}\".format(email))\n return\n\n if resp['payload']['mimeType'] in ['text/html', 'text/plain']:\n encoded_data = resp['payload']['body']['data'].encode('utf-8')\n body = base64.urlsafe_b64decode(encoded_data)\n else:\n # unclear if other options may come through\n print(\"found new mimeType: {}, id: {}\".format(resp['payload']['mimeType'], resp['id']))\n return\n\n # we only care about chat labels for now\n label = 'chats' if 'chats' in resp['labelIds'] else None\n time_secs = int(resp['internalDate']) / 1000 # convert to seconds\n timestamp = datetime.fromtimestamp(time_secs)\n\n return MessageData(\n body=body,\n timestamp=timestamp,\n message_id=resp['id'],\n label=label,\n data=json.dumps(resp),\n sender_user_id=sender_user_id,\n thread_id=resp['threadId']\n )", "def _parse_place_types(types_str, delimiter='|'):\n return [p.strip() for p in types_str.split(delimiter)\\\n if p.strip() in places.TYPES]", "def 
test_empty_tags(self):\n\n # Base Schema-derived types\n schemas = [\"type: integer\",\n \"type: number\",\n \"type: boolean\",\n \"type: string\",\n \"type: 'null'\",\n \"type: timestamp\",\n \"type: timestamp-hp\",\n\n (\"type: array\\n\"\n \"items: { type: number }\"),\n\n (\"type: object\\n\"\n \"properties:\\n\"\n \" foo: { type: string }\")]\n\n for schema in schemas:\n parsed = self.parse(schema)\n self.assertEqual(parsed.tags, {})\n\n # Links and relations\n schema = self.parse(\"type: integer\\n\"\n \"links:\\n\"\n \" self: { path: $/foo }\\n\"\n \"relations:\\n\"\n \" foo:\\n\"\n \" resource: /foo\")\n\n self.assertEqual(schema.links['self'].tags, {})\n self.assertEqual(schema.relations['foo'].tags, {})\n\n # Typeless schema fragment\n schema = self.parse(\"oneOf:\\n\"\n \"- type: integer\\n\"\n \"- type: 'null'\")\n self.assertEqual(schema.tags, {})", "async def get_fields(self) -> List[Field]:\n schema = await self.get_schema()\n fields = []\n if schema:\n # The faust-avro parser expects a json-parsed avro schema\n # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20\n parsed_schema = self._parse(json.loads(schema))\n for field in parsed_schema.fields:\n fields.append(Field(field.name, field.type.python_type))\n\n return fields", "def _deserialize(\n self, value: Any, attr: str = None, data: Mapping[str, Any] = None, **kwargs\n ):\n errors = []\n # iterate through the types being passed into UnionField via val_types\n for field in self.valid_types:\n try:\n # inherit deserialize method from Fields class\n return field.deserialize(value, attr, data, **kwargs)\n # if error, add error message to error list\n except ValidationError as error:\n errors.append(error.messages)\n raise ValidationError(errors)", "def get_response_content_types_list(response):\n # type: (AnyResponseType) -> List[Str]\n content_types = []\n known_types = [\"application\", \"audio\", \"font\", \"example\", \"image\", \"message\", \"model\", \"multipart\", \"text\", \"video\"]\n for part in response.headers[\"Content-Type\"].split(\";\"):\n for sub_type in part.strip().split(\",\"):\n if \"=\" not in sub_type and sub_type.split(\"/\")[0] in known_types:\n content_types.append(sub_type)\n return content_types", "def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)", "def parse(self):\n result = []\n for field in self.get_fields():\n result.append(field.get_field())\n return result", "def _parse_raw_predictions(self, raw_predictions):\n result = []\n for line in raw_predictions.split(\"\\n\"):\n line_parts = line.split(\"|\")\n type = line_parts[0]\n assert type.lstrip(\"*\") in (\n \"FP\", \"TP\", \"TN\", \"FN\"), 'Expected {} to be in (FP, TP, TN, FN), {}'.format(line[0], line)\n\n docid = line_parts[1]\n start_end = line_parts[2]\n entity_name = line_parts[3]\n alt_gene = None\n alt_gene_start_end = None\n\n if type.lstrip(\"*\") == \"TP\":\n start_end = line_parts[3]\n entity_name = line_parts[2]\n alt_gene = line_parts[4]\n alt_gene_start_end = line_parts[5]\n\n result.append({\n \"type\": type,\n \"docid\": docid,\n \"start_end\": start_end,\n \"entity_name\": entity_name,\n \"alt_gene\": alt_gene,\n \"alt_gene_start_end\": alt_gene_start_end,\n })\n return result", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # 
Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def unpackRecords(self,unpackTypes):\n for record in self.records:\n if record.name in unpackTypes:\n record.load(unpack=True)", "def test_type_result(self):\n result = self.parser.msg_analysis(MSG_TEST_NO_RESULT[0])\n assert isinstance(result, list)", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n result.append({})\n\n for prop in self.pattern.findall(msg):\n key = prop.split(\"=\")[0]\n value = prop.split('\"')[1]\n\n if key == \"CM\":\n try:\n value = float(value)\n except:\n pass\n if key == \"CLASSID\":\n try:\n value = int(value)\n except:\n pass\n result[-1][key] = value\n\n return result", "def process_type(self, swagger_type, context):\n pass", "def _parse_response(self, response, all_ops):\n try:\n parsed_response = json.loads(response)\n except Exception, e:\n raise ApiError(e)\n if 'error' in parsed_response: # needed anymore?\n raise ApiError(parsed_response['error'])\n # Return the true API return value.\n return parsed_response", "def parse(cls, s):\n raise NotImplementedError", "def parse(self, response):\n return super().parse(response)", "def parse(self, response):\n return super().parse(response)", "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> List[Dict]:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: List = []\n rows: List = []\n this_row: str = ''\n headers: str = ''\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n row_name, header_data = line.split(':', maxsplit=1)\n\n if row_name in rows:\n # this is data\n _, row_data = line.split(':', maxsplit=1)\n data_table = headers + row_data\n output_line = simple_table_parse(data_table.splitlines())\n output_line[0]['type'] = this_row\n raw_output.extend(output_line)\n continue\n\n else:\n # this is a header row\n rows.append(row_name)\n this_row = row_name\n headers = header_data + '\\n'\n continue\n\n return raw_output if raw else _process(raw_output)", "def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True" ]
[ "0.6339696", "0.5975076", "0.5879143", "0.58355993", "0.57179266", "0.5602357", "0.55450535", "0.54961365", "0.5487783", "0.5469786", "0.54546714", "0.5419225", "0.54117334", "0.5395699", "0.53922814", "0.5358962", "0.53277004", "0.53108835", "0.5305968", "0.52904135", "0.5288441", "0.52786785", "0.5275993", "0.5273896", "0.5272915", "0.5223531", "0.52221346", "0.5219724", "0.5213751", "0.5203779", "0.5191017", "0.5188037", "0.51812035", "0.51751727", "0.5167566", "0.5143243", "0.5143243", "0.5143243", "0.5143243", "0.5133061", "0.5100018", "0.509381", "0.5081863", "0.5080787", "0.50739187", "0.5050973", "0.50427896", "0.5041147", "0.5017858", "0.49597892", "0.4946316", "0.4933421", "0.493317", "0.49318722", "0.49215513", "0.49195507", "0.49193603", "0.49041545", "0.48997122", "0.48979583", "0.48979583", "0.4895901", "0.48859742", "0.4867142", "0.48627654", "0.48621657", "0.48599574", "0.48534882", "0.4852926", "0.48520187", "0.48471433", "0.4845835", "0.4836995", "0.48357925", "0.48337427", "0.48295647", "0.48293188", "0.48184267", "0.4812677", "0.48067942", "0.48020193", "0.47961178", "0.47827175", "0.47779348", "0.47760865", "0.47755215", "0.47618064", "0.4761742", "0.4754932", "0.4751321", "0.47493458", "0.4749157", "0.4736556", "0.4732449", "0.47323656", "0.47272593", "0.4725927", "0.4725927", "0.47233883", "0.47179437" ]
0.5321174
17
Parse a list of arguments into a dictionary where the key is the name of the argument and the argument itself is the value.
def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':
    if not args:
        return {}
    result = {}
    for a in args:
        if not a:
            continue
        arg = Argument(a)
        result[arg.name] = arg
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arglist2dict(args):\n arg_dict = {}\n\n if len(args) == 0:\n return arg_dict\n\n if not args[0].startswith('--'):\n raise ValueError(f\"Positional keywords are not supported: {args[0]}\")\n\n i = 0\n while i < len(args):\n arg = args[i]\n i = i + 1\n if arg.startswith('--'):\n dest = arg[2:]\n j, arglist = Parser.get_args(args[i:])\n i = i + j\n Parser.update_arg_dict(arg_dict, dest, arglist)\n return arg_dict", "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result", "def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict", "def args_to_dictionaty(args):\n\tres_args = {}\n\tfor i, arg in enumerate(args[1:]):\n\t\tif i % 2 == 0:\n\t\t\tkey = arg\n\t\telse:\n\t\t\tres_args[key] = arg\n\treturn res_args", "def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict", "def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:\n parsed_launch_arguments = OrderedDict() # type: ignore\n for argument in launch_arguments:\n count = argument.count(':=')\n if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):\n raise RuntimeError(\n \"malformed launch argument 
'{}', expected format '<name>:=<value>'\"\n .format(argument))\n name, value = argument.split(':=', maxsplit=1)\n parsed_launch_arguments[name] = value # last one wins is intentional\n return parsed_launch_arguments.items()", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def parse_args_dict(args=None):\n return vars(parse_args(args))", "def parse_arguments(args):", "def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict", "def crude_arg_parser(args=sys.argv):\n args_dict = {}\n key = None\n for e in args[1:]:\n if e[:2] == '--':\n if key:\n args_dict[key] = True # Switch arg\n key = e[2:]\n elif key:\n args_dict[key] = e\n key = None\n\n return args_dict", "def parse_arguments(\n input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None\n) -> dict:\n if argument_parser is None:\n argument_parser = argparse.ArgumentParser()\n\n argument_parser.add_argument(\n \"--\" + FLAG_DOCKER_IMAGE_PREFIX.replace(\"_\", \"-\"),\n help=\"Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. 
When leaving blank, the default Dockerhub Repository is used.\",\n required=False,\n default=\"\",\n )\n\n return build_utils.parse_arguments(\n input_args=input_args, argument_parser=argument_parser\n )", "def args2dict(args, dict_args={}):\n \n for arg in args:\n #this_entry = re.findall(r'[^\"\\s]\\S*|\".+?\"', arg)\n p_arg = arg.split('=')\n if len(p_arg) > 1:\n dict_args[p_arg[0]] = False if p_arg[1].lower() == 'false' else \\\n True if p_arg[1].lower() == 'true' else \\\n None if p_arg[1].lower() == 'none' else \\\n '='.join(p_arg[1:]) if len(p_arg) > 2 else \\\n p_arg[1]\n \n return(dict_args)", "def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result", "def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")", "def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters", "def parse_args(argv: t.Iterable[str] = None):\n if argv is None:\n argv = sys.argv[1:]\n\n args: t.List[str] = []\n kwargs: t.MutableMapping[str, t.Any] = {}\n\n key = None\n for arg in argv:\n if arg.startswith('--'):\n if arg == '--help':\n print(USAGE)\n raise SystemExit\n if key is not None:\n kwargs[key] = True\n key = arg[2:]\n continue\n\n match = re.match('^(\\\\w+)=(.*)$', arg)\n if match:\n if key is not None:\n kwargs[key] = True\n key = None\n kwargs[match.group(1)] = match.group(2)\n continue\n\n if key is not None:\n kwargs[key] = arg\n key = None\n continue\n\n args.append(arg)\n\n if key is not None:\n kwargs[key] = True\n\n return (tuple(args), kwargs)", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args", "def parse_unknown_args(args):\n retval = {}\n preceded_by_key = False\n for arg in args:\n if arg.startswith('--'):\n if '=' in arg:\n key = arg.split('=')[0][2:]\n value = arg.split('=')[1]\n retval[key] = value\n else:\n key = arg[2:]\n preceded_by_key = True\n elif preceded_by_key:\n retval[key] = arg\n preceded_by_key = False\n\n return retval", "def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise 
ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def process_cli_config_args(config_args:List[str]) -> Dict:\n # assert len(config_args) % 3 == 0, \\\n # \"You should pass config args in [--config.arg_name arg_value arg_type] format\"\n assert len(config_args) % 2 == 0, \\\n \"You should pass config args in [--config.arg_name arg_value] format\"\n arg_names = [config_args[i] for i in range(0, len(config_args), 2)]\n arg_values = [config_args[i] for i in range(1, len(config_args), 2)]\n\n result = {}\n\n for name, value in zip(arg_names, arg_values):\n assert name.startswith(CONFIG_ARG_PREFIX), \\\n f\"Argument {name} is unkown and does not start with `config.` prefix. Cannot parse it.\"\n\n result[name[len(CONFIG_ARG_PREFIX):]] = infer_type_and_convert(value)\n\n return result", "def parse_args(args):\n if len(args) == 1:\n return {}", "def parse_args(argparser_args):\n return {k: v for k, v in vars(argparser_args).items() if v is not None}", "def parse_args():\n # Argument objects\n argument_objects = [\n FindInterfaceArg(),\n InterfaceArg(),\n NaughtyCountArg(),\n FirewallArg(),\n ModelTypeArg(),\n LogArg(),\n ]\n\n # Create the parser and parse the args\n parser = create_parser(argument_objects)\n parsed_args = parser.parse_args()\n options = {}\n\n # Parse all of the options\n for obj in argument_objects:\n if not obj.process_argument(parsed_args, options):\n parser.print_usage()\n exit()\n\n return options", "def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)", "def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev", "def parse_key_value_arg(self, arg_value, argname):\n result = {}\n for data in arg_value:\n\n # Split at first '=' from left\n key_value_pair = data.split(\"=\", 1)\n\n if len(key_value_pair) != 2:\n raise exceptions.InvalidKeyValuePairArgumentError(\n argname=argname,\n value=key_value_pair)\n\n result[key_value_pair[0]] = key_value_pair[1]\n\n return result", "def parse_request_arg_dict(arg, exception_class=Exception):\n arg_dict = {}\n arg_pairs = arg.split(';')\n for arg_pair in arg_pairs:\n try:\n arg_name, arg_value = arg_pair.split('=', 1)\n except Exception as error:\n logging.exception(error)\n raise exception_class(\n 'there is no `=` in %s' % arg_pair\n )\n arg_dict[arg_name] = arg_value\n return arg_dict", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. 
It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. 
If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def get_multi_argument(self,argument_list):\n dict_return = {}\n for param in argument_list: \n try:\n if isinstance(param,dict):\n for key in param: \n if param[key] is False :\n try: dict_return[key] = self.get_argument(key)\n except:continue\n else: dict_return[param] = self.get_argument(key,param[key])\n else: dict_return[param] = self.get_argument(param)\n except Exception,e: self.treat_except(e)\n return dict_return", "def _parse_arguments(kwargs, argv):\n retval = {}\n errors = []\n for arg in argv:\n retval[arg['arg_name']] = kwargs.get(arg['arg_name'], None)\n if retval[arg['arg_name']]:\n try:\n if arg['convert_func'] is not None:\n retval[arg['arg_name']] = arg['convert_func'](retval[arg['arg_name']])\n except ValueError:\n errors.append({'status': '400',\n 'detail': 'Error in argument %s: %s' % (arg['arg_name'], retval[arg['arg_name']])})\n if errors:\n raise ApplicationException({'errors': errors}, 400)\n return retval", "def parse_arguments(command_line: str, **kwargs):\n run_kwargs: typing.Dict[str, typing.Any] = {'shell': True}\n if kwargs:\n run_kwargs.update(kwargs)\n run_kwargs['args'] = command_line\n return run_kwargs", "def map_arguments():\n arguments = {\n '-c': 'ogg',\n '-d': 'no',\n '-q': '4'\n }\n args = sys.argv[:]\n args.pop(0)\n while len(args) > 1:\n if args[0] == '-c' and re.search('^mp3$|^ogg$', args[1]) or \\\n args[0] == '-d' and re.search('^y(es)?$', args[1]) or \\\n args[0] == '-q' and re.search('^[0-9]$', args[1]):\n arguments[args[0]] = args[1]\n args.pop(0)\n args.pop(0)\n else:\n print_help()\n if len(args) == 1:\n print_help()\n return arguments", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def _parse_params( self ):\n paramDic={}\n # Parameters are on the 3rd arg passed to the script\n paramStr=sys.argv[2]\n print paramStr\n if len(paramStr)>1:\n paramStr = paramStr.replace('?','')\n \n # Ignore last char if it is a '/'\n if (paramStr[len(paramStr)-1]=='/'):\n paramStr=paramStr[0:len(paramStr)-2]\n \n # Processing each parameter splited on '&' \n for param in 
paramStr.split(\"&\"):\n try:\n # Spliting couple key/value\n key,value=param.split(\"=\")\n except:\n key=param\n value=\"\"\n \n key = urllib.unquote_plus(key)\n value = urllib.unquote_plus(value)\n \n # Filling dictionnary\n paramDic[key]=value\n print paramDic\n return paramDic", "def parse_args(arguments: Sequence, options: List[str] = None) -> Dict:\n LOGGER.debug(\"Parsing arguments: %s options: %s\", arguments, options)\n\n try:\n import awsglue.utils as au\n except ImportError:\n return parse_args_fallback(arguments, options)\n\n try:\n resolved = au.getResolvedOptions(args=arguments, options=options)\n LOGGER.debug(\"awsglue.utils args resolved: %s\", resolved)\n return resolved\n except au.GlueArgumentError:\n return parse_args_fallback(arguments, options)", "def parse_args(args_list):\n # If no arguments specified, print usage statement with no error.\n if len(args_list) == 1:\n sys.exit(usage)\n\n # Make all flags upper case to avoid case sensitivity.\n flags = [i.upper() for i in args_list if i.startswith('-')]\n\n # See if help is desired. If so, print usage with no error.\n if help_desired(flags):\n sys.exit(usage)\n\n # Retrieve fasta files. At least one, up to 3 is needed.\n fastas = [\n i for i in args_list if\n i.endswith('.fasta') or\n i.endswith('.fa') or\n i.endswith('.fan') or\n i.endswith('.fas')\n ]\n\n # Make sure that at least one fasta file was found.\n if not fastas:\n print usage\n raise ValueError('No fasta files found.')\n\n # Make sure that no more than 3 fasta files have been selected.\n if len(fastas) > 3:\n print usage\n raise ValueError(\n 'A maximum of 3 fasta files can be compared at once. You entered %r fasta files.' % len(fastas)\n )\n\n return {\n 'flags': flags,\n 'fastas': fastas\n }", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_args():\n parser = argparse.ArgumentParser( description='Required: function-name.' 
)\n parser.add_argument( '--function', '-f', help='function name required', required=True )\n args_dict = vars( parser.parse_args() )\n return args_dict", "def readArgs(args):\n params = {}\n for k in args.keys():\n k2 = k.replace(\"<\", \"\").replace(\">\", \"\").replace(\"-\", \"\")\n try: # Convert strings to int or floats when required\n params[k2] = int(args[k])\n except:\n try:\n params[k2] = float(args[k])\n except:\n try:\n params[k2] = str2bool(args[k])\n except:\n params[k2] = args[k]\n return params", "def load_cli_kwargs(kwargs_list, delimiter='='):\n kwargs = {}\n for kv in kwargs_list:\n k, v = kv.split(delimiter, 1)\n kwargs[k] = v\n return kwargs", "def parse_args():\n import argparse\n\n #argument\n parser =argparse.ArgumentParser()\n\n parser.add_argument('--in_list', help = 'path to input list.')\n parser.add_argument('--out_list', help = 'path for saving list.')\n args = parser.parse_args()\n\n return args", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def parse_args(args):\n retv = dict()\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--version',\n action='version',\n version='$scriptname {ver}'.format(ver=__version__))\n\n parser.add_argument(\n '-w',\n '--write',\n action='store_true',\n help=\"write the file\")\n\n parser.add_argument(\n '-d',\n '--description',\n help=\"description of the script\")\n\n parser.add_argument(\n '-t',\n '--templates',\n dest='tdirs',\n action='append',\n help=\"location of the templates\")\n\n parser.add_argument('scriptpath', nargs='*')\n\n opts = vars(parser.parse_args(args))\n\n for keyn, keyv in opts.items():\n if keyv is not None:\n retv[keyn] = keyv\n\n retv['scriptpath'] = \" \".join(retv['scriptpath'])\n retv['scriptname'] = os.path.basename(retv['scriptpath'])\n if 'write' not in retv:\n retv['write'] = False\n\n return retv", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def _convert_param_list_to_dict(param_list: list, parameters_dict: dict) -> dict:\n for param in param_list:\n param_array: list = param.split(\"=\")\n key: str = param_array[0]\n value: str = None\n if len(param_array) > 1:\n value = param_array[1]\n parameters_dict[key] = value\n return parameters_dict", "def process_args(arg_list):\n\tproposal = None\n\trun_number = None\n\tsource_dir = None\n\ttarget_dir = None\n\n\t# Loop through Command Line Parameters...\n\tfor arg in arg_list:\n\t\tprint(\"\\narg=%s\" % arg)\n\t\tpargs = arg.split('=')\n\t\tkey = pargs[0]\n\t\tif len(pargs) > 1:\n\t\t\tvalue = pargs[1]\n\t\telse:\n\t\t\tvalue = \"\"\n\t\tprint(\"key=%s\" % key)\n\t\tprint(\"value=%s\" % value)\n\t\tif key == \"proposal\":\n\t\t\tproposal = value\n\t\telif key == \"run_number\":\n\t\t\trun_number = value\n\t\telif key == \"source_dir\":\n\t\t\tsource_dir = value\n\t\telif key == \"target_dir\":\n\t\t\ttarget_dir = value\n\t\telif key == \"TIFFFilePath\":\n\t\t\ttiff_file_path 
= value\n\t\telif key == \"TIFFFileName\":\n\t\t\ttiff_file_name = value\n\n\treturn proposal, run_number, source_dir, target_dir, tiff_file_path, tiff_file_name", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)", "def clean_command_line(args):\n args = vars(args)\n # solo devuelvo los items que tienen datos en el runstring\n ret = {}\n for item in args:\n if args[item]:\n ret[item] = args[item]\n return ret", "def parse_args_to_dict():\n locust_config_info[\"run_time\"] = args.t[0]\n locust_config_info[\"ramp_up\"] = args.r[0]\n locust_config_info[\"print_stats\"] = args.print_stats\n locust_config_info[\"summary_only\"] = args.summary_only\n locust_config_info[\"users\"] = args.u[0]\n locust_config_info[\"test_name\"] = args.f[0]\n locust_config_info[\"tsin\"] = args.tsin[0]\n print \"login style: {} l[0]: {}\".format(args.l, args.l[0])\n locust_config_info[\"login_style\"] = args.l[0]\n locust_config_info[\"request_timeout\"] = args.request_timeout[0]\n return locust_config_info", "def parse_options(option_list: List[str]) -> Dict[str, Union[int, float, str]]:\n d = dict()\n for o in option_list:\n o = o.split('=')\n if len(o) != 3:\n raise OptionParsingError(\"Not enough elements in the parsed options. 
Need 3 elements.\")\n key = o[0]\n val = o[1]\n if o[2] not in type_mappings:\n raise OptionParsingError(f\"Unknown option type {o[2]}.\")\n type_func = type_mappings[o[2]]\n d.update({key: type_func(val)})\n return d", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def __parse_docstring(docstring):\n\t\tif docstring is None or docstring == \"\":\n\t\t\treturn {}\n\t\tlines = docstring.replace(\"\\t\", \"\").split(\"\\n\")\n\t\tresult = {}\n\t\thelp_line = \"\"\n\t\targuments = {}\n\n\t\ts_argument = False\n\t\twhile lines != []:\n\t\t\tline = lines.pop(0).strip()\n\n\t\t\tif line.strip() == \"\":\n\t\t\t\tcontinue\n\n\t\t\telse:\n\t\t\t\tif not s_argument:\n\t\t\t\t\tif line == \"Arguments:\":\n\t\t\t\t\t\ts_argument = True\n\t\t\t\t\telse:\n\t\t\t\t\t\thelp_line += \" \" + line\n\t\t\t\telse:\n\t\t\t\t\tif line[0] in [\"@\", \"#\"]:\n\t\t\t\t\t\topt = line[0]\n\t\t\t\t\t\targ = line[1:]\n\t\t\t\t\t\tvariable, _, values = arg.partition(\" = \")\n\t\t\t\t\t\tname, _, typ = variable.partition(':')\n\n\t\t\t\t\t\tif typ in Command.TYPES:\n\t\t\t\t\t\t\ttyp = Command.TYPES[typ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise CommandTypeError(\"{typ} not supported by commandparse\".format(typ))\n\n\t\t\t\t\t\talias = name[0]\n\t\t\t\t\t\targuments[name] = {\n\t\t\t\t\t\t\t\"alias\": \"-{alias}\".format(alias=alias),\n\t\t\t\t\t\t\t\"name\": \"--{name}\".format(name=name),\n\t\t\t\t\t\t\t\"type\": typ,\n\t\t\t\t\t\t\t\"help_line\": \"\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif values:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tv = literal_eval(values)\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\traise CommandDefaultValueError(\"Incorret value(s) in a placeholder: {v}\".format(v=values))\n\t\t\t\t\t\t\tif isinstance(v, list):\n\t\t\t\t\t\t\t\targuments[name][\"values\"] = v\n\t\t\t\t\t\t\telif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):\n\t\t\t\t\t\t\t\targuments[name][\"value\"] = v\n\n\t\t\t\t\t\tif opt == \"#\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = True\n\t\t\t\t\t\telif opt == \"@\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = False\n\n\t\t\t\t\telif line: # if no prefix is found, read the help line of the previous argument.\n\t\t\t\t\t\tif not arguments[name][\"help_line\"]:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] = line\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] += \" \" + line\n\n\t\treturn {\"help_line\": help_line.strip(), \"arguments\": arguments}", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_cmd_parameters_(args):\n # create argument parser\n parser = ArgumentParser(description=\"Script to build search index for ChatBot\")\n set_default_arguments_(parser)\n # parse options and transform them into common dictionary\n options = vars(parser.parse_args(args))\n # remove options with None values (if any)\n options = {k: v for k, v in options.items() if v is not None}\n return options", "def _cmd_params_to_dict(params):\n return {t[0]: t[1] for t in params}", "def _parse_arguments():\n parser = argparse.ArgumentParser(\n prog=\"JSON sorter\",\n description=\"Take 
a json file, sort the keys and insert 4 spaces for indents.\",\n )\n\n parser.add_argument(\n \"input\", help=\"JSON file to parse.\",\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n default=sys.stdout,\n type=argparse.FileType(mode=\"w\"),\n help=\"File to write to. Defaults to stdout.\",\n )\n\n # Should probably implement this and CSV as subcommands\n parser.add_argument(\n \"-y\",\n \"--yaml\",\n action=\"store_true\",\n help=\"Whether to sort a YAML file provided as the input.\",\n )\n\n # is there a way to have info printed with this from argparse?\n parser.add_argument(\n \"-l\",\n \"--log\",\n action=\"store_true\",\n help=\"Turn logging on and print to console.\",\n )\n\n parser.add_argument(\n \"-ll\",\n \"--log_level\",\n dest=\"log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Set the logging level\",\n )\n\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\", version=\"%(prog)s\" + __version__\n )\n\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n sys.exit()\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n\n # add these command line arg options\n parser.add_argument(\"departure_date\", help=\"Provide departure date in MM/DD/YYYY\")\n parser.add_argument(\"return_date\", help=\"Provide return date in MM/DD/YYYY\")\n parser.add_argument(\"departure_airport\", help=\"Provide airport code, e.g. BWI\")\n parser.add_argument(\"return_airport\", help=\"Provide airport code, e.g. ICN\")\n\n # parse these command line options\n arg = parser.parse_args()\n\n departure_date = arg.departure_date\n return_date = arg.return_date\n departure_airport = arg.departure_airport\n return_airport = arg.return_airport\n\n return departure_date, return_date, departure_airport, return_airport", "def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)", "def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()", "def parse_arguments():\n global parser\n parser = argparse.ArgumentParser(\n description='Certainly this isn\\'t how Food Network does it',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('''\n Recipe List must appear as follows. **\n =======\n recipe_name\n serveing_size\n ingredient 0\n ingredient 1\n ingredient 2\n ...\n ...\n ...\n ingredient n\n '''))\n parser.add_argument('input_file',\n help=\"An input text file to read in recipes from. \"\n \"Must adhere certain structure.**\")\n parser.add_argument('out_file', help=\"File to write json recipe data to.\")\n parser.add_argument('-s', '--serving-size', type=str,\n help='The number of servings you\\'d like to make.',\n dest='serving_size', default=4)\n parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,\n dest='filter_items',\n help='A comma delimited string of ingredients to filter recipes by. 
'\n 'Multi-word ingredients must be quoted.')\n global args\n args = parser.parse_args()\n\n global serving_size_override\n serving_size_override = args.serving_size\n global filter_ingredients\n filter_ingredients = args.filter_items", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def argdict(self):\n return dict((arg.name, val) for arg, val in zip(self.sig, self))", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\"Run arguments for system submitted tasks\")\n\n parser.add_argument(\"-f\", \"--funcs\", type=str, nargs=\"?\", required=True,\n help=\"path to pickle file containing a list of \"\n \"functions/methods that should be run by the \"\n \"submitted process\"\n )\n parser.add_argument(\"-k\", \"--kwargs\", type=str, nargs=\"?\", required=False,\n default=None,\n help=\"path to pickle file containing a dictionary of \"\n \"keyword argumnets that should be passed to the \"\n \"functions\")\n parser.add_argument(\"-e\", \"--environment\", type=str, nargs=\"?\",\n required=False,\n help=\"Optional comma-separated environment variables, \"\n \"which should be given as \"\n \"VARNAME1=value1,VARNAME2=value2 and so on. These \"\n \"will be separated and instantiated into Python's \"\n \"os.environ\")\n\n return parser.parse_args()", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def parseArguments(args=None):\n\n # parse command line arguments\n parser = argparse.ArgumentParser(description='collection creator')\n parser.add_argument( 'config_file', action=\"store\" )\n parser.add_argument( 'out_path', action=\"store\" )\n\n return parser.parse_args(args)", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. 
Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parse_arguments(args, parent_parser=[]):\n return ((), args)", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\n \"This script receives 'grouped_hits.txt', uses a FASTA file to get the \\\n \\nsequences, uses MAFFT to align each group, then generates a consensus \\\n \\nsequence for each group using biopython.\\n\\n \\\n Example: python {0} -g grouped_hits.txt -f sequences.fasta\".format(argv[0]),\n formatter_class = argparse.RawDescriptionHelpFormatter)\n \n requiredNamed = parser.add_argument_group('required arguments')\n\n requiredNamed.add_argument(\"-g\", \"--GROUPS\", type=str, required=True,\\\n help=\"Grouped hits file generated by group_self_BLAST.py\", action=\"store\")\n\n requiredNamed.add_argument(\"-f\", \"--FASTA\", type=str, required=True,\\\n help=\"FASTA file containing all sequences.\", action=\"store\")\n\n return parser.parse_args()", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(\"Utility script used to producing stable matches between mentors and candidates.\")\n parser.add_argument('mentor_file', metavar = 'MENTOR_FILE', type = str,\n help = \"CSV containing the preference lists submitted by the mentors\")\n parser.add_argument('candidate_file', metavar = 'CANDIDATE_FILE', type = str,\n help = 'CSV containing the preference lists submitted by the candidates')\n parser.add_argument('-o', '--output', dest = 'output_file', action = 'store', \n help = \"location of the output file\")\n parser.add_argument('-n', dest = \"num_preferences\", action = 'store', default = 5, type = int,\n help = \"number of preferences for each to consider (default 5)\")\n return parser.parse_args()", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def analyze_args(self, args):\n args_obj = args.__dict__\n if args_obj.get('add'):\n action = 'bc'\n params = args_obj['add']\n\n elif args_obj.get('prime'):\n action = 'prime'\n params = args_obj['prime']\n else:\n print \"Invalid Arguments...\"\n sys.exit(0)\n\n params_check = self.validate_params(params)\n if params_check is True:\n parsed_args = dict(action=action,\n params=map(int, params))\n return parsed_args", "def parse_arguments(argv=None, parser=None):\n if parser is None:\n parser = make_parser()\n\n return parser.parse_args(argv)", "def grab_scrape_info() -> dict[str, Union[ParseResult, str]]:\n\n logger.debug(\"Grabbing scrape info from arguments\")\n url: ParseResult = urlparse(args.url)\n if not url.scheme:\n logger.error(\"URL has no scheme\")\n raise exceptions.InvalidInput('URL is incomplete. 
It has no scheme.')\n\n argument: Optional[str]\n for argument in args.seasons, args.episodes:\n if argument is not None:\n if len(argument) > 2 and not argument.startswith('['):\n logger.error(\"Invalid input format for seasons or episodes\")\n raise exceptions.InvalidInput('Invalid input format for seasons or episodes')\n\n for argument_content in argument:\n if argument_content not in ''.join(['[', ']', ',', '-', ' ', string.digits]):\n logger.error(\"Invalid input format for seasons or episodes\")\n raise exceptions.InvalidInput('Invalid input format for seasons or episodes')\n\n return {\n 'url': url,\n 'seasons': args.seasons,\n 'episodes': args.episodes,\n }", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=(\n \"SpotiQuote: An automatic ad silencer combined with spottily played quotes.\"\n \" Spotify is queried by an AppleScript to report its status and when found\"\n \" to be presenting an advertisement, automatically muted. Once the an\"\n \" advertisement concludes the volume is set back to its previous level.\"\n )\n )\n\n parser.add_argument(\n \"--volume\",\n type=int,\n default=80,\n dest=\"volume\",\n help=\"Integer value between 0 and 100 to start Spotify at.\",\n )\n\n parser.add_argument(\n \"--memos\",\n type=str,\n default=None,\n dest=\"memos\",\n help=(\n \"File path to json file containing memos to say at the beginning of a muted\"\n ),\n )\n\n parser.add_argument(\n \"--voice\",\n type=str,\n default=\"Alex\",\n dest=\"voice\",\n help=\"Default voice to read memos in\",\n )\n\n parser.add_argument(\n \"--after_num_plays\",\n type=int,\n default=None,\n dest=\"after_num_plays\",\n help=(\n \"Recite memo after an integer number of songs have completed. Completion\"\n \" of play is defined as reaching the last 5 seconds of a song. Must be at\"\n \" least one.\"\n ),\n )\n\n return parser.parse_args()", "def parse_arguments():\n\n info = 'Divides pdb info files for parallelization'\n parser = argparse.ArgumentParser(description=info)\n\n # program arguments\n parser.add_argument('-f', '--in-file',\n type=str,\n required=True,\n help='PDB info file to divide')\n parser.add_argument('-n', '--num-splits',\n default=1000,\n type=int,\n help='Number of splits to perform (Default: 1000)')\n parser.add_argument('-m', '--mut-file',\n type=str,\n required=True,\n help='File containing mutation information')\n parser.add_argument('--split-dir',\n default = \"../data/split_pdbs/\",\n type=str,\n help='Output directory for split PDB info files')\n\n args = parser.parse_args()\n opts = vars(args)\n return opts", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 
'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def parseArgs(self, args, **vars):\n argList = []\n for token in self.argLexer.finditer(args):\n for tokenType, tokenValue in list(token.groupdict().items()):\n if tokenValue is not None:\n argList.append(getattr(self, 'argtoken_' +\n tokenType)(tokenValue, vars))\n return argList", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def args(hub, val: List[str] or str) -> Tuple[List[str], Dict[str, str]]:\n args = []\n kwargs = {}\n for v in hub.render.cli.render(val):\n if isinstance(v, dict):\n kwargs.update(v)\n else:\n args.append(v)\n\n return args, kwargs", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def _parse_args(self, prepared_args):\n pass", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def Args(parser):", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Re-Id\")\n parser.add_argument(\n \"--exp1\", help=\"Name of Camera 1\",\n default=None, required=True)\n parser.add_argument(\n\t \"--exp2\", help=\"Name of Camera 2\",\n\t default=None, required=True)\n\n return parser.parse_args()", "def _parse_arguments(text):\n parser = argparse.ArgumentParser(\n description=\"Build Python-based Rez packages in just a single command.\",\n )\n\n parser.add_argument(\n \"--hdas\",\n nargs=\"+\",\n help=\"The relative paths to each folder containing VCS-style Houdini HDAs.\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--items\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to copy / install.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--eggs\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to make into a .egg file.\",\n )\n\n parser.add_argument(\n \"--symlink\",\n action=\"store_true\",\n 
default=linker.must_symlink(),\n help=\"If True, symlink everything back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-files\",\n action=\"store_true\",\n default=linker.must_symlink_files(),\n help=\"If True, symlink files back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-folders\",\n action=\"store_true\",\n default=linker.must_symlink_folders(),\n help=\"If True, symlink folders back to the source Rez package.\",\n )\n\n known, _ = parser.parse_known_args(text)\n\n return known" ]
[ "0.75726336", "0.7397223", "0.7325528", "0.71652824", "0.7037274", "0.6999478", "0.6991102", "0.69558775", "0.69498295", "0.69380295", "0.6924944", "0.69120455", "0.6848294", "0.6756323", "0.6723195", "0.66975105", "0.6656867", "0.6617072", "0.6584447", "0.6567549", "0.65363634", "0.6525429", "0.6501903", "0.6501504", "0.6482272", "0.6439208", "0.64390683", "0.64336765", "0.6341237", "0.6329849", "0.6327732", "0.63261926", "0.6317717", "0.6317364", "0.62792957", "0.62631387", "0.6239137", "0.62290144", "0.6209235", "0.618948", "0.61894256", "0.6176305", "0.6172716", "0.6113603", "0.60946876", "0.6082099", "0.60537785", "0.6023486", "0.6000508", "0.59915406", "0.5975556", "0.59664315", "0.5959181", "0.5950514", "0.5924337", "0.59232044", "0.5916626", "0.5901855", "0.5897256", "0.58889055", "0.58864176", "0.58845794", "0.5868127", "0.58652794", "0.58468515", "0.58423775", "0.58344895", "0.5828471", "0.58223", "0.5810771", "0.5777806", "0.57775044", "0.5754866", "0.5754069", "0.5749367", "0.57490826", "0.574513", "0.5713848", "0.57111895", "0.5707107", "0.5701594", "0.5699029", "0.569731", "0.5691901", "0.56869054", "0.5669614", "0.5654963", "0.5652932", "0.5648942", "0.56462103", "0.5644211", "0.5644154", "0.5642982", "0.5640379", "0.562334", "0.5621328", "0.56187177", "0.56183875", "0.561562", "0.5613119" ]
0.85187536
0
Parse a list of directives into a dictionary where the key is the name of the directive and the value is the directive itself.
def parse_directives(schema_directives: List[Dict]) -> Dict[str, Directive]:
    result = {}
    for schema_directive in schema_directives:
        new_directive = Directive(schema_directive)
        result[new_directive.name] = new_directive
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directives():\n cmd = \"{} -L\".format(_detect_os())\n ret = {}\n out = __salt__[\"cmd.run\"](cmd)\n out = out.replace(\"\\n\\t\", \"\\t\")\n for line in out.splitlines():\n if not line:\n continue\n comps = line.split(\"\\t\")\n desc = \"\\n\".join(comps[1:])\n ret[comps[0]] = desc\n return ret", "def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()", "def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()", "def parse_directive(line):\n composite = list()\n pointer = line.find(\"#\")\n composite.append(line[0: pointer])\n composite.append(line[pointer + 1: len(line) - 1])\n return composite", "def get_definitions(wlist):\n ddict = {}\n for word in wlist:\n text = get_def_page(word)\n defs = extract_defs(text)\n ddict[word] = defs\n return ddict", "def directives(self):\n return self._directives", "def directives(self):\n return self._directives", "def parse_cache_control(\n header_value: str) -> dict[str, str | int | bool | None]:\n directives: dict[str, str | int | bool | None] = {}\n\n for segment in parse_list(header_value):\n name, sep, value = segment.partition('=')\n if sep != '=':\n directives[name] = None\n elif sep and value:\n value = _dequote(value.strip())\n try:\n directives[name] = int(value)\n except ValueError:\n directives[name] = value\n # NB ``name='' is never valid and is ignored!\n\n # convert parameterless boolean directives\n for name in _CACHE_CONTROL_BOOL_DIRECTIVES:\n if directives.get(name, '') is None:\n directives[name] = True\n\n return directives", "def _convert_tags_to_dict(text_list_tags):\n return OrderedDict([re.findall(r\"\"\"\\s*_(\\w+)\\s+(.+?)\\s*$\"\"\", row)[0] for row in text_list_tags])", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ 
]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def parse_vars(items):\n return dict((parse_var(item) for item in items))", "def find_regions(directives):\n regions = {}\n for directive in directives:\n if directive.startswith(\"sequence-region\"):\n try:\n _, accession, start, end = directive.split(\" \")\n regions[accession] = (int(start), int(end))\n except ValueError:\n # likely sequence-region without coordinates\n pass\n return regions", "def parse_list(constant_list):\n\n values = dict()\n descriptions = dict()\n for (key, value, desc) in constant_list:\n values[key] = value\n descriptions[value] = desc\n return (values, descriptions)", "def export_commentary_text_as_dictionary(commentary_parts_list):\n verse_string = str(commentary_parts_list[0])\n header_string = str(commentary_parts_list[1])\n \n verse = re.search(r\"\\[(\\d+)\\]\", verse_string).group(1)\n header = re.search(r'\\<u\\>\\s*\"(.+)\"\\s*\\<\\/u\\>', header_string).group(1)\n\n commentary_text = commentary_parts_list[2].replace(\": \", \"\")\n key = verse + \"__\" + header\n \n return key, commentary_text.strip()", "def GetDirective(item,directive):\n pat=re.compile(' '+directive + '[\\s= ]*([\\S, ]*)\\n')\n m=pat.search(item)\n if m:\n return m.group(1)", "def to_dict(tags: list):\n result = {}\n for tag in tags:\n result[tag.name] = tag.get_text()\n return result", "def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction", "def _process_directives(self, db):\n term = Term('_directive')\n directive_node = db.find(term)\n if directive_node is not None:\n directives = db.get_node(directive_node).children\n\n gp = LogicFormula()\n while directives:\n current = directives.pop(0)\n self.execute(current, database=db, context=self.create_context((), define=None),\n target=gp)\n return True", "def _parse_tags(tags: str):\n return dict(item.split(\":\") for item in shlex.split(tags)) # type: ignore", "def crm2dict(conf_list=None):\n if conf_list is None:\n conf_list=configure_parse()\n conf_dict=dict(conf_list)\n results={}\n groupkeys = getkeys(conf_dict, 'group')\n primitivekeys = getkeys(conf_dict, 'primitive')\n for gk in groupkeys:\n results.setdefault(gk.split()[1], {})\n locationkeys = getkeys(conf_dict, 'location')\n for key in conf_dict.keys():\n conf_type, tag = key.split()\n if conf_type == 'group':\n members=[x for x in conf_dict[key] if not (x.startswith('target-role') or x == 'meta')]\n results[tag].update({'members' : members })\n elif conf_type == 'location':\n service_name, loc=parse_tag(tag)\n balancer = conf_dict[key][2]\n if service_name not in results.keys():\n results.setdefault(service_name, {'loadbalancers' : {loc:balancer}})\n elif 'loadbalancers' not in results[service_name].keys():\n results[service_name].update({'loadbalancers' : {loc:balancer}})\n else:\n results[service_name]['loadbalancers'].update({loc:balancer})\n elif 
conf_type == 'primitive':\n service_name, service_type = parse_tag(tag)\n if service_type == 'ld':\n results[service_name].update({'type' : 'ldirectord'})\n elif service_type[:2] == 'ip':\n params = conf_dict[key]\n parsed_params={}\n for param in params:\n if param[:3] == 'ip=':\n parsed_params.setdefault('ip', param[4:-1])\n elif param[:13] == 'cidr_netmask=':\n parsed_params.setdefault('cidr_netmask', param[14:-1])\n elif param[:4] == 'nic=':\n parsed_params.setdefault('nic', param[5:-1])\n if 'ips' not in results[service_name].keys():\n results[service_name].update({'ips' : [haipstr(parsed_params)]})\n else:\n results[service_name]['ips'].append(haipstr(parsed_params))\n return results", "def _parse_comments(self, tokens: TokenIterator):\n metadata = {}\n while tokens.peek().type == 'COMMENT':\n comment = tokens.next().text\n while comment:\n comment, found, meta = comment.rpartition('::')\n if found:\n key, _, value = meta.partition(' ')\n metadata[key] = value.rstrip()\n return metadata", "def produce_parse_duckduckgo(self, value_list:list) -> dict:\n value_html, value_link = value_list\n value_dict = {'search': value_link, 'names': [], 'links': []}\n if len(value_html) < 1000:\n return value_dict\n soup = BeautifulSoup(value_html, 'html.parser')\n soup = soup.find('div', id='links')\n soup = soup.find_all('h2')\n value_name = [f.text.strip() for f in soup]\n value_link = [f.find('a').get('href', '') for f in soup]\n if '//duckduckgo.com/l/?uddg=' in value_link[0]:\n value_link = [urllib.parse.urlparse(f) for f in value_link]\n value_link = [urllib.parse.parse_qs(f.query).get('uddg', '')[0] for f in value_link]\n value_dict['names'] = value_name\n value_dict['links'] = value_link\n return value_dict", "def potcar_str2dict(potcar_list: Optional[str]) -> dict:\n if potcar_list is None:\n return {}\n elif isinstance(potcar_list, str):\n potcar_list = potcar_list.split()\\\n\n d = {}\n for p in potcar_list:\n element = p.split(\"_\")[0]\n d[element] = p\n return d", "def list(file_path):\n output = utils.run_process(['mdls', file_path])\n # get metadata into list, allowing for nested attributes\n md = [[y.strip()\n for y in line.split('=')]\n for line in output]\n # iterate over list to deal with nested attributes\n # then build dictionary\n listed_item, md_dict = [], {}\n for item in md:\n # item is pair\n if len(item) == 2:\n k, v = item\n # if second item is parens, then first is key\n if v == '(':\n listed_key = utils.clean_attribute(k)\n # else, it's a simple `key: value` pair\n else:\n # attempt to convert to `int`\n try:\n val = int(v)\n except (ValueError, TypeError):\n val = v.replace('\"', '')\n # convert shell nulls to Python `None`\n if val in ('\"\"', '(null)'):\n val = None\n key = utils.clean_attribute(k)\n md_dict[key] = val\n # single item is part of a nested attribute\n elif len(item) == 1 and item[0] != ')':\n value = item[0].replace('\"', '')\n listed_item.append(value)\n # single item marks end of a nested attribute\n elif len(item) == 1 and item[0] == ')':\n md_dict[listed_key] = listed_item\n listed_item = []\n return md_dict", "def register_based_directives():\n if not BASED_LIB_RST:\n return\n\n if \"directives\" in BASED_LIB_RST:\n for dir_name, dir_cls_str in BASED_LIB_RST[\"directives\"].items():\n class_ = import_string(dir_cls_str)\n directives.register_directive(dir_name, class_)", "def parse(self):\n try:\n self.match_value(Operator, \"#\")\n\n # Check for a match against known directives\n candidates = [self.define, self.undef, self.include, self.ifdef,\n 
self.ifndef, self.if_, self.elif_, self.else_, self.endif, self.pragma]\n for f in candidates:\n try:\n directive = f()\n if not self.eol():\n log.warning(\"Additional tokens at end of preprocessor directive\")\n return directive\n except ParseError:\n pass\n\n # Any other line beginning with '#' is a preprocessor\n # directive, we just don't handle it (yet). Suppress\n # warnings for common directives that shouldn't impact\n # correctness.\n common_unhandled = [\"line\", \"warning\", \"error\"]\n if len(self.tokens) > 2 and str(self.tokens[1]) not in common_unhandled:\n log.warning(\"Unrecognized directive\")\n return UnrecognizedDirectiveNode(self.tokens)\n except ParseError:\n raise ParseError(\"Not a directive.\")", "def get_commands_dict() -> dict:\n commands_dict = {}\n f = open(f\"data/metadata/commands.dict.txt\", \"r\", encoding=\"utf-8\").read()\n for command in f.split(\"\\n\"):\n commands_dict[command.split(\":\")[0]] = command.split(\":\")[1]\n return commands_dict", "def process(path, name):\n d = {}\n path = path / name\n with open(path.as_posix()) as fd:\n file_contents = fd.read()\n module = ast.parse(file_contents)\n docstring = ast.get_docstring(module)\n docstring_line = get_value(docstring)\n d['name'] = name\n if docstring_line:\n d['docstring'] = docstring_line\n else:\n d['docstring'] = 'No docstring provided.'\n return d", "def _parse_single_definition(unparsedDefinition):\r\n parsed = {'definition': unparsedDefinition['difino']}\r\n parsed['subdefinitions'] = [\r\n _parse_subdefinitions(subdefinition)\r\n for subdefinition in unparsedDefinition['pludifinoj']\r\n ]\r\n \r\n parsed['examples'] = [\r\n {'example': example['ekzemplo']}\r\n for example in unparsedDefinition['ekzemploj']\r\n ]\r\n return parsed", "def parse(self, content):\n self._sections = {}\n self._filters = []\n section = None\n\n def error(msg):\n print('autodl.cfg: line {}: {}'.format(i + 1, msg))\n # log('autodl.cfg: line {}: {}'.format(i + 1, msg))\n\n first_prog = re.compile(ur'^\\[\\s*([\\w\\-]+)\\s*(?:([^\\]]+))?\\s*]$')\n second_prog = re.compile(ur'^([\\w\\-]+)\\s*=(.*)$')\n lines = content['data'].split('\\n')\n for line in lines:\n i = 0\n line = line.strip()\n if line == '':\n continue\n\n first_array = first_prog.match(line)\n second_array = second_prog.match(line)\n if line[0] == '#':\n if section:\n section.add_comment(line)\n elif first_array:\n _type = first_array.group(1).strip().lower()\n try:\n _name = first_array.group(2).strip().lower()\n except AttributeError:\n _name = None\n section = self.get_section(_type, _name)\n elif second_array:\n if section is None:\n error('Missing a [section]')\n else:\n _option = second_array.group(1).strip().lower()\n _value = second_array.group(2).strip().lower()\n section.add_option(_option, _value)\n else:\n error('Ignoring line')\n i += 1", "def _get_directive_name(self):", "def parse_headers(file_contents: str) -> dict:\n\n match = re.search(r'#HEADER#(.*?)#', file_contents, re.MULTILINE | re.DOTALL)\n\n if match is None:\n raise Exception('No #HEADER# provided')\n\n headers = {}\n lines = match.group(1).split(\"\\n\")\n\n for line in lines:\n if line.strip() != '':\n parts = line.split(' : ')\n value = re.sub(r'(^[\\'\"]|[\\'\"]$)', '', parts[1].strip())\n headers[parts[0].strip()] = value\n\n return headers", "def data_labels(labels: Dict[str, Label], data_sec: List[str], memory: Memory):\n data_line_re = f\"(?:({mipsRE.LABEL}):)?\\\\s*({mipsRE.DIRECTIVE})\\\\s+(.*)\"\n second_pass = []\n save_for_next: List[re.Match] = []\n\n for line in 
data_sec:\n match = re.match(data_line_re, line)\n if match:\n label_name = match[1]\n directive_name = match[2][1:]\n raw_data = match[3]\n directive = Directives[directive_name]\n\n try:\n address = directive(raw_data, memory)\n except NameError as err:\n second_pass.append((line, err.args[0][6:-16]))\n continue\n\n if label_name:\n if label_name in labels:\n raise MipsException(f\"Label {label_name} already defined\")\n # Not all directives need labels\n labels[label_name] = Label(name=label_name, value=address, location=mipsRE.DATA_SEC, kind=match[2][1:])\n if save_for_next:\n labels[save_for_next.pop()[1]] = Label(name=label_name, value=address, location=mipsRE.DATA_SEC, kind=match[2][1:])\n else:\n match = re.match(f\"(?:(\\\\b[\\\\w]+\\\\b):)+\\\\s*(\\\\S*)\", line)\n if match:\n if not match[2]: # Example: 'argc: '\n save_for_next.append(match)\n elif match[2] in labels:\n ptr = labels[match[2]]\n labels[match[1]] = Label(name=match[1], value=ptr.value, location=ptr.location, kind=ptr.kind)\n else:\n second_pass.append((line, match[2]))\n\n for pair in second_pass:\n match = re.match(data_line_re, pair[0])\n if match:\n label_name = match[1]\n directive_name = match[2][1:]\n if pair[1] in labels:\n raw_data = match[3].replace(pair[1], str(labels[pair[1]].value))\n else:\n label_position = [i for i in range(0, len(second_pass)) if second_pass[i][0][0 : len(pair[1])] == pair[1]]\n if len(label_position) == 0:\n raise MipsException(f\"Symbol {pair[1]} not found in symbol table\")\n elif len(label_position) > 1000:\n raise MipsException(f\"Cannot make labels refer to each other\")\n else:\n second_pass.insert(label_position[-1] + 1, pair)\n continue\n\n directive = Directives[directive_name]\n try:\n address = directive(raw_data, memory)\n except NameError as err:\n second_pass.append((pair[0].replace(match[3], raw_data), err.args[0][6:-16]))\n continue\n\n if label_name:\n if label_name in labels:\n raise MipsException(f\"Label {label_name} already defined\")\n # Not all directives need labels\n labels[label_name] = Label(name=label_name, value=address, location=mipsRE.DATA_SEC, kind=match[2][1:])\n else:\n match = re.match(f\"(?:(\\\\b[\\\\w]+\\\\b):)+\\\\s*(\\\\S*)\", pair[0])\n if match:\n if match[2] in labels:\n ptr = labels[match[2]]\n labels[match[1]] = Label(name=match[1], value=ptr.value, location=ptr.location, kind=ptr.kind)\n else:\n raise MipsException(f\"Unknown directive {pair[0]}\")\n\n address = Directives[\"space\"](\"4\", memory) # Pad the end of data section", "def _parse(file_contents):\n\n if file_contents is None or file_contents == '':\n return {}\n\n result = {}\n\n for line in file_contents.splitlines():\n # Full line comment\n if line[:1] == '#':\n continue\n\n parts = line.split('=', 1)\n\n # Not a full key-value pair.\n if len(parts) < 2:\n continue\n\n result[parts[0].strip()] = parts[1].strip()\n\n return result", "def _parse_metadata(self, md):\n md = ast.literal_eval(md)\n dd = defaultdict(list)\n\n for entry in md:\n try:\n for k, v in entry.items():\n dd[k].append(v)\n except AttributeError:\n continue\n return dd", "def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main", "def 
parse_definitions(headers: dict, file_contents: str) -> list:\n\n if not headers:\n raise Exception('Please set headers first')\n\n match = re.search(r'#DEFINITION#(.*?)#', file_contents, re.MULTILINE | re.DOTALL)\n\n if not match:\n raise Exception('No #DEFINITION# provided')\n\n definitions = list(map(str.strip, match.group(1).split(headers['EOF'])))\n\n if definitions[-1] == headers['EOR']:\n del definitions[-1]\n\n return definitions", "def user_defined_descriptions(path):\n try:\n lines = [line.rstrip() for line in open(path).readlines()]\n return dict([x.split(maxsplit=1) for x in lines])\n except FileNotFoundError:\n return dict()", "def macros(self) -> Dict[str, List[str]]:\n\n result: Dict[str, List[str]] = {}\n for spec in self.specs.values():\n result[spec.name] = []\n for macro in spec.macros.values():\n result[spec.name].append(macro[0])\n return result", "def parse_modules(self) -> None:\n mods: Dict[str, str] = {}\n matches = self.find_dir(\"LoadModule\")\n iterator = iter(matches)\n # Make sure prev_size != cur_size for do: while: iteration\n prev_size = -1\n\n while len(mods) != prev_size:\n prev_size = len(mods)\n\n for match_name, match_filename in zip(\n iterator, iterator):\n mod_name = self.get_arg(match_name)\n mod_filename = self.get_arg(match_filename)\n if mod_name and mod_filename:\n mods[mod_name] = mod_filename\n mods[os.path.basename(mod_filename)[:-2] + \"c\"] = mod_filename\n else:\n logger.debug(\"Could not read LoadModule directive from Augeas path: %s\",\n match_name[6:])\n self.modules.update(mods)", "def google_docstring_to_dict(\n docstring: Optional[str],\n file_import: Optional[Any] = None,\n ) -> Tuple[str, list, list]:\n\n if not docstring:\n return \"\", [], []\n\n regex_sections = r\"^(?: {4}|\\t)(?P<name>\\*{0,4}\\w+|\\w+\\s\\w+):\\n(?P<desc>(?:(\\s|\\S)*?(\\n\\n|\\Z)))\"\n regex_titles = r\"^(?: {4}|\\t)(?P<name>\\*{0,4}\\w+|\\w+\\s\\w+):\"\n section_titles = re.findall(regex_titles, docstring, re.MULTILINE)\n regex_description_sections = r\"(?P<desc>\\A(\\s|\\S)*?)(\\n\\n|\\Z)\"\n descrition_sections = re.findall(\n regex_description_sections, docstring, re.MULTILINE\n )\n description = descrition_sections[0][0] if descrition_sections else \"\"\n sections = re.findall(regex_sections, docstring, re.MULTILINE)\n if not sections and not description:\n description = docstring\n input_list = []\n output_list = []\n if \"Args\" in section_titles or \"Context Outputs\" in section_titles:\n for section in sections:\n if \"Args\" in section:\n lines = section[1].split(\"\\n\")\n # get first indent number\n spaces_num = len(lines[0]) - len(lines[0].lstrip())\n arg_lines = section[1].split(f'\\n{spaces_num*\" \"}')\n for arg_line in arg_lines:\n in_arg, in_arg_type = MetadataToDict.parse_in_argument_lines(\n arg_line, file_import\n )\n if in_arg:\n input_list.append((in_arg, in_arg_type))\n\n if \"Context Outputs\" in section:\n lines = section[1].split(\"\\n\")\n spaces_num = len(lines[0]) - len(lines[0].lstrip())\n out_lines = section[1].split(f'\\n{spaces_num*\" \"}')\n for out_line in out_lines:\n out_arg = MetadataToDict.parse_out_argument_lines(\n out_line,\n )\n if out_arg:\n output_list.append(out_arg)\n\n return description, input_list, output_list", "def get_dep_map(kerneldir):\n\n\tf = open(os.path.join(kerneldir, 'modules.dep'))\n\tdeps = {}\n\tfor l in f:\n\t\t#print repr(l)\n\t\tmod, dep_list_str = l.strip().split(':', 1)\n\t\tassert mod not in deps\n\n\t\tkmod = KModuleName(mod)\n\t\tdep_list = [KModuleName(x) for x in 
dep_list_str.strip().split()]\n\t\tdep_list.insert(0, kmod)\t# prepend ourself as a dependency\n\n\t\tdeps[kmod] = dep_list\n\n\tf.close()\n\treturn deps", "def getParsedDic(self):\n return {}", "def params_to_dict(tags):\n tags_dict = {}\n tags_name_value_list = [tag[0].split(':') for tag in tags]\n for tag_name, tag_value in tags_name_value_list:\n tags_dict.setdefault(tag_name, []).append(tag_value)\n return tags_dict", "def __get_smart_attr_headers_params(self, href_parsed):\n\n\t\tsmart_attr_to_drive_list_map = {}\n\n\t\th4_elements = href_parsed.find_all('h4')\n\t\tsmart_attr_headers = filter(lambda x: self.SMART_ATTR_HEADER_REGEX.match(x.text), h4_elements)\n\n\t\tfor smart_attr_header in smart_attr_headers:\n\t\t\tdrives = smart_attr_header.find_next(string=self.SMART_ATTR_DRIVE_LIST_REGEX).parent.parent\n\t\t\tdrives = drives.text.split(':')[1]\n\t\t\tsmart_attr_to_drive_list_map[smart_attr_header.text] = self.__clean_drive_text(drives)\n\n\t\treturn smart_attr_to_drive_list_map", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def parseconfig_se(cfile):\n cdict = {}\n f = open(cfile,'r')\n lines = f.readlines()\n for l in lines:\n a = string.split(l)\n if len(a) > 0:\n if a[0][0] != '#':\n maxi = len(a)\n for i in range(1,len(a)):\n if a[i][0] == '#':\n maxi = i\n break\n # Turn comma-separated lists into python lists\n entry = []\n for e in a[1:maxi]:\n if string.find(e,','):\n entry = entry + string.split(e,',')\n else:\n entry = entry + [e]\n cdict[a[0]] = entry\n return cdict", "def GetDefinitions(filename,obj):\n file=open(filename)\n content=file.read().replace(\"\\t\",\" \")\n file.close\n pat=re.compile(obj +' \\{([\\S\\s]*?)\\}',re.DOTALL)\n finds=pat.findall(content)\n return finds", "def parse_classads(output):\n classads = []\n tmp = {}\n for line in output.split(\"\\n\"):\n # A blank line signifies that this classad is finished\n if line == \"\":\n if len(tmp) > 0:\n classads.append(tmp)\n tmp = {}\n\n pair = line.split(\" = \", 2)\n if len(pair) == 2:\n tmp[pair[0]] = pair[1]\n\n return classads", "def builddictionary(dirlist):\n init_dictionary={}\n for string in dirlist:\n splitstring=string.split(\"\\t\")\n if len(splitstring) == 2:\n init_dictionary[splitstring[1].strip(\"\\n\")] = [int(splitstring[0]), 0]\n return init_dictionary", "def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict", "def parse_mod_entry(tr):\n\ttrl = tr.split(\";\")\n\ttrdict = OrderedDict()\n\n\tfor j in trl:\n\t\tk = j.split(\"=\")\n\n\t\tif k[0] in trdict:\n# print \"%s already in dict\" % (k[0])\n\t\t\ttrdict[k[0]].append(k[1])\n\t\telse: \n\t\t\ttrdict[k[0]]=[k[1]]\n\treturn trdict", "def construct_vars(config):\n c = ConfigParser()\n c.readfp(open(config))\n keys = []\n for s in c.sections():\n keys += c.items(s)\n return dict(keys)", "def parseSections(data):\n pro = _sectionSplit.split(data)\n sections = 
{}\n for x in xrange(1, len(pro), 2):\n sections[pro[x]] = pro[x+1]\n return sections", "def _parse_header(path):\n with open(path) as f:\n text = f.read().splitlines()\n raw_segs = [line.split() for line in text if ':' in line]\n\n # convert the content into a giant dict of all key, values\n return dict((i[0][:-1], i[1:]) for i in raw_segs)", "def _parse_subdefinitions(subdefinitions):\r\n parsed = {'definition': subdefinitions['difino']}\r\n parsed['examples'] = [\r\n {\r\n 'example': example['ekzemplo']\r\n } for example in subdefinitions['ekzemploj']]\r\n return parsed", "def prepare_multiple_out_parsers(run_dict):\n output_parser_dict = {}\n for run_label, run_name in run_dict.items():\n output_parser_dict[run_label] = OutputParser(run_name, use_most_recent=False)\n return output_parser_dict", "def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def _build(self, *args, **kwargs):\n for directive in self.directives:\n alias, ip, port, server_name, location = directive[\"signature\"].split(\":\")\n\n if location not in self.locations.keys():\n handle_location = Location(**{\n \"location\" : location,\n }\n )\n self.locations = handle_location\n self.locations[location].directives = directive", "def parse_defines(self):\n for line in self.header.splitlines():\n if line.lower().startswith(\"#define\"):\n _, line = line.strip().split(None, 1) # remove #define\n if \" \" in line:\n symbol, value = line.split(None, 1)\n if value.isdigit():\n value = int(value)\n elif value.startswith(\"0x\"):\n value = int(value, 16)\n elif value in self.types:\n self.types[symbol] = self.types[value]\n else:\n symbol = line\n value = \"\"\n self.constants[symbol] = value\n return self.constants", "def make_lex_dict(self):\n lex_dict = {}\n for line in self.lexicon_full_filepath.split('\\n'):\n sp = line.strip().split('\\t')\n if(len(sp) > 1):\n (word, measure) = line.strip().split('\\t')[0:2]\n lex_dict[word] = float(measure)\n return lex_dict", "def _parser(self,\n search_str):\n return {line_index: parsed_line_keys for (line_index, parsed_line_keys)\n in enumerate(self._load_line(search_str=search_str))\n if parsed_line_keys\n }", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_debian_control(cwd):\n from pathlib import Path\n import re\n\n if isinstance(cwd, str):\n cwd = Path(cwd)\n\n field_re = re.compile(r'^([\\w-]+)\\s*:\\s*(.+)')\n\n content = (cwd / 'debian' / 'control').read_text()\n control = {}\n for line in content.split('\\n'):\n m = field_re.search(line)\n if m:\n g = m.groups()\n control[g[0]] = g[1]\n\n for k in ('Build-Depends', 'Depends'):\n m = re.findall(r'([^=\\s,()]+)\\s?(?:\\([^)]+\\))?', control[k])\n control[k] = m\n\n return control", "def extract_def_use(elements):\n\n definition_usage_tuple = [(definition, usage) for element in elements for i, (definition, usage) in\n enumerate(zip(element['definition'], element['usage'])) if\n len(definition) != 0 and len(usage) != 0 and definition != \"\" and usage != \"\"]\n definitions = [el[0] for el in definition_usage_tuple]\n usages = [el[1] for el 
in definition_usage_tuple]\n definitions = [re.sub(r'[\\n\\r\\t]', ' ', el) for el in definitions]\n usages = [re.sub(r'[\\n\\r\\t]', ' ', el) for el in usages]\n return definitions, usages", "def parseMountOutput(output):\n\t\n\t# none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n\t\n\tparsedOutput = {}\n\tregex = \"(\\S+)\\s+on\\s+(\\S+)\\s+type\\s+(\\S+)\\s+\\((\\S+)\\)\"\n\tfor l in output:\n\t\tif re.search(regex,l):\n\t\t\tm = re.search(regex,l)\n\t\t\tdev = m.group(1)\n\t\t\tmntpoint = m.group(2)\n\t\t\tfs = m.group(3)\n\t\t\tperm = m.group(4)\n\t\t\tparsedOutput[dev] = {}\n\t\t\tparsedOutput[dev]['mntpoint'] = mntpoint\n\t\t\tparsedOutput[dev]['filesys'] = fs\n\t\t\tparsedOutput[dev]['perm'] = perm\n\t\t\t\n\t\t\t\n\treturn parsedOutput", "def get_model_descs():\n model_descs = {}\n for (dirpath, dirnames, filenames) in walk('.'):\n for filename in filenames:\n if 'model_desc' in filename:\n numwavelets = None\n wavelet_name = filename.split('.')[1]\n if '-' in wavelet_name:\n wavelet_name = wavelet_name.split('-')[0]\n dirs = str(dirpath).split('/')\n for direc in dirs:\n if 'w' in direc:\n numwavelets = int(direc.split('w')[1].split('.')[0])\n break\n if numwavelets is None:\n print(join(dirpath, filename) + \" is skipped\")\n else:\n if numwavelets not in model_descs.keys():\n model_descs[numwavelets] = {}\n if wavelet_name not in model_descs[numwavelets].keys():\n model_descs[numwavelets][wavelet_name] = {}\n model_descs[numwavelets][wavelet_name][join(dirpath,\\\n filename)] = str(dirpath) + \"/\"\n return model_descs", "def parse_sequences(filename: str,\n ordered: bool=False) -> Dict[str, str]:\n NAME_SYMBOL = '>'\n result = OrderedDict() if ordered else {}\n\n last_name = None\n with open(filename) as sequences:\n for line in sequences:\n if line.startswith(NAME_SYMBOL):\n last_name = line[1:-1]\n result[last_name] = []\n else:\n result[last_name].append(line[:-1])\n\n for name in result:\n result[name] = ''.join(result[name])\n\n return result", "def parse(lines, descriptions):\n # TODO does startswith with an empty string always return true?\n result = {}\n for description in descriptions: # Fill dict with empty arrays for all entries\n result[description[0]] = []\n\n for line in lines:\n words = line.split()\n for description in descriptions:\n try:\n result[description[0]].append(parse_line(words, description[0], description[1]))\n break\n except ValueError:\n pass\n return result", "def parse_to_dicts(lines, containers):\n\n pairs = [(a, b.strip()) for a, b in (m.split(':', 1) for m in lines)]\n item = {}\n kind, name = None, None\n for j in range(0, len(pairs)):\n if pairs[j][0] in containers.keys():\n if j != 0:\n containers[kind].append((name, item))\n item = {}\n kind = pairs[j][0]\n name = pairs[j][1]\n else:\n item[pairs[j][0]] = pairs[j][1]\n if kind is not None:\n containers[kind].append((name, item))\n\n return containers", "def parse_docstring(obj: object) -> Dict[str, Union[str, List, Dict[str, str], None]]:\n raw = getdoc(obj)\n summary = raw.strip(' \\n').split('\\n', maxsplit=1)[0].split('.')[0] if raw else None\n raises = {}\n details = raw.replace(summary, '').lstrip('. 
\\n').strip(' \\n') if raw else None\n for match in _RE_RAISES.finditer(raw or ''):\n raises[match.group('name')] = match.group('description')\n if details:\n details = details.replace(match.group(0), '')\n parsed = {\n 'raw': raw,\n 'summary': summary or None,\n 'details': details or None,\n 'returns': None,\n 'params': [],\n 'raises': raises\n }\n return parsed", "def parse_special(special):\n special_name = \"\"\n specials = {}\n for line in special:\n if check_template_start(line):\n special_name = line.split(\":\")[1]\n specials[special_name] = []\n elif check_template_end(line):\n special_name = \"\"\n elif special_name != \"\":\n specials[special_name].append(line)\n\n return specials", "def _parse_ddwrt_response(data_str):\n return {key: val for key, val in _DDWRT_DATA_REGEX.findall(data_str)}", "def get_sections(h):\n secnames = {}\n resec = re.compile('(\\w+)\\[(\\d*)\\]')\n for sec in h.allsec():\n g = resec.match(sec.name())\n if g.group(1) not in secnames.keys():\n secnames[g.group(1)] = [int(g.group(2))]\n else:\n secnames[g.group(1)].append(int(g.group(2)))\n return secnames", "def _to_dict(self, data_list):\n data_dict = dict(pair.split('=') for pair in data_list)\n return data_dict", "def _extract_commands():\n module_names = [\n module.name for module in pkgutil.iter_modules(projectstarter.commands.__path__)\n ]\n commands = {}\n for module_name in module_names:\n module = importlib.import_module(f\"projectstarter.commands.{module_name}\")\n if module.__doc__ is not None:\n commands[module_name] = {\n \"description\": module.__doc__.strip().lower(),\n \"func_parse\": module.__getattribute__(\"parse\"),\n \"func_run\": module.__getattribute__(\"run\"),\n }\n return commands", "def structure(data: list) -> dict:\n structure = {}\n for i in range(0, len(data)):\n leading_ = Interpreter.leading(Interpreter(), data[i])\n data_ele = data[i].replace(\"-\", \"\").strip()\n if data_ele in structure:\n structure[data_ele + \"%%%\"] = leading_\n structure[data_ele] = leading_\n return structure", "def parse_dist_meta():\n\n re_meta = re.compile(r\"__(\\w+?)__\\s*=\\s*(.*)\")\n re_doc = re.compile(r'^\"\"\"(.+?)\"\"\"')\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, NAME, \"__init__.py\")) as meta_fh:\n distmeta = {}\n for line in meta_fh:\n if line.strip() == \"# -eof meta-\":\n break\n match = re_meta.match(line.strip())\n if match:\n distmeta.update(_add_default(match))\n return distmeta", "def read_comment(comment):\n comment_dict = {}\n\n debug(\"parse tab in comment.\")\n comment_dict_from_tab, comment = parse_tab_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_tab)\n comment_dict.update(comment_dict_from_tab)\n\n debug(\"parse space in comment.\")\n comment_dict_from_space, comment = parse_space_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_space)\n comment_dict.update(comment_dict_from_space)\n\n debug(\"parse keyword in comment.\")\n comment_dict_from_keyword, comment = parse_keyword_in_comment(comment)\n debug(\"parsed dict: %s.\" % comment_dict_from_keyword)\n comment_dict.update(comment_dict_from_keyword)\n # keyword based separation.\n return comment_dict", "def _directive_render(node: RenderTreeNode, context: RenderContext) -> str:\n # special directives that should only be used within substitutions\n if node.meta[\"module\"].endswith(\"misc.Replace\") and node.children:\n return \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if 
node.meta[\"module\"].endswith(\"misc.Date\"):\n return \"{sub-ref}`today`\"\n # TODO handle unicode directive\n\n name = node.meta[\"name\"]\n info_str = option_block = code_block = \"\"\n\n if node.children and node.children[0].type == \"directive_arg\":\n info_str = \"\".join(child.render(context) for child in node.children[0])\n info_str = \" \".join(info_str.splitlines()).strip()\n if info_str:\n info_str = \" \" + info_str\n\n if node.meta[\"options_list\"]:\n yaml_str = yaml_dump(\n {\n key: (True if val is None else (int(val) if val.isnumeric() else val))\n for key, val in node.meta[\"options_list\"]\n }\n )\n option_block = indent(yaml_str, \":\", lambda s: True).strip()\n\n if node.children and node.children[-1].type == \"directive_content\":\n content = \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if not option_block and content.startswith(\":\"):\n # add a new-line, so content is not treated as an option\n content = \"\\n\" + content\n elif option_block and content:\n # new lines between options and content\n option_block += \"\\n\\n\"\n code_block = content\n\n if option_block or code_block:\n # new line before closing fence\n code_block += \"\\n\"\n\n # Info strings of backtick code fences can not contain backticks or tildes.\n # If that is the case, we make a tilde code fence instead.\n if node.markup and \":\" in node.markup:\n fence_char = \":\"\n elif \"`\" in info_str or \"~\" in info_str:\n fence_char = \"~\"\n else:\n fence_char = \"`\"\n\n # The code block must not include as long or longer sequence of `fence_char`s\n # as the fence string itself\n fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)\n fence_str = fence_char * fence_len\n return f\"{fence_str}{{{name}}}{info_str}\\n{option_block}{code_block}{fence_str}\"", "def list2dict(L):\n\n dd = {i: L[i].split('\\t') for i in range(len(L))} # auxiliary dict\n D = {}\n # Construct output dictionary of key-value pairs:\n D[dd[0][0]] = {dd[1][0]: dict(zip(dd[0][1:], dd[1][1:])),\n dd[2][0]: dict(zip(dd[0][1:], dd[2][1:]))}\n return D", "def parse(filename):\n\n # Copy the content from given file to a local list\n with open(filename, 'r') as fp:\n content = [line for line in (line.strip() for line in fp) if line]\n\n # Initialize a dictionary to store the parsed data\n data = {\n 'module_name': '',\n 'input': [],\n 'output': [],\n 'wire': [],\n 'reg': [],\n 'connections': []\n }\n\n # Get module name\n if 'module' in content[0][:7]:\n data['module_name'] = re.search(r'e.*\\(', content[0]).group()[1:-1].strip()\n else:\n print(\"Module name not present!\")\n exit(0)\n\n try:\n for line in content[1:-1]:\n # Get module parameters\n keywords = ['input', 'output', 'wire', 'reg']\n for key in keywords:\n if key in line[:len(key) + 1]:\n parse_line(data, line, key)\n\n # Get connections\n if any(x in line[:5] for x in ['nand', 'nor', 'not', 'xor', 'and', 'or', 'xnor']):\n gate = re.search(r' (.*)\\(', line).group(1).strip()\n inputs = [s.strip() for s in re.search(r'\\((.*)\\)', line).group(1).split(',')]\n for i in inputs[1:]:\n data['connections'].append((i, gate))\n data['connections'].append((gate, inputs[0]))\n except:\n print(\"Not supported!\")\n exit(0)\n\n return data", "def parse(line):\n return dict([pair.split(':') for pair in line.split()])", "def set_dependencies(self,dependency_list):\n\t\tdeps = {}\n\t\tfor relation in dependency_list:\n\t\t\tself.nr_of_deps += 1\n\t\t\t# Find the type of relation\n\t\t\trel = 
re.match('[a-z\\_]*(?=\\()',relation).group(0)\n\t\t\t# Find head and dependent\n\t\t\thead = int(re.search('(?<=-)[0-9]*(?=, )',relation).group(0))\n\t\t\tdep = int(re.search('(?<=-)[0-9]*(?=\\)$)', relation).group(0))\n\t\t\t# Set head position and create\n\t\t\t#dictinary entries\n\t\t\tif head == 0:\n\t\t\t\tself.head_pos = dep\n\t\t\telse:\n\t\t\t\tdeps[head] = deps.get(head,[])\n\t\t\t\tdeps[head].append([dep,rel])\n\t\t#set headpos to first head in dependency list if sentence has no head\n\t\tif dependency_list and not self.head_pos:\n\t\t\tfirst_head = int(re.search('(?<=-)[0-9]*(?=, )',dependency_list[0]).group(0))\n\t\t\tself.head_pos = first_head\n\t\treturn deps", "def read_tags(filename):\n with open(filename) as f:\n ast_tree = ast.parse(f.read(), filename)\n\n res = {}\n for node in ast.walk(ast_tree):\n if type(node) is not ast.Assign:\n continue\n\n target = node.targets[0]\n if type(target) is not ast.Name:\n continue\n\n if not (target.id.startswith('__') and target.id.endswith('__')):\n continue\n\n name = target.id[2:-2]\n res[name] = ast.literal_eval(node.value)\n\n return res", "def parse(filename):\n with open(filename) as file:\n contents = file.read()\n lines = contents.split(\"\\n\") # Python reads all endings by default as \\n\n res = {}\n for line in lines:\n if line.startswith(\"#\") or line == \"\":\n continue\n x = line.split(\":\")\n if x[1] != '':\n res[int(x[0])] = x[1]\n return res", "def get_definitions(cfg,regs):\n def_l = list()\n for reg in regs.keys():\n print \"Implementing register {}\".format(reg)\n reg_uc = reg.upper()\n reg_lc = reg.lower()\n r_prefix = get_reg_definition_prefix(cfg,reg_uc)\n def_l.append(get_def_reg_prop(cfg,regs,reg,'addr'))\n for f in regs[reg]['fields']:\n print \" Implementing field {}\".format(f)\n f_prefix = get_field_definition_prefix(cfg,reg_uc, f)\n for d in definition_list:\n def_l.append((f_prefix+d.upper(),regs[reg]['fields'][f][d]))\n return def_l", "def get_defines(self):\n defines = []\n for defs in self['DEFS']:\n defines.extend(re.split('[ ,]', defs))\n return defines", "def __parse_docstring(docstring):\n\t\tif docstring is None or docstring == \"\":\n\t\t\treturn {}\n\t\tlines = docstring.replace(\"\\t\", \"\").split(\"\\n\")\n\t\tresult = {}\n\t\thelp_line = \"\"\n\t\targuments = {}\n\n\t\ts_argument = False\n\t\twhile lines != []:\n\t\t\tline = lines.pop(0).strip()\n\n\t\t\tif line.strip() == \"\":\n\t\t\t\tcontinue\n\n\t\t\telse:\n\t\t\t\tif not s_argument:\n\t\t\t\t\tif line == \"Arguments:\":\n\t\t\t\t\t\ts_argument = True\n\t\t\t\t\telse:\n\t\t\t\t\t\thelp_line += \" \" + line\n\t\t\t\telse:\n\t\t\t\t\tif line[0] in [\"@\", \"#\"]:\n\t\t\t\t\t\topt = line[0]\n\t\t\t\t\t\targ = line[1:]\n\t\t\t\t\t\tvariable, _, values = arg.partition(\" = \")\n\t\t\t\t\t\tname, _, typ = variable.partition(':')\n\n\t\t\t\t\t\tif typ in Command.TYPES:\n\t\t\t\t\t\t\ttyp = Command.TYPES[typ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise CommandTypeError(\"{typ} not supported by commandparse\".format(typ))\n\n\t\t\t\t\t\talias = name[0]\n\t\t\t\t\t\targuments[name] = {\n\t\t\t\t\t\t\t\"alias\": \"-{alias}\".format(alias=alias),\n\t\t\t\t\t\t\t\"name\": \"--{name}\".format(name=name),\n\t\t\t\t\t\t\t\"type\": typ,\n\t\t\t\t\t\t\t\"help_line\": \"\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif values:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tv = literal_eval(values)\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\traise CommandDefaultValueError(\"Incorret value(s) in a placeholder: {v}\".format(v=values))\n\t\t\t\t\t\t\tif isinstance(v, 
list):\n\t\t\t\t\t\t\t\targuments[name][\"values\"] = v\n\t\t\t\t\t\t\telif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):\n\t\t\t\t\t\t\t\targuments[name][\"value\"] = v\n\n\t\t\t\t\t\tif opt == \"#\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = True\n\t\t\t\t\t\telif opt == \"@\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = False\n\n\t\t\t\t\telif line: # if no prefix is found, read the help line of the previous argument.\n\t\t\t\t\t\tif not arguments[name][\"help_line\"]:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] = line\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] += \" \" + line\n\n\t\treturn {\"help_line\": help_line.strip(), \"arguments\": arguments}", "def _parse_handle_section(lines):\n data = {}\n key = ''\n next(lines)\n\n for line in lines:\n line = line.rstrip()\n if line.startswith('\\t\\t'):\n if isinstance(data[key], list):\n data[key].append(line.lstrip())\n elif line.startswith('\\t'):\n key, value = [i.strip() for i in line.lstrip().split(':', 1)]\n key = normalize(key)\n if value:\n data[key] = value\n else:\n data[key] = []\n else:\n break\n\n return data", "def parse_dependency(dep, forge):\n if '|' in dep:\n return [parse_dependency(alt, forge) for alt in dep.split('|')]\n dep = dep.strip()\n name = ''\n version = ''\n arch = ''\n version, dep = extract_text(dep)\n arch, dep = extract_text(dep, ('[', ']'))\n name = dep.strip()\n return {'forge': forge, 'product': name,\n 'constraints': use_mvn_spec(version), 'architectures': arch}", "def parser(ICDfile):\n terms = defaultdict(set)\n relationships = set()\n definitions = defaultdict()\n ICDfile = ICDfile[0]\n #version = ICDfile.split('/')[1].split('_')[1]\n first = True\n with open(ICDfile, 'r') as fh:\n for line in fh:\n if first:\n first = False\n continue\n data = line.rstrip(\"\\r\\n\").split(\"\\t\")\n icdCode = data[0]\n icdTerm = data[1]\n chapter = data[2]\n chapId = data[3]\n block = data[4]\n blockId = data[5]\n\n terms[icdCode].add(icdTerm)\n definitions[icdCode] = \"term\"\n terms[chapId].add(chapter)\n definitions[chapId] = \"chapter\"\n terms[blockId].add(block)\n definitions[blockId] = \"block\"\n\n if len(icdCode) > 3:\n order = len(icdCode) - 1\n i = 3\n while i <= order:\n if icdCode[0:i] in terms:\n relationships.add((icdCode, icdCode[0:i], \"HAS_PARENT\"))\n i += 1\n\n relationships.add((icdCode, chapId, \"HAS_PARENT\"))\n relationships.add((icdCode, blockId, \"HAS_PARENT\"))\n relationships.add((blockId, chapId, \"HAS_PARENT\"))\n\n return terms, relationships, definitions", "def map_to_scope(var_list):\n return {var.op.name.split('/', 1)[1]: var for var in var_list}", "def load_defs():\n # Load word definitions\n fname = 'word-definitions.txt'\n with open(fname) as fh:\n lines = fh.readlines()\n \n # Create dictionary keyed by lowercase word\n def_tbl = dict()\n for line in lines:\n # split the dictionary line at the first space\n word, word_def = line.split(sep=None, maxsplit=1)\n # add this entry to the dictionary\n word = word.lower()\n def_tbl[word] = word_def.rstrip()\n return def_tbl", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def genVDeclList(syms):\r\n sortedsyms = sorted(syms, key = lambda s: s.name)\r\n return [dha.VDecl(sym) for sym in sortedsyms]", "def 
_parse_metadata_fields(key_value_block: str) -> Dict[str, str]:\n key_value_block = key_value_block.lstrip()\n field_lines = re.split(r'\\n', key_value_block)\n field_name = 'unknown'\n fields_builder: Dict[str, str] = {}\n for field_line in field_lines:\n field_match = RE_FIELD_COMPONENTS.match(field_line)\n if field_match and field_match.group('field') in NAMED_FIELDS:\n field_name = field_match.group(\n 'field').lower().replace('-', '_')\n field_name = re.sub(r'_no$', '_num', field_name)\n fields_builder[field_name] = field_match.group(\n 'value').rstrip()\n elif field_name != 'unknown':\n # we have a line with leading spaces\n fields_builder[field_name] += re.sub(r'^\\s+', ' ', field_line)\n return fields_builder", "def extract_entities_from_dependency_parse(dtrees, postag):\n sents = []\n for x in range(0,len(dtrees)):\n tok_list = []\n for node_index in dtrees[x].nodes:\n if node_index != 0:\n node = dtrees[x].nodes[node_index]\n if node['ctag'] == postag:\n tok_list.append((node['word'],postag))\n else:\n tok_list.append((node['word'],'O'))\n sents.append(tok_list)\n return sents", "def fParseNoteDefs(self, m):\n label, link, att, content = m.groups()\n\n # Assign an id if the note reference parse hasn't found the label yet.\n if label not in self.notes:\n self.notes[label] = {'id': str(uuid.uuid4()).replace('-', '')}\n\n # Ignores subsequent defs using the same label\n if 'def' not in self.notes[label]:\n self.notes[label]['def'] = {'atts': self.pba(att), 'content':\n self.graf(content), 'link': link}\n return ''" ]
[ "0.6590887", "0.60470355", "0.60470355", "0.58491236", "0.5815651", "0.57697064", "0.57697064", "0.5671237", "0.5338338", "0.52868664", "0.52477413", "0.5196038", "0.51685214", "0.51405686", "0.5023584", "0.49965042", "0.4984144", "0.49639156", "0.4882775", "0.4877682", "0.48603687", "0.48333287", "0.48227295", "0.48154616", "0.4812607", "0.47794577", "0.47505942", "0.47324657", "0.4712543", "0.47108516", "0.47049314", "0.4680752", "0.46746734", "0.46734607", "0.46694458", "0.46289948", "0.46164203", "0.46080393", "0.45959565", "0.45890957", "0.45737976", "0.4572621", "0.4556337", "0.4531671", "0.45228553", "0.4512042", "0.45052972", "0.45048183", "0.44996387", "0.44834375", "0.447987", "0.44785878", "0.44597414", "0.44486377", "0.44485393", "0.44302475", "0.44300094", "0.4428338", "0.44241077", "0.441971", "0.44094598", "0.4408429", "0.44071466", "0.4406621", "0.4402122", "0.43918636", "0.43825823", "0.43766928", "0.43753153", "0.4370156", "0.43695915", "0.43679047", "0.4366613", "0.43622887", "0.43562683", "0.43550685", "0.43512183", "0.43477023", "0.43414378", "0.4336519", "0.4331073", "0.43307644", "0.43262917", "0.43211925", "0.43196183", "0.43181992", "0.4309272", "0.43057382", "0.4304719", "0.43022284", "0.43022162", "0.42993242", "0.4298254", "0.4298118", "0.42976415", "0.4295831", "0.42949498", "0.42935035", "0.42934763", "0.42928922" ]
0.77479357
0
Creates a neural network that takes as input a batch of images (3 dimensional tensors) and outputs a batch of outputs (1 dimensional tensors)
def __init__( self, input_shape: Tuple[int, int, int], encoding_size: int, output_size: int ): super(VisualQNetwork, self).__init__() height = input_shape[0] width = input_shape[1] initial_channels = input_shape[2] conv_1_hw = self.conv_output_shape((height, width), 8, 4) conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2) self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32 self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4]) self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2]) self.dense1 = torch.nn.Linear(self.final_flat, encoding_size) self.dense2 = torch.nn.Linear(encoding_size, output_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def build_neural_net(X, filename=None):\n Y1, weights1 = build_layer(X, input_size=784, output_size=300)\n Y2, weights2 = build_layer(Y1, input_size=300, output_size=10, activation=nnet.softmax)\n\n if filename != None:\n saved_weights = np.load(filename)\n weights1.set_value(np.asarray(saved_weights[0], dtype=theano.config.floatX))\n weights2.set_value(np.asarray(saved_weights[1], dtype=theano.config.floatX))\n\n return Y2, weights1, weights2", "def build(self, images):\n\n shape = images.get_shape().as_list()\n images = tf.reshape(images,\n [shape[0] * shape[1], shape[2], shape[3], shape[4]])\n\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(self._params.weight_decay_rate),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d], padding='SAME'):\n # convert the image to one hot if needed.\n if self._params.to_one_hot:\n net = tf.one_hot(\n tf.squeeze(tf.to_int32(images), axis=[-1]),\n self._params.one_hot_length)\n else:\n net = images\n\n p = self._params\n # Adding conv layers with the specified configurations.\n for conv_id, kernel_stride_channel in enumerate(\n zip(p.conv_sizes, p.conv_strides, p.conv_channels)):\n kernel_size, stride, channels = kernel_stride_channel\n net = slim.conv2d(\n net,\n channels, [kernel_size, kernel_size],\n stride,\n scope='conv_{}'.format(conv_id + 1))\n\n net = slim.flatten(net)\n net = slim.fully_connected(net, self._params.embedding_size, scope='fc')\n\n output = tf.reshape(net, [shape[0], shape[1], -1])\n return output", "def buildNet(inputShape, numUniqueClasses):\n layers = InputLayer((None,) + inputShape[1:4])\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1), stride= (5,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 1, \n filter_size = (3,1), stride= (3,1))\n layers = NonlinearityLayer(layers, nonlinearity = nonlinearity)\n layers = DropoutLayer(layers,p=.3) \n layers = batch_norm(NNHelpers.LocallyConnected2DLayer(layers,1,(5,1),\n W=He('relu'),\n nonlinearity=nonlinearity)) \n layers = DenseLayer(layers,num_units=numUniqueClasses,\n nonlinearity=linear) \n layers = NonlinearityLayer(layers, nonlinearity=softmax) \n return layers", "def neural_network(z, dim_out):\n hidden_dim = 15\n net1 = slim.fully_connected(z, hidden_dim, activation_fn=None)\n net2 = slim.fully_connected(net1, dim_out, activation_fn=tf.tanh)\n return net2", "def neural_network():\n model = Sequential()\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\", input_shape=(28, 28, 1)))\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(10, activation=\"softmax\"))\n model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n return model", "def create_model_net(n_input,n_hidden,n_output):\n net = 
Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2,num_hidden_units_3, num_code_units, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_in,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.softmax,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_4 = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_5 = lasagne.layers.DenseLayer(\n l_hidden_4,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_out = lasagne.layers.DenseLayer(\n l_hidden_5,\n num_units=output_dim,\n nonlinearity=None,\n )\n\n return l_out", "def nn(data):\n training_set = SupervisedDataSet*\n\n\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)", "def create_network(input_nodes, hidden_nodes, output_nodes=None, output_softmax=True):\n output_nodes = output_nodes or input_nodes\n\n variables = []\n\n with tf.compat.v1.name_scope('network'):\n if isinstance(input_nodes, tuple):\n input_layer = tf.compat.v1.placeholder(\"float\", (None,) + input_nodes)\n flat_size = reduce(operator.mul, input_nodes, 1)\n current_layer = tf.reshape(input_layer, (-1, flat_size))\n else:\n input_layer = tf.compat.v1.placeholder(\"float\", (None, input_nodes))\n current_layer = input_layer\n\n for hidden_nodes in hidden_nodes:\n last_layer_nodes = int(current_layer.get_shape()[-1])\n hidden_weights = tf.Variable(\n tf.random.truncated_normal((last_layer_nodes, hidden_nodes), stddev=1. / np.sqrt(last_layer_nodes)),\n name='weights')\n hidden_bias = tf.Variable(tf.constant(0.01, shape=(hidden_nodes,)), name='biases')\n\n variables.append(hidden_weights)\n variables.append(hidden_bias)\n\n current_layer = tf.nn.relu(\n tf.matmul(current_layer, hidden_weights) + hidden_bias)\n\n if isinstance(output_nodes, tuple):\n output_nodes = reduce(operator.mul, input_nodes, 1)\n\n # for some reason having output std divided by np.sqrt(output_nodes) massively outperforms np.sqrt(hidden_nodes)\n output_weights = tf.Variable(\n tf.random.truncated_normal((hidden_nodes, output_nodes), stddev=1. 
/ np.sqrt(output_nodes)), name=\"output_weights\")\n output_bias = tf.Variable(tf.constant(0.01, shape=(output_nodes,)), name=\"output_bias\")\n\n variables.append(output_weights)\n variables.append(output_bias)\n\n output_layer = tf.matmul(current_layer, output_weights) + output_bias\n if output_softmax:\n output_layer = tf.nn.softmax(output_layer)\n\n return input_layer, output_layer, variables", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. 
Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def model(images, is_training=True):\n images = tf.reshape(images, [-1, 28, 28, 1])\n\n # First convolutional layer with max pooling and ReLU activation.\n conv1 = slim.conv2d(images, 32, [5, 5], activation_fn=tf.nn.relu, scope='conv1')\n pool1 = slim.max_pool2d(conv1, [2, 2], scope='pool1')\n\n # Second convolutional layer with max pooling and ReLU activation.\n conv2 = slim.conv2d(pool1, 64, [5, 5], activation_fn=tf.nn.relu, scope='conv2')\n pool2 = slim.max_pool2d(conv2, [2, 2], scope='pool2')\n\n # First fully connected layer with ReLU activation.\n flat = slim.flatten(pool2)\n fc1 = slim.fully_connected(flat, 1024, activation_fn=tf.nn.relu, scope='fc1')\n\n # Dropout.\n drop = slim.dropout(fc1, 0.5, is_training=is_training)\n\n # Fully connected output layer (logits).\n fc2 = slim.fully_connected(drop, 10, activation_fn=None, scope='fc2')\n return fc2", "def train(images, labels, fold, model_type, batch_size=32, num_epochs=5):\n num_classes = len(np.unique(labels))\n (X_train, y_train), (X_val, y_val), (X_test, y_test) = reformatInput(images, labels, fold)\n X_train = X_train.astype(\"float32\", casting='unsafe')\n X_val = X_val.astype(\"float32\", casting='unsafe')\n X_test = X_test.astype(\"float32\", casting='unsafe')\n # Prepare Theano variables for inputs and targets\n input_var = T.TensorType('floatX', ((False,) * 5))()\n target_var = T.ivector('targets')\n # Create neural network model (depending on first command line parameter)\n print(\"Building model and compiling functions...\")\n # Building the appropriate model\n if model_type == '1dconv':\n network = build_convpool_conv1d(input_var, num_classes)\n elif model_type == 'maxpool':\n network = build_convpool_max(input_var, num_classes)\n elif model_type == 'lstm':\n network = build_convpool_lstm(input_var, num_classes, 100)\n elif model_type == 'mix':\n network = build_convpool_mix(input_var, num_classes, 100)\n elif model_type == 'cnn':\n input_var = T.tensor4('inputs')\n network, _ = build_cnn(input_var)\n network = DenseLayer(lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.rectify)\n network = DenseLayer(lasagne.layers.dropout(network, p=.5),\n num_units=num_classes,\n nonlinearity=lasagne.nonlinearities.softmax)\n else:\n raise ValueError(\"Model not supported ['1dconv', 'maxpool', 'lstm', 'mix', 'cnn']\")\n # Create a loss expression for training, i.e., a scalar objective we want\n # to minimize (for our multi-class problem, it is the cross-entropy loss):\n prediction = 
lasagne.layers.get_output(network)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n params = lasagne.layers.get_all_params(network, trainable=True)\n updates = lasagne.updates.adam(loss, params, learning_rate=0.001)\n # Create a loss expression for validation/testing. The crucial difference\n # here is that we do a deterministic forward pass through the network,\n # disabling dropout layers.\n test_prediction = lasagne.layers.get_output(network, deterministic=True)\n test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,\n target_var)\n test_loss = test_loss.mean()\n # As a bonus, also create an expression for the classification accuracy:\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),\n dtype=theano.config.floatX)\n # Compile a function performing a training step on a mini-batch (by giving\n # the updates dictionary) and returning the corresponding training loss:\n train_fn = theano.function([input_var, target_var], loss, updates=updates)\n # Compile a second function computing the validation loss and accuracy:\n val_fn = theano.function([input_var, target_var], [test_loss, test_acc])\n # Finally, launch the training loop.\n print(\"Starting training...\")\n best_validation_accu = 0\n # We iterate over epochs:\n for epoch in range(num_epochs):\n # In each epoch, we do a full pass over the training data:\n train_err = 0\n train_batches = 0\n start_time = time.time()\n for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=False):\n inputs, targets = batch\n train_err += train_fn(inputs, targets)\n train_batches += 1\n # And a full pass over the validation data:\n val_err = 0\n val_acc = 0\n val_batches = 0\n for batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False):\n inputs, targets = batch\n err, acc = val_fn(inputs, targets)\n val_err += err\n val_acc += acc\n val_batches += 1\n av_train_err = train_err / train_batches\n av_val_err = val_err / val_batches\n av_val_acc = val_acc / val_batches\n # Then we print the results for this epoch:\n print(\"Epoch {} of {} took {:.3f}s\".format(\n epoch + 1, num_epochs, time.time() - start_time))\n print(\" training loss:\\t\\t{:.6f}\".format(av_train_err))\n print(\" validation loss:\\t\\t{:.6f}\".format(av_val_err))\n print(\" validation accuracy:\\t\\t{:.2f} %\".format(av_val_acc * 100))\n if av_val_acc > best_validation_accu:\n best_validation_accu = av_val_acc\n # After training, we compute and print the test error:\n test_err = 0\n test_acc = 0\n test_batches = 0\n for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):\n inputs, targets = batch\n err, acc = val_fn(inputs, targets)\n test_err += err\n test_acc += acc\n test_batches += 1\n av_test_err = test_err / test_batches\n av_test_acc = test_acc / test_batches\n print(\"Final results:\")\n print(\" test loss:\\t\\t\\t{:.6f}\".format(av_test_err))\n print(\" test accuracy:\\t\\t{:.2f} %\".format(av_test_acc * 100))\n # Dump the network weights to a file like this:\n np.savez('weights_lasg_{0}'.format(model_type), *lasagne.layers.get_all_param_values(network))\n print('-'*50)\n print(\"Best validation accuracy:\\t\\t{:.2f} %\".format(best_validation_accu * 100))\n print(\"Best test accuracy:\\t\\t{:.2f} %\".format(av_test_acc * 100))", "def create_network(self, nparameter, image_size = [151,151]):\n \n # input\n self.input = tf.placeholder(\"float32\",[1,image_size[0],image_size[0],1]);\n #self.input = 
tf.placeholder(\"float\",[1,image_size[0],image_size[0],1]);\n \n # convolution layer\n self.conv1_w = w = self.create_weights([8,8,1,32])\n self.conv1_b = b = self.create_bias([32])\n self.conv1 = self.create_conv2d(self.input, w, b, stride = 4, name = 'Layer1');\n \n self.conv2_w = w = self.create_weights([4,4,32,64])\n self.conv2_b = b = self.create_bias([64])\n self.conv2 = self.create_conv2d(self.conv1, w, b, stride = 2, name = 'Layer2');\n \n self.conv3_w = w = self.create_weights([3,3,64,64])\n self.conv3_b = b = self.create_bias([64])\n self.conv3 = self.create_conv2d(self.conv2, w, b, stride = 1, name = 'Layer2'); \n \n # hidden layer\n conv3_shape = self.conv3.get_shape().as_list();\n conv3_n = conv3_shape[1] * conv3_shape[2] * conv3_shape[3];\n conv3_flat = tf.reshape(self.conv3,[-1,conv3_n]);\n #conv3_flat = tf.reshape(self.conv3,[conv3_n]);\n self.fc4_w = w = self.create_weights([conv3_n, 512]);\n self.fc4_b = b = self.create_bias([512]);\n self.fc4 = tf.nn.relu(tf.matmul(conv3_flat, w) + b) \n \n # output layer\n self.output_w = w = self.create_weights([512, nparameter]);\n self.output_b = b = self.create_bias([nparameter]);\n \n out = tf.nn.tanh(tf.matmul(self.fc4, w) + b);\n self.output = tf.reshape(out, [1, self.npoints, 2]);", "def build(self, images):\n shape = images.get_shape().as_list()\n if len(shape) != 5:\n raise ValueError(\n 'The tensor shape should have 5 elements, {} is provided'.format(\n len(shape)))\n if shape[4] != 3:\n raise ValueError('Three channels are expected for the input image')\n\n images = tf.cast(images, tf.uint8)\n images = tf.reshape(images,\n [shape[0] * shape[1], shape[2], shape[3], shape[4]])\n with slim.arg_scope(resnet_v2.resnet_arg_scope()):\n\n def preprocess_fn(x):\n x = tf.expand_dims(x, 0)\n x = tf.image.resize_bilinear(x, [299, 299],\n align_corners=False)\n return(tf.squeeze(x, [0]))\n\n images = tf.map_fn(preprocess_fn, images, dtype=tf.float32)\n\n net, _ = resnet_v2.resnet_v2_50(\n images, is_training=False, global_pool=True)\n output = tf.reshape(net, [shape[0], shape[1], -1])\n return output", "def apply_network(inputs):\n return apply_layer(tf.sigmoid(apply_layer(inputs, 64)), 1)", "def SRCNN(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64*depth_multiplier, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n #conv1 = BatchNormalization(name='bn_conv1')(conv1)\n \n mapping = Convolution2D(filters=32*depth_multiplier, kernel_size=1, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv1)\n #mapping = BatchNormalization(name='bn_mapping')(mapping)\n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def _generateNetwork(self, n_actions, obs_space):\r\n \r\n import tensorflow as tf \r\n self._ALPHA = 1e-3 # learning rate \r\n RESIZED_SCREEN = 84\r\n self._STATE_FRAMES = 3 # states/images used for taking a decision\r\n \r\n # Graph for compressing the input image \r\n x, y, z = obs_space\r\n self._image_input_layer = tf.placeholder(\"float\", \r\n [None, x, y, z])\r\n image_step_size_x = int(np.ceil(float(x / RESIZED_SCREEN)))\r\n image_step_size_y = int(np.ceil(float(y / RESIZED_SCREEN)))\r\n extra_pad_x = RESIZED_SCREEN - int(x / image_step_size_x)\r\n extra_pad_y = 
RESIZED_SCREEN - int(y / image_step_size_y)\r\n self._image_output_layer = tf.nn.max_pool(\r\n self._image_input_layer, \r\n ksize=[1, image_step_size_x, image_step_size_y, 1],\r\n strides=[1, image_step_size_x, image_step_size_y, 1], \r\n padding=\"VALID\") \r\n \r\n # Function for compressing (and reshaping) the image\r\n self._compressImage = lambda obs : np.pad(\r\n self._session.run(\r\n self._image_output_layer, \r\n feed_dict={self._image_input_layer: np.array([obs])})/255.0, \r\n ((0,0), (0,extra_pad_x), (0,extra_pad_y), (0,0)),\r\n mode='constant') \r\n\r\n CONVOLUTION_FILTER_VECTOR = [6, 6, 4]\r\n CONVOLUTION_STRIDE_VECTOR = [3, 3, 2]\r\n CONVOLUTION_KERNEL_VECTOR = [16, 16, 36]\r\n CONVOLUTION_INPUT_VECTOR = ([z * self._STATE_FRAMES] + \r\n CONVOLUTION_KERNEL_VECTOR[:-1])\r\n FEED_FWD_VECTOR = [(3**2) * CONVOLUTION_KERNEL_VECTOR[-1], 64, \r\n n_actions] \r\n \r\n # The chosen activation function is the Leaky ReLU function\r\n self._activation = lambda x : tf.maximum(0.01*x, x)\r\n\r\n \r\n # Initialization parameters\r\n INITIALIZATION_STDDEV = 0.1\r\n INITIALIZATION_MEAN = 0.00\r\n INITIALIZATION_BIAS = -0.001\r\n\r\n # Convolutional layers\r\n self._input_layer = tf.placeholder(\"float\", \r\n [None, \r\n RESIZED_SCREEN, \r\n RESIZED_SCREEN, \r\n z * self._STATE_FRAMES])\r\n self._convolutional_weights = []\r\n self._convolutional_bias = []\r\n self._hidden_convolutional_layer = [self._input_layer]\r\n\r\n for i in range(len(CONVOLUTION_FILTER_VECTOR)):\r\n self._convolutional_weights.append(tf.Variable(tf.truncated_normal(\r\n [CONVOLUTION_FILTER_VECTOR[i], \r\n CONVOLUTION_FILTER_VECTOR[i], \r\n CONVOLUTION_INPUT_VECTOR[i], \r\n CONVOLUTION_KERNEL_VECTOR[i]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._convolutional_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, \r\n shape=[CONVOLUTION_KERNEL_VECTOR[i]])))\r\n self._hidden_convolutional_layer.append(\r\n self._activation(tf.nn.conv2d(\r\n self._hidden_convolutional_layer[i], \r\n self._convolutional_weights[i], \r\n strides=[1, \r\n CONVOLUTION_STRIDE_VECTOR[i],\r\n CONVOLUTION_STRIDE_VECTOR[i], \r\n 1], \r\n padding=\"VALID\") \r\n + self._convolutional_bias[i]))\r\n \r\n # Feed forward layers\r\n self._hidden_activation_layer = [tf.reshape(\r\n self._hidden_convolutional_layer[-1], \r\n [-1, FEED_FWD_VECTOR[0]])]\r\n self._feed_forward_weights = []\r\n self._feed_forward_bias = []\r\n\r\n for i in range(len(FEED_FWD_VECTOR) - 2):\r\n self._feed_forward_weights.append(tf.Variable(tf.truncated_normal(\r\n [FEED_FWD_VECTOR[i], \r\n FEED_FWD_VECTOR[i+1]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._feed_forward_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, shape=[FEED_FWD_VECTOR[i+1]])))\r\n self._hidden_activation_layer.append(self._activation(\r\n tf.matmul(self._hidden_activation_layer[i], \r\n self._feed_forward_weights[i]) \r\n + self._feed_forward_bias[i])\r\n )\r\n \r\n # The calculation of the state-action value function does not \r\n # require the neurons' activation function\r\n self._feed_forward_weights.append(tf.Variable(tf.truncated_normal(\r\n [FEED_FWD_VECTOR[-2], \r\n FEED_FWD_VECTOR[-1]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._feed_forward_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, \r\n shape=[FEED_FWD_VECTOR[-1]])))\r\n self._state_value_layer = (tf.matmul(self._hidden_activation_layer[-1], \r\n self._feed_forward_weights[-1]) \r\n + 
self._feed_forward_bias[-1])\r\n\r\n # Define the logic of the optimization\r\n self._action = tf.placeholder(\"float\", [None, n_actions])\r\n self._target = tf.placeholder(\"float\", [None])\r\n self._action_value_vector = tf.reduce_sum(tf.mul(\r\n self._state_value_layer, self._action), reduction_indices=1)\r\n self._cost = tf.reduce_sum(tf.square(\r\n self._target - self._action_value_vector))\r\n self._alpha = tf.placeholder('float')\r\n self._train_operation = tf.train.AdamOptimizer(\r\n self._alpha).minimize(self._cost)\r\n self._session = tf.Session()\r\n\r\n operation_intizializer = tf.initialize_all_variables()\r\n self._saver = tf.train.Saver()\r\n\r\n try:\r\n self._saver.restore(self._session, self._PARAMETERS_FILE_PATH)\r\n print ('Calibrated parameters SUCCESSFULLY LOADED.',\r\n flush=True)\r\n except:\r\n self._session.run(operation_intizializer)\r\n print ('It was not possible to load calibrated parameters.',\r\n flush=True)\r\n \r\n # Definition of feed_forward and optimization functions\r\n self._feedFwd = lambda state : self._session.run(\r\n self._state_value_layer, \r\n feed_dict={self._input_layer: state})\r\n \r\n self._backProp = lambda valueStates, actions, valueTarget : (\r\n self._session.run(self._train_operation, \r\n feed_dict={self._input_layer: valueStates,\r\n self._action: actions,\r\n self._target: valueTarget,\r\n self._alpha : self._ALPHA}))", "def __init__(self, attribute_size, output_size, n_hidden_layers=2, n_hidden_neurons=30):\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_neurons = n_hidden_neurons\n self.attribute_size = attribute_size\n self.output_size = output_size\n\n X = T.fmatrix()\n Y = T.fmatrix()\n\n self.w_h = nnet.init_weights((self.attribute_size, self.n_hidden_neurons))\n self.w_h2 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n self.w_o = nnet.init_weights((self.n_hidden_neurons, self.output_size))\n\n if self.n_hidden_layers == 2:\n\n noise_py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)\n\n elif self.n_hidden_layers == 3:\n\n self.w_h3 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n\n noise_py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_h3, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)", "def run_net(inputs, **parameter):\n\n # ---- set numpy random state for each run----\n np.random.set_state(np_state)\n\n # -----parameter setting-------\n n_ex = 1600\n n_inh = int(n_ex / 4)\n n_input = MNIST_shape[1] * coding_n\n n_read = n_ex + n_inh\n\n R = parameter['R']\n f_in = parameter['f_in']\n f_EE = parameter['f_EE']\n f_EI = parameter['f_EI']\n f_IE = parameter['f_IE']\n f_II = parameter['f_II']\n\n A_EE = 60 * f_EE\n A_EI = 60 * f_EI\n A_IE = 60 * f_IE\n A_II = 60 * f_II\n A_inE = 60 * f_in\n A_inI = 60 * 
f_in\n\n tau_ex = parameter['tau_ex'] * coding_duration\n tau_inh = parameter['tau_inh'] * coding_duration\n tau_read = 30\n\n p_inE = parameter['p_in'] * 0.1\n p_inI = parameter['p_in'] * 0.1\n\n # ------definition of equation-------------\n neuron_in = '''\n I = stimulus(t,i) : 1\n '''\n\n neuron = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)\n dg/dt = (-g)/(3*ms) : 1\n dh/dt = (-h)/(6*ms) : 1\n I = (g+h)+13.5: 1\n x : 1\n y : 1\n z : 1\n '''\n\n neuron_read = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1\n dg/dt = (-g)/(3*ms) : 1 \n dh/dt = (-h)/(6*ms) : 1\n I = (g+h): 1\n '''\n\n synapse = '''\n w : 1\n '''\n\n on_pre_ex = '''\n g+=w\n '''\n\n on_pre_inh = '''\n h-=w\n '''\n\n # -----Neurons and Synapses setting-------\n Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,\n name='neurongroup_input')\n\n G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,\n name='neurongroup_ex')\n\n G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,\n name='neurongroup_in')\n\n G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')\n\n S_inE = Synapses(Input, G_ex, synapse, on_pre=on_pre_ex, method='euler', name='synapses_inE')\n\n S_inI = Synapses(Input, G_inh, synapse, on_pre=on_pre_ex, method='euler', name='synapses_inI')\n\n S_EE = Synapses(G_ex, G_ex, synapse, on_pre=on_pre_ex, method='euler', name='synapses_EE')\n\n S_EI = Synapses(G_ex, G_inh, synapse, on_pre=on_pre_ex, method='euler', name='synapses_EI')\n\n S_IE = Synapses(G_inh, G_ex, synapse, on_pre=on_pre_inh, method='euler', name='synapses_IE')\n\n S_II = Synapses(G_inh, G_inh, synapse, on_pre=on_pre_inh, method='euler', name='synapses_I')\n\n S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')\n\n S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')\n\n # -------initialization of neuron parameters----------\n G_ex.v = '13.5+1.5*rand()'\n G_inh.v = '13.5+1.5*rand()'\n G_readout.v = '0'\n G_ex.g = '0'\n G_inh.g = '0'\n G_readout.g = '0'\n G_ex.h = '0'\n G_inh.h = '0'\n G_readout.h = '0'\n G_ex.tau = tau_ex\n G_inh.tau = tau_inh\n G_readout.tau = tau_read\n [G_ex, G_in] = base.allocate([G_ex, G_inh], 10, 10, 20)\n\n # -------initialization of network topology and synapses parameters----------\n S_inE.connect(condition='j<0.3*N_post', p=p_inE)\n S_inI.connect(condition='j<0.3*N_post', p=p_inI)\n S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_E_readout.connect(j='i')\n S_I_readout.connect(j='i+n_ex')\n\n S_inE.w = function.gamma(A_inE, S_inE.w.shape)\n S_inI.w = function.gamma(A_inI, S_inI.w.shape)\n S_EE.w = function.gamma(A_EE, S_EE.w.shape)\n S_IE.w = function.gamma(A_IE, S_IE.w.shape)\n S_EI.w = function.gamma(A_EI, S_EI.w.shape)\n S_II.w = function.gamma(A_II, S_II.w.shape)\n\n S_EE.pre.delay = '1.5*ms'\n S_EI.pre.delay = '0.8*ms'\n S_IE.pre.delay = '0.8*ms'\n S_II.pre.delay = '0.8*ms'\n\n # ------create network-------------\n net = Network(collect())\n net.store('init')\n\n # ------run network-------------\n stimulus 
= TimedArray(inputs[0], dt=Dt)\n net.run(duration * Dt)\n states = net.get_states()['neurongroup_read']['v']\n net.restore('init')\n return (states, inputs[1])", "def classify_lenet5(batch_size=500, output_size=20):\n\n rng = numpy.random.RandomState(23455)\n\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 37, 23))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 37, 23),\n filter_shape=(20, 1, 4, 2),\n poolsize=(2, 2),\n )\n\n # layer1 = LeNetConvPoolLayer(\n # rng,\n # input=layer0.output,\n # image_shape=(batch_size, 20, 17, 11),\n # filter_shape=(50, 20, 4, 2),\n # poolsize=(2, 2),\n # )\n #\n # layer4 = LeNetConvPoolLayer(\n # rng,\n # input=layer1.output,\n # image_shape=(batch_size, 50, 7, 5),\n # filter_shape=(100, 50, 4, 2),\n # poolsize=(2, 2),\n # )\n\n layer2_input = layer0.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=3740,\n n_out=output_size,\n activation=T.tanh,\n use_bias=True\n )\n\n # layer5 = HiddenLayer(\n # rng,\n # input=layer2.output,\n # n_in=200,\n # n_out=output_size,\n # activation=T.tanh,\n # use_bias=True\n # )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=output_size, n_out=2)\n\n model_params = pickle.load(open('../model/cnn_dist_'+str(output_size)+'.pkl'))\n #\n layer0.W = theano.shared(\n value=numpy.array(\n model_params[2].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer0.b = theano.shared(\n value=numpy.array(\n model_params[3].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer1.W = theano.shared(\n # value=numpy.array(\n # model_params[-4].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer1.b = theano.shared(\n # value=numpy.array(\n # model_params[-3].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n #\n # layer4.W = theano.shared(\n # value=numpy.array(\n # model_params[-6].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer4.b = theano.shared(\n # value=numpy.array(\n # model_params[-5].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer2.W = theano.shared(\n value=numpy.array(\n model_params[0].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer2.b = theano.shared(\n value=numpy.array(\n model_params[1].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer5.W = theano.shared(\n # value=numpy.array(\n # model_params[-10].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer5.b = theano.shared(\n # value=numpy.array(\n # 
model_params[-9].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer3.W = theano.shared(\n value=numpy.array(\n model_params[4].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer3.b = theano.shared(\n value=numpy.array(\n model_params[5].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # params = layer3.params + layer5.params + layer2.params + layer4.params + layer1.params + layer0.params\n\n datasets = load_data(None)\n\n sets = ['train', 'dev', 'test']\n dimension = [20000, 20000, 20000]\n for k in range(3):\n if k == 0:\n classify_set_x, classify_set_y, classify_set_z, classify_set_m, classify_set_c, classify_set_b= datasets[k]\n else:\n classify_set_x, classify_set_y, classify_set_z= datasets[k]\n\n # compute number of minibatches for training, validation and testing\n n_classify_batches = classify_set_x.get_value(borrow=True).shape[0]\n n_classify_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n classify = theano.function(\n [index],\n layer2.output,\n givens={\n x: classify_set_x[index * batch_size: (index + 1) * batch_size],\n }\n )\n\n r = []\n\n for i in xrange(n_classify_batches):\n m = classify(i)\n r.extend(m)\n r = np.array(r)\n print r.shape\n r = np.append(r, np.reshape(classify_set_y.eval(),(dimension[k], 1)), 1)\n numpy.savetxt('../extractedInformation/cnn_dist_'+str(output_size)+'/'+sets[k]+'.csv', r, delimiter=\",\")", "def predict(self, images):\n\t\t#testing_dataset = tf.data.Dataset.from_tensor_slices(images)\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\ttesting_dataset = tf.data.Dataset.from_tensor_slices(np.asarray(images)).map(lambda x: tf.image.resize(x, [self.image_size, self.image_size]) / 255.0)\n\t\t#testing_dataset_shape = tf.data.Dataset.from_tensor_slices(np.full((len(images), 2), 500, dtype=np.int32))\n\t\ttesting_iterator_X = tf.data.Dataset.zip((testing_dataset, )).batch(self.batch_size).make_initializable_iterator()\n\n\t\tself.sess.run(testing_iterator_X.initializer)\n\t\ttesting_handle_X = self.sess.run(testing_iterator_X.string_handle())\n\n\t\tfinal_output = np.zeros([len(images), 500, 500, num_classes])\n\t\tj = 0\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t[test_output] = self.sess.run(\n\t\t\t\t\t[self.output],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\tself.is_training: False,\n\t\t\t\t\t\t\tself.handle_X: testing_handle_X,\n\t\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tthis_len = len(test_output)\n\t\t\t\tfor z in range(len(test_output)):\n\t\t\t\t\tfor dim in range(num_classes):\n\t\t\t\t\t\tfinal_output[count+z:count+z+1, :, :, dim] = scipy.misc.imresize(test_output[z, :, :, dim], [500, 500])\n\n\t\t\t\t#final_output[count:count+this_len, :, :, :] = test_output\n\t\t\t\tto = final_output[count:count+this_len, :, :, :].argmax(axis=-1)\n\t\t\t\t'''\n\t\t\t\tpdb.set_trace()\n\t\t\t\tfor z in range(this_len):\n\t\t\t\t\tplt.matshow(to[z])\n\t\t\t\t\tplt.colorbar()\n\t\t\t\t\tplt.show()\n\t\t\t\t'''\n\t\t\t\tcount += this_len\n\t\t\t\tprint(f'Batch: {j}')\n\t\t\t\tj += 1\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tbreak\n\t\treturn final_output", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = 
Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def conv_net_lasagne(X_train: np.ndarray, y_train: np.ndarray):\n\n print(\"WARNING: Training this neural leads to serious memory issues\")\n\n net1 = NeuralNet(\n layers=[('input', layers.InputLayer),\n ('conv2d1', layers.Conv2DLayer),\n ('maxpool1', layers.MaxPool2DLayer),\n ('conv2d2', layers.Conv2DLayer),\n ('maxpool2', layers.MaxPool2DLayer),\n ('dropout1', layers.DropoutLayer),\n ('dense', layers.DenseLayer),\n ('dropout2', layers.DropoutLayer),\n ('output', layers.DenseLayer),\n ],\n # input layer\n input_shape=(None, 1, 28, 28),\n # layer conv2d1\n conv2d1_num_filters=32,\n conv2d1_filter_size=(5, 5),\n conv2d1_nonlinearity=lasagne.nonlinearities.rectify,\n conv2d1_W=lasagne.init.GlorotUniform(),\n # layer maxpool1\n maxpool1_pool_size=(2, 2),\n # layer conv2d2\n conv2d2_num_filters=32,\n conv2d2_filter_size=(5, 5),\n conv2d2_nonlinearity=lasagne.nonlinearities.rectify,\n # layer maxpool2\n maxpool2_pool_size=(2, 2),\n # dropout1\n dropout1_p=0.5,\n # dense\n dense_num_units=256,\n dense_nonlinearity=lasagne.nonlinearities.rectify,\n # dropout2\n dropout2_p=0.5,\n # output\n output_nonlinearity=lasagne.nonlinearities.softmax,\n output_num_units=10,\n # optimization method params\n update=nesterov_momentum,\n update_learning_rate=0.01,\n update_momentum=0.9,\n max_epochs=10,\n verbose=1,\n )\n # Train the network\n nn = net1.fit(X_train, y_train)\n\n return nn", "def neural_net(input_state):\n # Two fully connected layers:\n level_1 = tf.matmul(tf.expand_dims(input_state, 0), W1)\n level_2 = tf.matmul(level_1, W2)\n # One output neuron for each possible action:\n out = tf.matmul(level_2, O)\n return out", "def train(self, num_epochs: int):\n learn_rate = 0.02\n\n images, labels = self._mn_data.load_training()\n indices = [i for i in range(len(images))]\n\n for epoch in range(num_epochs):\n random.shuffle(indices) # Avoids modifying the actual lists\n epoch_cost = 0\n i = 0\n\n # Go through the training data in batches\n while i < len(indices):\n print(i, \"---------------------------------------------------------\")\n\n if i >= 800:\n break\n\n start = i\n end = i + batch_size\n batch_indices = indices[start:end]\n\n dw = [[[0 for _ in range(perceptron.size_w())] for perceptron in layer] for layer in self._network]\n db = [[0 for _ in layer] for layer in self._network]\n\n # Take a single image from the batch\n for index in batch_indices:\n # print(\"ex\")\n result = self.feed_forward(images[index])\n epoch_cost += self.cost(result, labels[index]) # Creates self._desired_changes\n\n # Backpropagate starting from the last (output) layer\n for j in range(len(self._network)-1, -1, -1):\n layer = self._network[j]\n prev_act_values = self._layer_inputs[j]\n function_name = layer[0].get_activation().name()\n\n if j > 0:\n next_desired_changes = [0.0 for _ in self._network[j-1]]\n else:\n next_desired_changes = None\n\n if function_name == \"relu\":\n leakage = self._relu.get_leakage()\n\n # Look at each perceptron\n for k in range(len(layer)):\n perceptron = layer[k]\n dc_da = self._desired_changes[k]\n\n if function_name == \"sigmoid\":\n dc_da *= self._sigmoid(perceptron.z) * (1 - self._sigmoid(perceptron.z))\n # print(perceptron.z, sig_delta)\n # print(dc_da)\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if 
next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n elif function_name == \"relu\":\n dc_da *= leakage if perceptron.z < 0 else 1\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n # print(\"dcda\", dc_da)\n\n if next_desired_changes:\n # print(\"nd\", next_desired_changes)\n self._desired_changes = next_desired_changes\n\n # End of sample image loop\n # print(dw[1:])\n # break\n\n # Update weights and biases\n for j in range(len(self._network)):\n layer = self._network[j]\n\n for k in range(len(layer)):\n perceptron = layer[k]\n\n perceptron.change_weights_and_bias(dw[j][k], db[j][k])\n\n # print(dw[1:])\n # print(db)\n\n i += batch_size\n\n print(\"Epoch {} completed out of {} with loss {}\".format(epoch + 1, num_epochs, epoch_cost))", "def SqueezeNet(input_shape=(224, 224, 3)):\n image_input = Input(shape=input_shape)\n\n network = Conv2D(64, (3, 3), strides=(2, 2), padding=\"valid\")(image_input)\n network = Activation(\"relu\")(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n\n # Remove layers like Dropout and BatchNormalization, they are only needed in training\n # network = Dropout(0.5)(network)\n\n network = Conv2D(1000, kernel_size=(1, 1), padding=\"valid\", name=\"last_conv\")(\n network\n )\n network = Activation(\"relu\")(network)\n\n network = GlobalAvgPool2D()(network)\n network = Activation(\"softmax\", name=\"output\")(network)\n\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n\n return model", "def create_base_network(image_input_shape, embedding_size):\n input_image = Input(shape=image_input_shape)\n x = input_image \n #x = Flatten()(input_image)\n x = Dense(128, activation='relu')(x)\n x = Dropout(0.1)(x)\n x = Dense(128, activation='relu')(x)\n x = Dropout(0.1)(x)\n x = Dense(embedding_size)(x)\n\n base_network = Model(inputs=input_image, outputs=x)\n #plot_model(base_network, to_file='base_network.png', show_shapes=True, show_layer_names=True)\n return base_network", "def __init__(self,\n image_shape,\n z_dim,\n num_blocks,\n action_space,\n hiddens=[],\n dropout=False,\n subsampling=True):\n super().__init__()\n self.image_shape = image_shape\n self.layers = nn.ModuleList()\n self.layers.append(\n ImageInputNetwork(image_shape, z_dim, num_blocks, dropout,\n subsampling))\n self.layers.append(nn.Sequential(\n nn.Linear(128, 
z_dim),\n nn.ReLU()\n ))\n self.layers.append(nn.Sequential(nn.Linear(z_dim, action_space.n),\n nn.Softmax(1)))\n self.layers.append(nn.Linear(z_dim, 1))", "def create_neural_network(NumberOfFeatures, NumberOfClasses, optimizer_type, lr, moment, lr_decay):\n model = create_base_network(NumberOfFeatures, NumberOfClasses)\n if optimizer_type == 'sgd':\n opt = optimizers.SGD(lr=lr, momentum=moment, decay=lr_decay)\n else:\n opt = optimizer_type\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n print(model.summary())\n return model", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, 
nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def __create_dense_net(nb_classes, 
img_input, include_top, depth=40,\n nb_dense_block=3, growth_rate=12, nb_filter=-1,\n nb_layers_per_block=-1, bottleneck=False,\n reduction=0.0, dropout_rate=None, weight_decay=1e-4,\n subsample_initial_block=False, activation='softmax'):\n\n concat_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n if reduction != 0.0:\n assert 0.0 < reduction <= 1.0, \\\n 'reduction value must lie between 0.0 and 1.0'\n\n # layers in each dense block\n if type(nb_layers_per_block) is list or \\\n type(nb_layers_per_block) is tuple:\n nb_layers = list(nb_layers_per_block) # Convert tuple to list\n\n assert len(nb_layers) == nb_dense_block, \\\n 'If list, nb_layer is used as provided. ' \\\n 'Note that list size must be (nb_dense_block)'\n final_nb_layer = nb_layers[-1]\n nb_layers = nb_layers[:-1]\n else:\n if nb_layers_per_block == -1:\n assert (depth - 4) % 3 == 0, \\\n 'Depth must be 3 N + 4 if nb_layers_per_block == -1'\n count = int((depth - 4) / 3)\n\n if bottleneck:\n count = count // 2\n\n nb_layers = [count for _ in range(nb_dense_block)]\n final_nb_layer = count\n else:\n final_nb_layer = nb_layers_per_block\n nb_layers = [nb_layers_per_block] * nb_dense_block\n\n # compute initial nb_filter if -1, else accept users initial nb_filter\n if nb_filter <= 0:\n nb_filter = 2 * growth_rate\n\n # compute compression factor\n compression = 1.0 - reduction\n\n # Initial convolution\n if subsample_initial_block:\n initial_kernel = (7, 7)\n initial_strides = (2, 2)\n else:\n initial_kernel = (3, 3)\n initial_strides = (1, 1)\n\n x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal',\n padding='same', strides=initial_strides, use_bias=False,\n kernel_regularizer=l2(weight_decay))(img_input)\n\n if subsample_initial_block:\n x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,\n fused=fuse)(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n\n # Add dense blocks\n for block_idx in range(nb_dense_block - 1):\n x, nb_filter = __dense_block(\n x, nb_layers[block_idx], nb_filter, growth_rate,\n bottleneck=bottleneck, dropout_rate=dropout_rate,\n weight_decay=weight_decay\n )\n # add transition_block\n x = __transition_block(x, nb_filter, compression=compression,\n weight_decay=weight_decay)\n nb_filter = int(nb_filter * compression)\n\n # The last dense_block does not have a transition_block\n x, nb_filter = __dense_block(\n x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,\n dropout_rate=dropout_rate, weight_decay=weight_decay\n )\n\n x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, fused=fuse)(x)\n x = Activation('relu')(x)\n x = GlobalAveragePooling2D()(x)\n\n if include_top:\n x = Dense(nb_classes, activation=activation)(x)\n\n return x", "def trainNet():", "def model(inputs, target_images, is_training):\n # if isinstance(inputs, tuple):\n assert mask_augs >= 0. 
and mask_augs <= 1., \"mask_augs must be in [0, 1]\"\n if FLAGS.use_td_loss and isinstance(inputs, tuple):\n # print('#'*80)\n # print(inputs)\n assert metric is not None, \"Metric function is None\"\n inputs, augs = inputs\n B = inputs.get_shape().as_list()[0]\n A = augs.get_shape().as_list()[1]\n if mask_augs > 0:\n mask = tf.cast(tf.greater(tf.random.uniform(shape=[B, A], minval=0., maxval=1.), 0.5), augs.dtype) # noqa\n bias = mask * -1\n augs = (augs * mask) + bias # Randomly mask out augs for difficulty and code those dims as -1\n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training=is_training)\n print(\"Features: \")\n print(features)\n print(\"---\")\n # Global average pool of B 7 7 2048 -> B 2048\n if data_format == 'channels_last':\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n print(\"Outputs: \")\n print(outputs)\n print(\"---\")\n # B 2048\n\n h_w = features.get_shape().as_list()[1]\n # print(h_w)\n\n augs = tf.tile(augs[:,None,None,:], tf.constant([1,h_w,h_w,1]))\n print(\"Augs: \")\n print(augs)\n print(\"---\")\n features = tf.concat([features, augs], axis=-1)\n \n with tf.variable_scope('decoder'):\n recon_images = decoder(\n features,\n block_activities,\n is_training=is_training,\n skip=skip)\n print(\"Reconstructed images and target images: \")\n print(recon_images)\n print(target_images)\n print(\"---\")\n with tf.variable_scope('metric'):\n # Squash both recon and target images\n recon_images_squash = tf.tanh(recon_images)\n target_images = (target_images * 2) - 1\n Bt = target_images.get_shape().as_list()[0]\n Br = recon_images_squash.get_shape().as_list()[0]\n if Bt == Br:\n # Attractive + repulsive loss\n pass\n elif Bt * 2 == Br:\n # Attractive-only loss\n target_images = tf.concat([target_images, target_images], 0)\n\n # Differentiable perceptual metric. 
First reconstruction.\n # both_images = tf.concat([recon_images, target_images], -1) # B H W 6\n all_images = tf.concat([recon_images_squash, target_images], 0) # Stack these in batch dim\n metric_all_images = metric(all_images, is_training=is_training)\n # B = metric_all_images.get_shape().as_list()[0]\n metric_all_images = tf.reshape(metric_all_images, [B, -1])\n metric_hidden_r, metric_hidden_t = tf.split(metric_all_images, 2, 0) # Split these in batch dim\n\n # Prep recon_images for visualization\n # recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n # recon_images = (recon_images + 5) / 10\n\n recon_mean, recon_std = tf.nn.moments(recon_images, axes=[1, 2], keep_dims=True)\n recon_images = (recon_images - recon_mean) / recon_std\n recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n recon_images = (recon_images + 5) / 10\n # recon_images = recon_images_squash\n if greyscale_viz:\n recon_images = tf.image.rgb_to_grayscale(recon_images)\n recon_images = tf.concat([recon_images, recon_images, recon_images], -1)\n print(\"Embedding output: \")\n print(metric_hidden_t)\n print(\"---\")\n return outputs, recon_images, metric_hidden_r, metric_hidden_t\n\n else:\n # augs = None\n \n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training)\n \n if data_format == 'channels_last':\n print(\"Features:\")\n print(features)\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n \n # filter_trainable_variables(trainable_variables, after_block=5)\n # add_to_collection(trainable_variables, 'trainable_variables_inblock_')\n\n return outputs", "def forward(self, inputs, hidden):\n # import pdb; pdb.set_trace()\n (seq_len, batch_size, dims) = inputs[0].size()\n\n img1s = []\n img2s = []\n for i in range(batch_size):\n img1 = inputs[0][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img1s.append(img1.contiguous().view(seq_len, 1, -1))\n\n # img2 = inputs[1][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n # img2s.append(img2.contiguous().view(seq_len, 1, -1))\n\n seq1 = torch.cat(img1s, dim=1)\n both = seq1\n # seq2 = torch.cat(img2s, dim=1)\n # both = torch.cat([seq1, seq2], dim=2)\n # import pdb; pdb.set_trace()\n embeds = self.fc1(both.view(-1, 100352 // 2))\n embeds = self.relu1(embeds)\n # embeds = self.bn1(embeds)\n\n embeds = embeds.view(-1, batch_size, self.hidden_dim)\n lstm_out, hidden = self.lstm(embeds, hidden)\n out = self.fc2(lstm_out.view(-1, lstm_out.size(2)))\n out = out.view(-1, batch_size, self.output_dim)\n out = self.sigmoid(out)\n return out, hidden", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other 
token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def create_cnn(num_features: int = None) -> models.Model:\n nclass = num_features or 5\n inp = Input(shape=(187, 1))\n img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding=\"valid\")(inp)\n # img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = GlobalMaxPool1D()(img_1)\n img_1 = Dropout(rate=0.2)(img_1)\n\n dense_1 = Dense(64, activation=activations.relu, name=\"dense_1\")(img_1)\n dense_1 = Dense(64, activation=activations.relu, name=\"dense_2\")(dense_1)\n dense_1 = Dense(nclass, activation=activations.softmax, name=\"dense_3_mitbih\")(dense_1)\n\n model = models.Model(inputs=inp, outputs=dense_1)\n opt = optimizers.Adam(learning_rate=0.001)\n\n model.compile(optimizer=opt,\n loss=losses.sparse_categorical_crossentropy,\n metrics=['accuracy'])\n model.summary()\n return model", "def run_net(inputs, **parameter):\n\n #---- set numpy random state for each run----\n np.random.set_state(np_state)\n\n # -----parameter setting-------\n n_ex = 1600\n n_inh = int(n_ex/4)\n n_input = MNIST_shape[1]*coding_n\n n_read = n_ex+n_inh\n\n R = parameter['R']\n f_in = parameter['f_in']\n f_EE = parameter['f_EE']\n f_EI = parameter['f_EI']\n f_IE = parameter['f_IE']\n f_II = parameter['f_II']\n\n A_EE = 60*f_EE\n A_EI = 60*f_EI\n A_IE = 60*f_IE\n A_II = 60*f_II\n A_inE = 60*f_in\n A_inI = 60*f_in\n\n tau_ex = parameter['tau_ex']*coding_duration\n tau_inh = parameter['tau_inh']*coding_duration\n tau_read= 30\n\n p_inE = parameter['p_in']*0.1\n p_inI = parameter['p_in']*0.1\n\n #------definition of equation-------------\n neuron_in = '''\n I = stimulus(t,i) : 1\n '''\n\n neuron = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)\n dg/dt = (-g)/(3*ms) : 1\n dh/dt = (-h)/(6*ms) : 1\n I = (g+h)+13.5: 1\n x : 1\n y : 1\n z : 1\n '''\n\n neuron_read = '''\n tau : 1\n dv/dt = (I-v) 
/ (tau*ms) : 1\n dg/dt = (-g)/(3*ms) : 1 \n dh/dt = (-h)/(6*ms) : 1\n I = (g+h): 1\n '''\n\n synapse = '''\n w : 1\n '''\n\n on_pre_ex = '''\n g+=w\n '''\n\n on_pre_inh = '''\n h-=w\n '''\n\n # -----Neurons and Synapses setting-------\n Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,\n name = 'neurongroup_input')\n\n G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,\n name ='neurongroup_ex')\n\n G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,\n name ='neurongroup_in')\n\n G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')\n\n S_inE = Synapses(Input, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inE')\n\n S_inI = Synapses(Input, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inI')\n\n S_EE = Synapses(G_ex, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EE')\n\n S_EI = Synapses(G_ex, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EI')\n\n S_IE = Synapses(G_inh, G_ex, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_IE')\n\n S_II = Synapses(G_inh, G_inh, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_I')\n\n S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')\n\n S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')\n\n #-------initialization of neuron parameters----------\n G_ex.v = '13.5+1.5*rand()'\n G_inh.v = '13.5+1.5*rand()'\n G_readout.v = '0'\n G_ex.g = '0'\n G_inh.g = '0'\n G_readout.g = '0'\n G_ex.h = '0'\n G_inh.h = '0'\n G_readout.h = '0'\n G_ex.tau = tau_ex\n G_inh.tau = tau_inh\n G_readout.tau = tau_read\n\n [G_ex,G_in] = base.allocate([G_ex,G_inh],10,10,20)\n\n # -------initialization of network topology and synapses parameters----------\n S_inE.connect(condition='j<0.3*N_post', p = p_inE)\n S_inI.connect(condition='j<0.3*N_post', p = p_inI)\n S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_E_readout.connect(j='i')\n S_I_readout.connect(j='i+n_ex')\n\n S_inE.w = function.gamma(A_inE, S_inE.w.shape)\n S_inI.w = function.gamma(A_inI, S_inI.w.shape)\n S_EE.w = function.gamma(A_EE, S_EE.w.shape)\n S_IE.w = function.gamma(A_IE, S_IE.w.shape)\n S_EI.w = function.gamma(A_EI, S_EI.w.shape)\n S_II.w = function.gamma(A_II, S_II.w.shape)\n\n S_EE.pre.delay = '1.5*ms'\n S_EI.pre.delay = '0.8*ms'\n S_IE.pre.delay = '0.8*ms'\n S_II.pre.delay = '0.8*ms'\n\n # ------create network-------------\n net = Network(collect())\n net.store('init')\n\n # ------run network-------------\n stimulus = TimedArray(inputs[0], dt=Dt)\n net.run(duration * Dt)\n states = net.get_states()['neurongroup_read']['v']\n net.restore('init')\n return (states, inputs[1])", "def build_neural_net(net_def):\n\n populated_def = net_def.copy()\n\n for layer in populated_def['layers']:\n for n in range(0, layer['num_neurons']):\n weights = layer['weights'][n]\n bias = layer['bias'][n]\n\n neuron = Neuron(weights, bias, layer['activation'])\n layer['neurons'].append(neuron)\n\n\n return populated_def", "def 
forward(self, inputs, hidden):\n # import pdb; pdb.set_trace()\n (seq_len, batch_size, dims) = inputs[0].size()\n\n img1s = []\n img2s = []\n for i in range(batch_size):\n img1 = inputs[0][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img1s.append(img1.contiguous().view(seq_len, 1, -1))\n\n img2 = inputs[1][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img2s.append(img2.contiguous().view(seq_len, 1, -1))\n\n seq1 = torch.cat(img1s, dim=1)\n both = seq1\n # seq2 = torch.cat(img2s, dim=1)\n # both = torch.cat([seq1, seq2], dim=2)\n # import pdb; pdb.set_trace()\n embeds = self.fc1(both.view(-1, 100352))\n embeds = self.relu1(embeds)\n # embeds = self.bn1(embeds)\n\n embeds = embeds.view(-1, batch_size, self.hidden_dim)\n lstm_out, hidden = self.lstm(embeds, hidden)\n out = self.fc2(lstm_out.view(-1, lstm_out.size(2)))\n out = out.view(-1, batch_size, self.output_dim)\n out = self.sigmoid(out)\n return out, hidden", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 0:\n x = keras.layers.Dense(units=n_unit, activation='relu', 
name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model", "def build_2net(input_size, output_size, n_hidden=[5, 3]):\n\t# Create network and modules\n\tnet = FeedForwardNetwork()\n\tinp = LinearLayer(input_size)\n\th1 = SigmoidLayer(n_hidden[0])\n\th2 = TanhLayer(n_hidden[1])\n\toutp = LinearLayer(output_size)\n\t# Add modules\n\tnet.addOutputModule(outp)\n\tnet.addInputModule(inp)\n\tnet.addModule(h1)\n\tnet.addModule(h2)\n\t# Create connections\n\tnet.addConnection(FullConnection(inp, h1, inSliceTo=6))\n\tnet.addConnection(FullConnection(inp, h2, inSliceFrom=6))\n\tnet.addConnection(FullConnection(h1, h2))\n\tnet.addConnection(FullConnection(h2, outp))\n\t# Finish up\n\tnet.sortModules()\n\treturn net", "def miniImagenet_resnet_v2_generator(block_fn, layers, num_classes, data_format = None):\n\n\n if data_format is None:\n data_format = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'\n\n\n def model(inputs, is_training):\n \"\"\"constructs the ResNet model given the inputs\"\"\"\n\n\n if data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n\n #localize network to generate the transformation parameters\n # raw_inputs = inputs\n\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 32, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer=tf.variance_scaling_initializer())\n\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 64, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer = tf.variance_scaling_initializer())\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n # inputs = tf.layers.flatten(inputs = inputs)\n\n # inputs = tf.layers.dense(inputs = inputs, units = 128)\n # print(inputs.shape)\n # trans_parameters = tf.layers.dense(inputs = inputs, units = 6)\n # print(trans_parameters.shape)\n # inputs = stn(input_fmap = raw_inputs, theta = trans_parameters, out_dims = [60, 60])\n\n\n\n #embedding network\n inputs = conv2d_fixed_padding(inputs = inputs, filters = 64, kernel_size = 7, strides = 2, data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'SAME', data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = block_fn, blocks = layers[0], strides = 1, \n is_training = is_training, name = 'blcok_layer1', data_format = data_format)\n print('height:', inputs.shape[1])\n\n #attention module\n #input_fmap = inputs\n # inputs = tf.reshape(inputs, (-1, 64))\n #inputs = tf.layers.dense(inputs = inputs, units = 32, activation = 
tf.tanh)\n\n #inputs = tf.reshape(inputs, [-1, 32])\n #inputs = tf.layers.dense(inputs = inputs, units = 1, activation = tf.sigmoid)\n\n #attention_para = tf.reshape(inputs, [-1, 21, 21, 1])\n\n \n #inputs = tf.multiply(input_fmap, attention_para)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = block_fn, blocks = layers[1], strides = 2,\n is_training = is_training, name = 'block_layer2', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 256, block_fn = block_fn, blocks = layers[2], strides = 2, \n is_training = is_training, name = 'block_layer3', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 512, block_fn = block_fn, blocks = layers[3], strides = 2, \n is_training = is_training, name = 'block_layer4', data_format = data_format)\n\n print('height:', inputs.shape)\n inputs = batch_norm_relu(inputs, is_training, data_format)\n \n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'VALID', data_format = data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.layers.flatten(inputs = inputs)\n\n #TODO\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n print(inputs.shape)\n outputs = tf.identity(inputs, 'final_dense')\n\n return outputs\n\n return model\n\n # def model(inputs, is_training):\n\n # if data_format == 'channels_first':\n # inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n # ##84\n # inputs = conv_block(inputs = inputs, filters = 64, kernel_size = 5, strides = 2, is_training = is_training, data_format = data_format)\n\n # tf.identity(inputs, 'conv1_bn_ac')\n\n # #84\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'SAME', data_format = data_format)\n\n # tf.identity(inputs, 'pool1')\n\n # #42\n # inputs = conv_block(inputs = inputs, filters = 128, kernel_size = 3, strides = 2, is_training = is_training, data_format = data_format)\n\n # tf.identity(inputs, 'conv2_bn_ac')\n\n # #21\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'SAME', data_format = data_format)\n\n # tf.identity(inputs, 'pool2')\n\n # inputs = conv_block(inputs = inputs, filters = 256, kernel_size = 3, strides = 2, is_training = is_training, data_format = data_format)\n\n # tf.identity(inputs, 'conv3_bn_ac')\n \n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n \n # inputs = tf.layers.flatten(inputs)\n\n # tf.identity(inputs, 'flatten1')\n\n # inputs = tf.layers.dense(inputs = inputs, units = 256)\n\n # tf.identity(inputs, 'dense1')\n\n # outputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n # tf.identity(outputs, 'final_dense')\n\n # return outputs\n\n # return model", "def make_neural_net_two_layer():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B 
= Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n P = PerformanceElem(C, 0.0)\n\n net = Network(P,[A, B, C])\n return net", "def main():\n # Load database\n (images_train, targets_train), (images_test, targets_test) = tf.keras.datasets.mnist.load_data()\n\n # Normalization\n images_train = images_train.reshape(-1, 784).astype(float)\n scaler = StandardScaler()\n images_train = scaler.fit_transform(images_train)\n images_test = images_test.reshape(-1, 784).astype(float)\n images_test = scaler.transform(images_test)\n\n images_train = images_train.reshape(-1, 28, 28, 1).astype(float)\n images_test = images_test.reshape(-1, 28, 28, 1).astype(float)\n\n # One hot encoding\n targets_train = tf.keras.utils.to_categorical(targets_train)\n targets_test = tf.keras.utils.to_categorical(targets_test)\n\n # Network architecture\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(30, (5, 5), input_shape=(28, 28, 1), \\\n activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Conv2D(15, (3, 3), activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(50, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))\n\n # Learn\n optimizer = tf.keras.optimizers.SGD()\n\n @tf.function\n def train_step(images, targets):\n \"\"\"\n Define the training step by step\n \"\"\"\n # Save all operations\n with tf.GradientTape() as tape:\n # Make prediction\n predictions = model(images)\n # Compute loss\n loss = tf.keras.losses.categorical_crossentropy(targets, predictions)\n # Compute gradients\n gradients = tape.gradient(loss, model.trainable_variables)\n # Update model\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n batch_size = 32\n epochs = 10\n images_per_epoch = len(images_train) // batch_size\n for _ in range(epochs):\n for i in range(images_per_epoch):\n start = i*batch_size\n train_step(images_train[start:start+batch_size], targets_train[start:start+batch_size])\n\n # Compile must be defined to use evaluate method\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"sgd\",\n metrics=[\"accuracy\"])\n\n # Evaluate on the test database\n scores = model.evaluate(images_test, targets_test, verbose=0)\n print(scores)", "def neural_net_image_input(image_shape):\n # TODO: Implement Function\n n_input_1 = image_shape[0]\n n_input_2 = image_shape[1]\n n_input_3 = image_shape[2]\n return tf.placeholder(tf.float32,[None, n_input_1, n_input_2, n_input_3], name='x')", "def neural_net_train(X, W1, W2, Y):\n # bias term\n X_b = np.ones((3,1))\n X_b[1:] = X\n a_1 = X_b\n \n # calculate z_2\n z_2 = np.matmul(W1, a_1)\n \n # calculate a_2\n a_2 = sigmoid(z_2)\n \n # associate with bias\n b_2 = np.ones((3, 1))\n b_2[1:] = a_2\n a_2 = b_2\n \n # calculate z_3\n z_3 = np.matmul(W2, a_2)\n \n a_3 = sigmoid(z_3)\n # forward propagation end\n # -----------------------\n \n # start of backpropagation\n \n # delta 3 in 1 x 1\n delta_3 = a_3 - Y \n \n # delta 2 in 2 x 1\n delta_2 = np.multiply(np.matmul(W2.T, delta_3)[1:], sigmoid_grad(z_2))\n \n # we don't have delta 1\n grad_w_1 = np.matmul(delta_2, a_1.T)\n \n grad_w_2 = np.matmul(delta_3, a_2.T)\n # grad_w_2 = np.matmul(delta_3.T, a_2)\n \n return a_3, grad_w_1, grad_w_2# grad_w_2", "def build_cnn(input_var=None):\n\n 
# input layer\n network = lasagne.layers.InputLayer(\n shape=(\n None,\n 1,\n 128,\n 129\n ),\n input_var=input_var\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(5, 5),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(5, 5),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(3, 3),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(3, 3),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # Fully-connected layer of 256 units with 50% dropout on its inputs\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.HeUniform() # W initialization\n )\n\n # Finally add a 1-unit softmax output layer\n network = lasagne.layers.DenseLayer(\n network,\n num_units=1,\n nonlinearity=lasagne.nonlinearities.sigmoid\n )\n\n return network", "def main():\n X_train, Y_train, y_train = load_batch(\"data_batch_1\")\n X_test, Y_test, y_test = load_batch(\"test_batch\")\n X_val, Y_val, y_val = load_batch((\"data_batch_2\"))\n\n X_train, X_train_mean, X_train_std = normalize(X_train)\n X_test = normalize_mean_std(X_test, X_train_mean, X_train_std)\n X_val = normalize_mean_std(X_val, X_train_mean, X_train_std)\n\n data = {\n \"X_train\": X_train,\n \"Y_train\": Y_train,\n \"y_train\": y_train,\n \"X_test\": X_test,\n \"Y_test\": Y_test,\n \"y_test\": y_test,\n \"X_val\": X_val,\n \"Y_val\": Y_val,\n \"y_val\": y_val,\n }\n\n network = Network(data)", "def create_LeNet(img_dim, n_labels):\n # Initialise sequential model\n model = Sequential()\n\n # Set 1: Conv2D-Activation-MaxPool\n model.add(Conv2D(32, (3, 3), padding=\"same\", input_shape=(img_dim, img_dim, 3)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n # Set 2: Con2D-Activation-MaxPool\n model.add(Conv2D(50, (5, 5), padding = \"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n # Set 3: Fully connected 
layer, with relu activation\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(\"relu\"))\n\n # Set 4: Output layer, with softmax classification, to predict n classes (n artists)\n model.add(Dense(n_labels))\n model.add(Activation(\"softmax\"))\n\n # Compile CNN: using categorigical corrsentropy and stochastic gradient descent\n model.compile(loss=\"categorical_crossentropy\", \n optimizer=SGD(lr=0.01), metrics=[\"accuracy\"])\n \n return model", "def run_neural_network(mode, arg_placeholders, arg_data, arg_hyperparams, arg_paths_extensions, **kwargs):\n\n\tif verbose: print('model_tensorflow.run_neural_network() called')\n\n\t# Placeholders\n\tx, y = arg_placeholders['x'], arg_placeholders['y'] \n\tkeep_prob = arg_placeholders['keep_prob']\n\t# Data\n\tx_trn, y_trn, x_vld, y_vld = (arg_data['x_trn'], arg_data['y_trn'], \n\t\t\t\t\t\t\t\t arg_data['x_vld'], arg_data['y_vld'])\n\tx_tst, y_tst = arg_data['x_tst'], arg_data['y_tst']\n\t# Hyperparameters\n\tuse_stored_weights, user_model = (arg_hyperparams['use_stored_weights'], \n\t\t\t\t\t\t\t\t\t arg_hyperparams['user_model'])\n\tlayer_sizes, val_perc, mini_batch_size, epochs, seed = (arg_hyperparams['layer_sizes'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['val_perc'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['mini_batch_size'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['epochs'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['seed'])\n\tlrn_rate, kp = arg_hyperparams['lrn_rate'], arg_hyperparams['kp']\n\t# Paths and extensions \n\tstore_path, out_ext, fv_ext = (arg_paths_extensions['store_path'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['out_ext'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['fv_ext'])\n\t# Weights\n\tweights_biases = {}\n\tif mode == trn or mode == tst:\n\t\tweights_biases = create_neural_network(mode, layer_sizes, use_stored_weights, store_path)\n\telif mode == app:\n\t\tweights_biases = kwargs['weights_biases']\n#\tprint('(1) initial weights W1:')\n#\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\tlogits = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\tsoftm = tf.nn.softmax(logits)\n\tpred_class = tf.argmax(softm)\n\tcorrect = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n\taccuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\n\tif mode == trn or mode == tst:\t\t\n\t\tif mode == trn:\n\t\t\t# Declare cost and optimizer here: optimizer has global variables that must be initialised (see below)\n\t\t\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=lrn_rate).minimize(cost)\n\n\t\t# Initialise all global variables that have not been initialised yet (e.g., variables for Adam). 
See \n\t\t# https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables\n\t\t# (answer by Salvador Dali) \n\t\tglobal_vars = tf.global_variables()\n\t\tis_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n\t\tnot_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\t\tif verbose: print('uninitialised variables:', [str(i.name) for i in not_initialized_vars])\n\t\tif len(not_initialized_vars):\n\t\t\tsess.run(tf.variables_initializer(not_initialized_vars))\n\t\tif verbose: print('uninitialised variables:', sess.run(tf.report_uninitialized_variables()))\n\n\t\tsaver = tf.train.Saver()\n\n\t# Save weights and model output (softmaxes)\n\tif mode == trn:\n\t\tif val_perc != 0:\n\t\t\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\t\t\tlogits_vld = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\t\t\tsoftm_vld = tf.nn.softmax(logits_vld)\n\t\t\tpred_class_vld = tf.argmax(softm_vld)\n\t\t\tcorrect_vld = tf.equal(tf.argmax(logits_vld, 1), tf.argmax(y, 1))\n\t\t\taccuracy_vld = tf.reduce_mean(tf.cast(correct_vld, 'float'))\n#\t\tprint('(2) weights W1 before training (should be the same as (1))')\n#\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\ttotal_cost = []\n\t\taccs_trn = []\n\t\taccs_vld = []\n\t\tbest_acc = 0.0\t\t\n\t\tfor epoch in range(epochs): # one epoch is one fwd-bwd propagation over the complete dataset\n\t\t\tepoch_loss = 0\n\t\t\tfor _ in range(int(len(x_trn)/mini_batch_size)):\n\t\t\t\tepoch_x, epoch_y = x_trn, y_trn\n\t\t\t\t_, c, acc_trn, sm_trn = sess.run([optimizer, cost, accuracy, softm], \n\t\t\t\t\t\t\t\t\t\t\t\t feed_dict = {x: epoch_x, y: epoch_y, keep_prob: kp})\n\t\t\t\tepoch_loss += c\n\n\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\tprint('Accuracy check (trn)')\n\t\t\t\t\tprint('acc_trn :', acc_trn)\n\t\t\t\t\tcheck_accuracy(epoch_x, epoch_y, sm_trn)\n#\t\t\tprint('(3) updated weights W1 after one training epoch (should be different from (2))')\n#\t\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\t\t# In case of mini-batch gradient descent, accumulate the results from the mini batches\n\t\t\t# acc_trn = ...\n\t\t\t# sm_trn_comb = ...\n\t\t\t# sm_trn = sm_trn_comb \n\n\t\t\tprint('epoch', str(epoch) + '/' + str(epochs), 'completed: loss =', epoch_loss, 'acc =', acc_trn)\n\n\t\t\t# Non-user model (model selection) case: save weights and softmaxes for the current epoch \n\t\t\t# if its acc_vld is the highest so far. Check acc_vld every tenth epoch\n\t\t\tif not user_model and epoch % 10 == 0:\n\t\t\t\ttotal_cost.append(epoch_loss)\n\t\t\t\taccs_trn.append(acc_trn)\n\t\t\t\tif val_perc != 0:\n\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t# batches) on accuracy and softm, which are for calculating trn results, but on \n\t\t\t\t\t\t# accuracy_vld and softm_vld. 
Rerunning leads to unwanted changes in tensor calculations\n\t\t\t\t\t\t# NB: for the ISMIR paper, sm_vld is not calculated\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy, softm],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\telse:\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy_vld, softm_vld],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\taccs_vld.append(acc_vld)\n\n\t\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\t\tprint('Accuracy check (vld)')\n\t\t\t\t\t\tprint('acc_vld :', acc_vld)\n\t\t\t\t\t\tcheck_accuracy(x_vld, y_vld, sm_vld)\n\n\t\t\t\t\tif acc_vld > best_acc:\n\t\t\t\t\t\tbest_acc = acc_vld\n\t\t\t\t\t\t# Save weights\n\t\t\t\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\t\t\t\t# Save softmaxes (trn and vld)\n\t\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t\t# batches) on softm. Rerunning leads to unwanted changes in tensor calculations \n\t\t\t\t\t\t\tsoftmaxes_trn = sess.run([softm, pred_class], \n\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x: x_trn, keep_prob: kp})[0]\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, softmaxes_trn, delimiter=',')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\t\t\t\t\t\tnp.savetxt(store_path + out_ext.replace('trn', 'vld'), sm_vld, delimiter=',')\n\t\t\t\t\t\t# Save best epoch\n\t\t\t\t\t\twith open(store_path + 'best_epoch.txt', 'w') as text_file:\n\t\t\t\t\t\t\ttext_file.write('highest accuracy on the validation set (' + \n\t\t\t\t\t\t\t\t\t\t\tstr(best_acc) + ') in epoch ' + str(epoch))\n\t\t\t\t\t\tnp.savetxt(store_path + 'best_epoch.csv', [[int(epoch), acc_vld]], delimiter=',')\n\n\t\t# User model case: save weights and softmaxes for the final epoch \n\t\tif user_model:\n\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\n\t\t# Plot the trn and vld accuracy\n\t\tif plot_or_not:\n\t\t\tplt.plot(np.squeeze(accs_trn))\n\t\t\tplt.plot(np.squeeze(accs_vld))\n\t\t\tplt.ylabel('acc')\n\t\t\tplt.xlabel('epochs (/10)')\n\t\t\tax = plt.subplot(111)\n\t\t\tax.set_prop_cycle('color', ['red', 'green'])\n#\t\t\tplt.gca().set_prop_cycle(['red', 'green'])\n\t\t\tplt.title('accuracy on training and validation set')\n\t\t\tplt.legend(['trn', 'vld'], loc='lower right')\n\t\t\tplt.savefig(store_path + 'trn_and_vld_acc.png')\n\n\t# Save model output (softmaxes)\n\tif mode == tst:\n\t\tacc_tst, sm_tst = sess.run([accuracy, softm], feed_dict={x: x_tst, y: y_tst, keep_prob: kp})\n\t\tnp.savetxt(store_path + out_ext, sm_tst, delimiter=',')\n\t\tif check_accuracies:\n\t\t\tprint('Accuracy check (tst)')\n\t\t\tprint('acc_tst :', acc_tst)\n\t\t\tcheck_accuracy(x_tst, y_tst, sm_tst)\n\n\t# Save or return model output (softmaxes)\n\tif mode == app:\n\t\tload_and_save_features = False\n\t\t# Get features and reshape to get required shape (1, number of features)\n\t\tx_app = (genfromtxt(store_path + fv_ext, delimiter=',') if load_and_save_features else \n\t\t\t\t np.array(kwargs['feature_vector']))\n\t\tx_app = x_app.reshape(1, -1)\n\t\tsm_app = sess.run(softm, feed_dict={x: x_app, keep_prob: kp})\n\t\tif load_and_save_features:\n\t\t\tnp.savetxt(store_path + out_ext, sm_app, delimiter=',')\n\t\telse:\n\t\t\treturn sm_app[0]", "def unet_network(input_tensor, nb_classes):\n # contraction 1\n 
conv1 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv1')(\n input_tensor) # (batch_size, ?, ?, 64)\n conv2 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv2')(\n conv1) # (batch_size, ?, ?, 64)\n crop2 = Cropping2D(\n cropping=((88, 88), (88, 88)),\n name=\"crop2\")(\n conv2) # (batch_size, ?, ?, 64)\n maxpool2 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool2\")(\n conv2) # (batch_size, ?, ?, 64)\n\n # contraction 2\n conv3 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv3')(\n maxpool2) # (batch_size, ?, ?, 128)\n conv4 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv4')(\n conv3) # (batch_size, ?, ?, 128)\n crop4 = Cropping2D(\n cropping=((40, 40), (40, 40)),\n name=\"crop4\")(\n conv4) # (batch_size, ?, ?, 128)\n maxpool4 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool4\")(\n conv4) # ((batch_size, ?, ?, 128)\n\n # contraction 3\n conv5 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv5')(\n maxpool4) # (batch_size, ?, ?, 256)\n conv6 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv6')(\n conv5) # (batch_size, ?, ?, 256)\n crop6 = Cropping2D(\n cropping=((16, 16), (16, 16)),\n name=\"crop6\")(\n conv6) # (batch_size, ?, ?, 256)\n maxpool6 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool6\")(\n conv6) # (batch_size, ?, ?, 256)\n\n # contraction 4\n conv7 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv7')(\n maxpool6) # (batch_size, ?, ?, 512)\n conv8 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv8')(\n conv7) # (batch_size, ?, ?, 512)\n crop8 = Cropping2D(\n cropping=((4, 4), (4, 4)),\n name=\"crop8\")(\n conv8) # (batch_size, ?, ?, 512)\n maxpool8 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool8\")(\n conv8) # (batch_size, ?, ?, 512)\n\n # bottom\n conv9 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv9')(\n maxpool8) # (batch_size, ?, ?, 1024)\n conv10 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv10')(\n conv9) # (batch_size, ?, ?, 1024)\n\n # expansion 1\n upconv11 = up_conv_2d(\n input_tensor=conv10,\n nb_filters=512,\n name='upconv11') # (batch_size, ?, ?, 512)\n concat11 = tf.concat(\n values=[crop8, upconv11],\n axis=-1,\n name='concat11') # (batch_size, ?, ?, 1024)\n conv12 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv12')(\n concat11) # (batch_size, ?, ?, 512)\n conv13 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv13')(\n conv12) # (batch_size, ?, ?, 512)\n\n # expansion 2\n upconv14 = up_conv_2d(\n input_tensor=conv13,\n nb_filters=256,\n name='upconv14') # (batch_size, ?, ?, 256)\n concat14 = tf.concat(\n values=[crop6, upconv14],\n axis=-1,\n name='concat14') # (batch_size, ?, ?, 512)\n conv15 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv15')(\n concat14) # (batch_size, ?, ?, 256)\n conv16 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv16')(\n conv15) # (batch_size, ?, ?, 256)\n\n # expansion 3\n upconv17 = up_conv_2d(\n input_tensor=conv16,\n nb_filters=128,\n name='upconv17') # (batch_size, ?, ?, 128)\n concat17 = tf.concat(\n values=[crop4, upconv17],\n axis=-1,\n name='concat17') # (batch_size, ?, ?, 256)\n conv18 = Conv2D(\n 
filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv18')(\n concat17) # (batch_size, ?, ?, 128)\n conv19 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv19')(\n conv18) # (batch_size, ?, ?, 128)\n\n # expansion 4\n upconv20 = up_conv_2d(\n input_tensor=conv19,\n nb_filters=64,\n name='upconv20') # (batch_size, ?, ?, 64)\n concat20 = tf.concat(\n values=[crop2, upconv20],\n axis=-1,\n name='concat20') # (batch_size, ?, ?, 128)\n conv21 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv21')(\n concat20) # (batch_size, ?, ?, 64)\n conv22 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv22')(\n conv21) # (batch_size, ?, ?, 64)\n conv23 = Conv2D(\n filters=nb_classes,\n kernel_size=(1, 1),\n activation='sigmoid',\n name='conv23')(\n conv22) # (batch_size, ?, ?, nb_classes)\n\n return conv23", "def batch_predict(filenames, net):\n\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n Hi, Wi, _ = imread(filenames[0]).shape\n allftrs = np.zeros((Nf, F))\n for i in range(0, Nf, N):\n start = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = imread(fname)\n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W), 'bicubic')\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # inserhttp://web.engr.illinois.edu/~slazebni/spring14/lec24_cnn.pdft into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n\n # predict features\n ftrs = predict(in_data, net)\n\n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n end = time.time()\n files_left = (len(filenames) - i+len(batch_range)) / 10.0\n one_batch_time = end - start\n print 'Done %d/%d files. Took %d seconds. 
%f minutes left,' % (i+len(batch_range), len(filenames), one_batch_time, (one_batch_time * files_left) / 60.0)\n\n return allftrs", "def batch_predict(filenames, net):\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n allftrs = np.zeros((Nf, F))\n #allpreds = []\n for i in range(0, Nf, N):\n tic = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = np.array(Image.open(fname))\n \n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W))\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # insert into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n \n # predict features\n ftrs = predict(in_data, net)\n toc = time.time()\n \n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n return allftrs", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def get_network(x):\n n_classes = 5\n batch_size = x.get_shape().as_list()[0]\n channels = x.get_shape().as_list()[3]\n \n # split channels to process separately\n c1, c2, c3, c4 = tf.split(3, channels, x)\n \n # Model Helpers --------------------------------------------------------\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#conv2d\n def conv2d(img, w, b):\n \n x = tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='VALID')\n z = tf.nn.bias_add(x, b)\n return tf.nn.relu(z)\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#max_pool\n def max_pool(img, k):\n ks = [1, k, k, 1]\n return tf.nn.max_pool(img, ksize=ks, strides=ks, padding='VALID')\n\n # TODO implement\n def maxout(x):\n raise NotImplemented()\n\n def fc(x, w, b, act):\n return act(tf.add(tf.matmul(x, w), b))\n\n def conv_net(_x):\n # First 
convolution layer\n #print 'x: {}'.format(_X.get_shape())\n weights = {\n # 6x6 conv, 3-channel input, 32-channel outputs\n 'wc1': tf.Variable(tf.truncated_normal([10, 10, 1, 32], stddev=0.01)),\n # 5x5 conv, 32-channel inputs, 64-channel outputs\n 'wc2': tf.Variable(tf.truncated_normal([7, 7, 32, 64], stddev=0.01)),\n # 3x3 conv, 64-channel inputs, 128-channel outputs\n 'wc3': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.01)),\n # 3x3 conv, 128-channel inputs, 128-channel outputs\n 'wc4': tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)),\n }\n \n biases = {\n 'bc1': tf.Variable(tf.constant(0.1, shape=[32])),\n 'bc2': tf.Variable(tf.constant(0.1, shape=[64])),\n 'bc3': tf.Variable(tf.constant(0.1, shape=[128])),\n 'bc4': tf.Variable(tf.constant(0.1, shape=[128])),\n } \n \n \n conv1 = conv2d(_x, weights['wc1'], biases['bc1'])\n # k used to be 2\n conv1 = max_pool(conv1, k=4)\n\n #print 'conv1: {}'.format(conv1.get_shape())\n\n # Second Covolution layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n conv2 = max_pool(conv2, k=2)\n\n # Thrid Convolution Layer\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n\n #print 'conv3: {}'.format(conv3.get_shape())\n\n # Fourth Convolution Layer\n conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])\n conv4 = max_pool(conv4, k=2)\n\n return tf.reshape(conv4, [batch_size, -1])\n\n \n fc_weights = {\n 'wf1': tf.Variable(tf.truncated_normal([512, 2048], stddev=0.001)),\n # fully coneected 2048 inputs, 2048 outputs\n 'wf2': tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.001)),\n # 2048 inputs, 5 outputs (class prediction)\n 'out': tf.Variable(tf.truncated_normal([2048, n_classes], stddev=0.01))\n }\n \n fc_biases = {\n 'bf1': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'bf2': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))\n }\n\n c1 = conv_net(c1)\n c2 = conv_net(c2)\n c3 = conv_net(c3)\n c4 = conv_net(c4)\n \n # feed this into one fully connected layer\n cmb = tf.concat(1, [c1,c2,c3,c4]) \n \n # fully connected\n fc1 = fc(cmb, fc_weights['wf1'], fc_biases['bf1'], tf.nn.relu)\n fc2 = fc(fc1, fc_weights['wf2'], fc_biases['bf2'], tf.nn.relu)\n \n # output\n output = fc(fc2, fc_weights['out'], fc_biases['out'], tf.nn.softmax)\n \n return output", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = 
shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= 
(learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def NNRunner(input_queue, output_queue):\n nn = NeuralNet('tiny_res_slow')\n # nn.export_weights()\n while True:\n pic = input_queue.get(True).resize((p.IMAGE_SIZE, p.IMAGE_SIZE))\n boxes = nn.run_images([np.asarray(pic)], cutoff=0.2)\n output_queue.put(boxes)\n input_queue.task_done()", "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 
'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def build_net(self, inputs):\n with tf.variable_scope(self._scope, self._scope, [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.batch_norm],\n outputs_collections=end_points_collection):\n net = slim.conv2d(inputs, 32, 3, 1, scope='conv1')\n net = slim.conv2d(net, 32, 3, 1, scope='conv2')\n\n net = slim.conv2d(net, 64, 3, 1, scope='conv3')\n net = slim.conv2d(net, 64, 3, 1, scope='conv4')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool1')\n\n net = slim.conv2d(net, 128, 3, 1, scope='conv5')\n net = slim.conv2d(net, 128, 3, 1, scope='conv6')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool2')\n\n net = slim.conv2d(net, 256, 3, scope='conv7')\n net = slim.conv2d(net, 256, 3, scope='conv8')\n\n net = slim.max_pool2d(net, 2, [2, 1], scope='pool3')\n\n net = slim.conv2d(net, 512, 3, scope='conv9')\n net = slim.conv2d(net, 512, 3, scope='conv10')\n\n net = slim.max_pool2d(net, 2, [1, 1], scope='pool4')\n\n net = slim.conv2d(net, 512, 2, padding='VALID', scope='conv11')\n\n net = slim.dropout(net, keep_prob=0.5)\n\n self.end_points = utils.convert_collection_to_dict(end_points_collection)\n self.net = net", "def build_dense_network(data, hidden_layers, **kwargs):\n # Input layer\n with tf.variable_scope(\"layer_1\"): \n weights = tf.get_variable(\"weights\", shape = [input_shape[-1] + 1\n , hidden_layers[0]], initializer = tf.variance_scaling_initializer())\n\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([data, tf.ones(dtype = tf.float32\n , shape = (tf.shape(data)[0], 1))], axis = 1) # concat\n , weights, name = \"multiply\") # matmul\n , α, name = \"output\") # leaky relu\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Hidden layers 1 to len(hidden_layers) - 1\n for i in range(2, len(hidden_layers)-1+2):\n\n with tf.variable_scope(f\"layer_{i}\"):\n n_nodes = hidden_layers[i-1]\n\n weights = tf.get_variable(\"weights\", shape = [hidden_layers[i-2]+1, hidden_layers[i-1]], initializer = tf.variance_scaling_initializer())\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), α, name = \"output\")\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Output layer\n with tf.variable_scope(f\"layer_{len(hidden_layers)+1}\"):\n\n weights = tf.get_variable(\"weights\", shape = (hidden_layers[1]+1, n_summaries), initializer = tf.variance_scaling_initializer())\n output = tf.identity(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), name = \"output\")\n # NO DROP-OUT in the last layer\n\n\n return output", "def create_net(num_classes=1001, sample_shape=(3, 299, 299), is_training=True,\n dropout_keep_prob=0.8, final_endpoint='InceptionV4/Mixed_7d',\n aux_endpoint='InceptionV4/Mixed_6e'):\n end_points = {}\n name = 'InceptionV4'\n net, end_points = inception_v4_base(sample_shape,\n final_endpoint=final_endpoint,\n aux_endpoint=aux_endpoint)\n # Auxiliary Head logits\n if aux_endpoint 
is not None:\n # 17 x 17 x 1024\n aux_logits = end_points[aux_endpoint + '-aux']\n blk = name + '/AuxLogits'\n net.add(AvgPooling2D('%s/AvgPool_1a_5x5' % blk, 5, stride=3,\n border_mode='VALID'), aux_logits)\n t = conv2d(net, '%s/Conv2d_1b_1x1' % blk, 128, 1)\n conv2d(net, '%s/Conv2d_2a' % blk, 768,\n t.get_output_sample_shape()[1:3], border_mode='VALID')\n net.add(Flatten('%s/flat' % blk))\n end_points[blk] = net.add(Dense('%s/Aux_logits' % blk, num_classes))\n\n # Final pooling and prediction\n # 8 x 8 x 1536\n blk = name + '/Logits'\n last_layer = end_points[final_endpoint]\n net.add(AvgPooling2D('%s/AvgPool_1a' % blk,\n last_layer.get_output_sample_shape()[1:3],\n border_mode='VALID'),\n last_layer)\n # 1 x 1 x 1536\n net.add(Dropout('%s/Dropout_1b' % blk, 1 - dropout_keep_prob))\n net.add(Flatten('%s/PreLogitsFlatten' % blk))\n # 1536\n end_points[blk] = net.add(Dense('%s/Logits' % blk, num_classes))\n return net, end_points", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return 
logits, csnn_features", "def model_fn(nc=64, batch_size=1):\n\n # Create the mesh TF graph\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"my_mesh\")\n\n # Define the named dimensions\n n_block_x = 4\n n_block_y = 2\n n_block_z = 1\n\n batch_dim = mtf.Dimension(\"batch\", batch_size`)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc//n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc//n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc//n_block_z)\n\n image_c_dim = mtf.Dimension('image_c', 3)\n hidden_dim = mtf.Dimension('h', 128)\n\n # Create some input data\n data = mtf.random_uniform(mesh, [batch_dim, nx_dim, ny_dim, nz_dim,\n sx_dim, sy_dim, sz_dim,\n image_c_dim])\n\n net = mtf.layers.conv3d_with_blocks(data, hidden_dim,\n filter_size=(3, 3, 3), strides=(1, 1, 1), padding='SAME',\n d_blocks_dim=nx_dim, h_blocks_dim=ny_dim)\n\n net = mtf.reduce_sum(net, output_shape=[batch_dim, hidden_dim] )\n\n return net", "def make_prediction_net(num_out_channels, kernel_sizes=(3), num_filters=(256),\n bias_fill=None, use_depthwise=False, name=None,\n unit_height_conv=True):\n if isinstance(kernel_sizes, int) and isinstance(num_filters, int):\n kernel_sizes = [kernel_sizes]\n num_filters = [num_filters]\n assert len(kernel_sizes) == len(num_filters)\n if use_depthwise:\n conv_fn = tf.keras.layers.SeparableConv2D\n else:\n conv_fn = tf.keras.layers.Conv2D\n\n # We name the convolution operations explicitly because Keras, by default,\n # uses different names during training and evaluation. By setting the names\n # here, we avoid unexpected pipeline breakage in TF1.\n out_conv = tf.keras.layers.Conv2D(\n num_out_channels,\n kernel_size=1,\n name='conv1' if tf_version.is_tf1() else None)\n\n if bias_fill is not None:\n out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)\n\n layers = []\n for idx, (kernel_size,\n num_filter) in enumerate(zip(kernel_sizes, num_filters)):\n layers.append(\n conv_fn(\n num_filter,\n kernel_size=[1, kernel_size] if unit_height_conv else kernel_size,\n padding='same',\n name='conv2_%d' % idx if tf_version.is_tf1() else None))\n layers.append(tf.keras.layers.ReLU())\n layers.append(out_conv)\n net = tf.keras.Sequential(layers, name=name)\n return net", "def make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, 
use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def inference(images, hidden1_units, hidden2_units, is_training):\n # Hidden 1\n with tf.name_scope('hidden1'):\n weights = tf.Variable(\n tf.truncated_normal([IMAGE_PIXELS, hidden1_units],\n stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),\n name='weights')\n biases = tf.Variable(tf.zeros([hidden1_units]),\n name='biases')\n conv1 = tf.matmul(images, weights) + biases\n# bn1 = tf.layers.batch_normalization(conv1, training = is_training)\n bn1 = tf.contrib.layers.batch_norm(conv1,is_training=is_training)\n# bn1 = batch_norm(conv1, hidden1_units, is_training, \"bn1\")\n hidden1 = tf.nn.relu(bn1)\n # Hidden 2\n with tf.name_scope('hidden2'):\n weights = tf.Variable(\n tf.truncated_normal([hidden1_units, hidden2_units],\n stddev=1.0 / math.sqrt(float(hidden1_units))),\n name='weights')\n biases = tf.Variable(tf.zeros([hidden2_units]),\n name='biases')\n \n conv2 = tf.matmul(hidden1, weights) + biases\n# bn2 = tf.layers.batch_normalization(conv2, training = is_training)\n bn2 = tf.contrib.layers.batch_norm(conv2,is_training=is_training)\n# bn2 = batch_norm(conv2, hidden2_units, is_training, \"bn2\")\n hidden2 = tf.nn.relu(bn2)\n # Linear\n with tf.name_scope('softmax_linear'):\n weights = tf.Variable(\n tf.truncated_normal([hidden2_units, NUM_CLASSES],\n stddev=1.0 / math.sqrt(float(hidden2_units))),\n name='weights')\n biases = tf.Variable(tf.zeros([NUM_CLASSES]),\n name='biases')\n logits = tf.matmul(hidden2, weights) + biases\n return logits", "def build(data_shape_1, data_shape_2):\n # create NN model \n # design network\n \n inputs = keras.Input(shape=(data_shape_1, data_shape_2), name='inp')\n cnn1 = layers.Conv1D(16, 5, activation='relu')(inputs)\n cnn2 = layers.Conv1D(32, 3, activation='relu')(cnn1)\n cnn3 = layers.Conv1D(64, 3, activation='relu')(cnn2)\n cnn3 = layers.Flatten()(cnn3)\n lstm = layers.LSTM(100,return_sequences = True, activation='relu')(inputs)\n lstm = layers.Flatten()(lstm)\n x = layers.concatenate([cnn3,lstm])\n x = layers.Dense(100, activation='sigmoid')(x)\n outputs = layers.Dense(24)(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')\n \n return model", "def 
feedforward(self,inputs,hidden_activation=tanh,output_activation=tanh):\n\n # These two lists will contain the inputs and the outputs for each layer, respectively\n self.netIns = []\n self.netOuts = []\n\n input_samples=inputs.shape[0]\n\n #Currently, this will cause a crash when the network was created without bias nodes\n I = np.concatenate((inputs,np.ones((input_samples,1))),axis=1) # adds the bias input of 1\n self.netOuts.append(I) # keeping track of the outputs of every layer\n\n #The input is propagated through the layers\n for idx in range(self.size):\n W = self.weights[idx]\n\n I = np.dot(I,W) #performs the dot product between the input vector and the weight matrix\n self.netIns.append(I) # keeping track of the inputs to each layer\n\n #if we are on the last layer, we use the output activation function\n if idx == self.size -1:\n I = output_activation(I)\n #otherwise, we use the activation for the hidden layers\n else:\n I = hidden_activation(I)\n #I = np.concatenate((I,np.ones((I.shape[0],1))), axis=1)\n self.netOuts.append(I)\n\n #self.out = I\n return I", "def u_net_bn(x, is_train=False, reuse=False, pad='SAME', n_out=3):\n _, nx, ny, nz = x.shape\n print(\" * Input: size of image: (%d %d %d)\" % (nx, ny, nz))\n w_init = tf.truncated_normal_initializer(stddev=0.01)\n b_init = tf.constant_initializer(value=0.0)\n decay = 0.9\n gamma_init=tf.random_normal_initializer(1., 0.02)\n lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)\n with tf.variable_scope(\"u_net_bn\", reuse=reuse):\n inputs = InputLayer(x, name='in')\n\n conv1 = Conv2d(inputs, 64, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv1')\n conv2 = Conv2d(conv1, 128, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv2')\n conv2 = BatchNormLayer(conv2, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn2')\n\n conv3 = Conv2d(conv2, 256, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv3')\n conv3 = BatchNormLayer(conv3, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn3')\n\n conv4 = Conv2d(conv3, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv4')\n conv4 = BatchNormLayer(conv4, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn4')\n\n conv5 = Conv2d(conv4, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv5')\n conv5 = BatchNormLayer(conv5, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn5')\n\n conv6 = Conv2d(conv5, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv6')\n conv6 = BatchNormLayer(conv6, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn6')\n\n conv7 = Conv2d(conv6, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv7')\n conv7 = BatchNormLayer(conv7, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn7')\n\n conv8 = Conv2d(conv7, 512, (4, 4), (2, 2), act=lrelu, padding=pad, W_init=w_init, b_init=b_init, name='conv8')\n print(\" * After conv: %s\" % conv8.outputs)\n\n up7 = DeConv2d(conv8, 512, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv7')\n up7 = BatchNormLayer(up7, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn7')\n\n # print(up6.outputs)\n up6 = ConcatLayer([up7, conv7], concat_dim=3, name='concat6')\n up6 = DeConv2d(up6, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, 
W_init=w_init, b_init=None, name='deconv6')\n up6 = BatchNormLayer(up6, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn6')\n # print(up6.outputs)\n\n up5 = ConcatLayer([up6, conv6], concat_dim=3, name='concat5')\n up5 = DeConv2d(up5, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv5')\n up5 = BatchNormLayer(up5, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn5')\n # print(up5.outputs)\n\n up4 = ConcatLayer([up5, conv5] ,concat_dim=3, name='concat4')\n up4 = DeConv2d(up4, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv4')\n up4 = BatchNormLayer(up4, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn4')\n\n up3 = ConcatLayer([up4, conv4] ,concat_dim=3, name='concat3')\n up3 = DeConv2d(up3, 256, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv3')\n up3 = BatchNormLayer(up3, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn3')\n\n up2 = ConcatLayer([up3, conv3] ,concat_dim=3, name='concat2')\n up2 = DeConv2d(up2, 128, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv2')\n up2 = BatchNormLayer(up2, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn2')\n\n up1 = ConcatLayer([up2, conv2] ,concat_dim=3, name='concat1')\n up1 = DeConv2d(up1, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv1')\n up1 = BatchNormLayer(up1, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn1')\n\n up0 = ConcatLayer([up1, conv1] ,concat_dim=3, name='concat0')\n up0 = DeConv2d(up0, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv0')\n up0 = BatchNormLayer(up0, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn0')\n # print(up0.outputs)\n\n out = Conv2d(up0, n_out, (1, 1), act=tf.nn.sigmoid, name='out')\n\n print(\" * Output: %s\" % out.outputs)\n\n return out", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n 
raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def ResNet(images, device):\n blocksPerSection = [2, 2, 2, 2]\n channelsPerSection = [64, 128, 256, 512]\n channelsPerBlock = [1, 1]\n downsampleSection = [0, 1, 1, 1]\n\n\n x = images\n channelsOut = 64\n\n with tf.device(device):\n\n x = ConvBlock(x, 64, [7,7], 2, '_init')\n x = slim.max_pool2d(x, [3, 3], stride=2, scope='pool_1')\n\n for s in range(len(blocksPerSection)):\n for l in range(blocksPerSection[s]):\n\n # Stride at the beginning of each block\n stride = 1\n if l == 0 and downsampleSection[s]:\n stride = 2\n\n sumInput = x\n\n # 2 conv only\n x = ConvBlock(x, channelsPerSection[s]*channelsPerBlock[1], [3, 3], stride, '%d_1_%d'%(s,l))\n x = ConvBlock(x, channelsPerSection[s]*channelsPerBlock[1], [3, 3], 1, '%d_2_%d'%(s,l), False)\n\n if l == 0 and channelsOut != channelsPerSection[s]*channelsPerBlock[1]:\n sumInput = ConvBlock(sumInput, channelsPerSection[s]*channelsPerBlock[1], [1,1], stride, '_sum%d'%(s), False)\n\n channelsOut = channelsPerSection[s]*channelsPerBlock[1]\n x = sumInput + x\n x = tf.nn.relu(x)\n\n with slim.arg_scope([slim.fully_connected],\n activation_fn=None,\n weights_regularizer=slim.l2_regularizer(0.0005),\n biases_regularizer=slim.l2_regularizer(0.0005),\n trainable=True):\n x = tf.reduce_mean(x, [1,2])\n softmax_linear = slim.fully_connected(x, NUM_CLASSES, scope='fc_1')\n\n return softmax_linear", "def _nn_train_net(structure, training_set, optimizer, epochs, batch_size, verbose=1):\n\n # Preallocate output variables\n net=[]\n loss_stat=[]\n\n # Collect training time statistics\n time_start = datetime.datetime.now()\n\n # Convert input data to be in float 32 (compression)\n # Note: the conversion makes information loss\n norm_data=training_set.copy().astype(np.float32)\n\n # Calculate input data statistics\n mu=np.mean(norm_data[:,0])\n sig=np.std(norm_data[:,0])\n if sig==0: sig=1\n norm_data[:,0]=(norm_data[:,0]-mu)/sig\n\n #Calculate the output factor of the net\n min_out=np.min(norm_data[:,-1])\n max_out=np.max(norm_data[:,-1])\n output_factor=(max_out-min_out)\n if 
output_factor == 0: output_factor=1\n # Normalize the net output to be in [0, 1]\n norm_data[:,-1]=(norm_data[:,-1]-min_out)/output_factor\n\n # New TF graph\n with tf.Graph().as_default():\n # Request initializer for net structure\n values=_nn_initial_values(structure)\n # Build net\n net_variables=_nn_create(values)\n data_in=net_variables[0]\n data_out=net_variables[-1]\n # Build optimizer\n desired_out=tf.placeholder(dtype=tf.float32, shape=[None, structure[0]])\n loss=tf.losses.mean_squared_error(data_out, desired_out)\n opt=optimizer.minimize(loss)\n\n # Perform training\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n avg_cost=0.0\n total_batch=np.ceil(norm_data.shape[0]/batch_size).astype(int)\n\n # Split samples to batches\n batches = np.array_split(norm_data, total_batch)\n\n # For each batch\n for i in range(total_batch):\n _, c = sess.run([opt, loss],\n feed_dict={\n data_in: batches[i][:, 0:1],\n desired_out: batches[i][:, 1:2]\n })\n\n avg_cost += c / total_batch\n\n # Update statistics\n loss_stat.append(avg_cost)\n _log(verbose, 'Epoch %04d/%d: cost=%.9f' % (epoch+1, epochs, avg_cost), epoch>0)\n\n # Get the trained net's internal values\n for v in net_variables[1:-1]:\n net.append(sess.run(v))\n\n # Measure net training time\n time_end = datetime.datetime.now()\n time_diff = (time_end - time_start).total_seconds()\n\n _log(verbose, ' (mu: %.3f, sig: %.3f, fac: %.3f, omin: %.3f, training time: %.3f sec) \\n' % (mu, sig, output_factor, min_out, time_diff))\n\n # Return all data\n return ( (loss_stat), (mu, sig, output_factor, min_out, net) )", "def model(inputs, is_training):\n\n #for performance on GPUs using cudnn\n if data_format == 'channels_first':\n inputs = tf.transpose(inputs, [0,3,1,2])\n\n \n inputs = conv2d_fixed_padding(inputs = inputs, filters = 16, kernel_size = 3, strides = 1, data_format = data_format)\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = block_layer(inputs = inputs, filters = 16, block_fn = building_block, blcoks = layers[0], strides = 1, is_training = is_training,\n name = 'block_layer1', data_format = data_format)\n\n inputs = block_layer(inputs = inputs, filters = 32, block_fn = building_block, blocks = layers[1], strides = 1, is_training = is_training,\n name = 'block_layer2', data_format = data_format)\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = building_block, blocks = layers[2], strides = 1, is_training = is_training,\n name = 'block_layer3', data_format = data_format)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = building_block, blcoks = layers[3], strides = 1, is_training = is_training,\n name = 'block_layer4', data_format = data_format)\n\n inputs = batch_norm_relu(inputs, is_training, data_format)\n\n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 2, strides = 1, padding = 'VALID', data_format = data_format)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.reshape(inputs, [-1, 128])\n\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n inputs = tf.identity(inputs, 'final_dense')\n\n return inputs", "def __init__(self, input_size, hidden_sizes, output_size=1,\n batchnorm_bool=False,\n dropout_bool=False):\n super(NeuralNet, self).__init__()\n self.input_size = input_size\n sizes = [input_size] + hidden_sizes + [output_size]\n self.layers = nn.ModuleList(\n [nn.Linear(in_f, out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.bns = nn.ModuleList(\n 
[nn.BatchNorm1d(out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.dps = nn.ModuleList(\n [nn.Dropout(p=0.5) for _ in range(len(self.layers))])\n self.relus = nn.ModuleList(\n [nn.ReLU() for _ in range(len(self.layers))])\n self.sigmoid = nn.Sigmoid()\n\n self.batchnorm_bool = batchnorm_bool\n self.dropout_bool = dropout_bool", "def create_base_networks(in_dims):\n model = Sequential()\n model.add(Dense(300, input_dim=in_dims))\n model.add(Activation(\"tanh\"))\n model.add(Dropout(0.1))\n model.add(Dense(300, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Dense(300, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Dense(10, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Activation(\"sigmoid\"))\n # model.add(Dense(600))\n\n return model", "def LeNet(img_size=224, channels=1, output_dim=1):\n if type(output_dim) is not int:\n raise ValueError('[ERROR] Output dimensions need to be an integer')\n if type(channels) is not int:\n raise ValueError('[ERROR] Channels needs to be an integer')\n\n # Initialize the Model\n model = Sequential()\n input_shape = (img_size,img_size,channels)\n\n # If 'channels first', update the input_shape\n if backend.image_data_format() == 'channels_first':\n input_shape = (channels, img_size,img_size)\n \n # First set\n model.add(Conv2D(32, (3,3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n # Second set\n model.add(Conv2D(64, (3,3), padding='same'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n # Flattening\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation('relu'))\n\n model.add(Dense(output_dim, activation=\"relu\")) # Softmax works too if multiple Dense nodes required\n\n return model", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def __init__(self,\n image_shape,\n z_dim,\n num_blocks,\n action_space,\n hiddens=[],\n dropout=False,\n subsampling=True):\n super().__init__()\n self.layers = nn.ModuleList()\n self.layers.append(\n ImageInputNetwork(image_shape, z_dim, num_blocks, dropout,\n subsampling))\n self.layers.append(ActorNet(action_space, z_dim, hiddens))", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: 
\",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out", "def LeNet5(input_shape=None):\n input_data = tf.keras.layers.Input(shape=input_shape)\n # First block\n conv1 = tf.keras.layers.Conv2D(\n 6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_uniform')(input_data)\n maxpool1 = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(conv1)\n\n # Second block\n conv2 = tf.keras.layers.Conv2D(16, (5, 5), padding='valid',\n activation='relu', kernel_initializer='he_uniform')(maxpool1)\n maxpool2 = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(conv2)\n\n # Third block\n flatten = tf.keras.layers.Flatten()(maxpool2)\n dense1 = tf.keras.layers.Dense(400, activation='relu',\n kernel_initializer='he_uniform')(flatten)\n dense2 = tf.keras.layers.Dense(120, activation='relu',\n kernel_initializer='he_uniform')(dense1)\n dense3 = tf.keras.layers.Dense(84, activation='relu',\n kernel_initializer='he_uniform')(dense2)\n\n # Output\n dense4 = tf.keras.layers.Dense(10, activation='softmax')(dense3)\n\n model = tf.keras.models.Model(inputs=input_data, outputs=dense4)\n\n return model", "def forward(self, batch):\n # Convolutional layers\n batch = self.conv1(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n batch = self.conv2(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n # Flatten\n batch = batch.reshape(batch.shape[0], -1)\n # Fully connected layers\n batch = self.fc1(batch)\n batch = self.dropout(batch)\n batch = self.fc2(batch)\n batch = torch.sigmoid(batch)\n return batch", "def setup_ff_network(in_dim,\n out_dim,\n num_layers,\n num_neurons):\n activations 
= [Rectifier()]\n dims = [in_dim]\n\n for i in xrange(num_layers):\n activations.append(Rectifier())\n dims.append(num_neurons)\n\n dims.append(out_dim)\n\n net = MLP(activations=activations,\n dims=dims,\n weights_init=IsotropicGaussian(),\n biases_init=Constant(0.01))\n\n return net", "def neural_net_image_input(image_shape):\n return tf.placeholder(tf.float32, shape=[None]+list(image_shape), name=\"x\")", "def WideResNet(input_shape,\n nb_output_nodes,\n output_activation):\n\n # At some point turn these back into input parameters\n N=2\n k=2\n dropout=0.0\n \n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n\n ip = Input(shape=input_shape)\n\n x = initial_conv(ip)\n x = expand_conv(x, 16, k)\n\n for i in range(N - 1):\n x = conv1_block(x, k, dropout)\n\n x = BatchNormalization(axis=channel_axis, momentum=0.1,\n epsilon=1e-5, gamma_initializer='uniform')(x)\n x = Activation('relu')(x)\n\n x = expand_conv(x, 32, k, strides=(2, 2))\n\n for i in range(N - 1):\n x = conv2_block(x, k, dropout)\n\n\n x = BatchNormalization(axis=channel_axis, momentum=0.1,\n epsilon=1e-5, gamma_initializer='uniform')(x)\n x = Activation('relu')(x)\n\n x = expand_conv(x, 64, k, strides=(2, 2))\n\n\n for i in range(N - 1):\n x = conv3_block(x, k, dropout)\n\n\n x = BatchNormalization(axis=channel_axis, momentum=0.1,\n epsilon=1e-5, gamma_initializer='uniform')(x)\n x = Activation('relu')(x)\n\n x = AveragePooling2D((8, 8))(x)\n x = Flatten()(x)\n\n #x = Dense(nb_output_nodes, kernel_regularizer=l2(weight_decay),\n # activation=output_activation)(x)\n\n x = Dense(nb_output_nodes, kernel_regularizer=l2(weight_decay))(x)\n x = Activation(output_activation)(x)\n\n model = Model(ip, x)\n return model", "def init_three_layer_neuralnet(weight_scale=1, bias_scale=0, input_feat_dim=786,\n num_classes=10, num_neurons=(20, 30)):\n \n assert len(num_neurons) == 2, 'You must provide number of neurons for two layers...'\n\n model = {}\n #model['W1'] = np.random.randn((num_neurons[0],(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)) # Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n \n model['W1'] = (np.random.rand(input_feat_dim,num_neurons[0])*weight_scale) * math.sqrt(2.0/input_feat_dim)\n model['b1'] = np.zeros(num_neurons[0])# Initialize with zeros\n \n #model['W2'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n #print ((model['W1'])[0,:]).shape\n #numcols = len(input[0])\n t=len((model['W1'])[0])\n #print t\n model['W2'] = (np.random.rand(num_neurons[0],num_neurons[1])*weight_scale) * math.sqrt(2.0/t)\n model['b2'] = np.zeros(num_neurons[1])# Initialize with zeros\n\n t=len((model['W2'])[0])\n #model['W3'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n model['W3'] = (np.random.rand(num_neurons[1],num_classes)*weight_scale) * math.sqrt(2.0/t)\n model['b3'] = np.zeros(num_classes)# Initialize with zeros\n\n return model", "def Network_model(input_data):\n layer1_param={'weights':tf.Variable(tf.random_normal([784, no_neurons_layer1])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer1]))}\n \n layer2_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer1, no_neurons_layer2])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer2]))}\n \n layer3_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer2, no_neurons_layer3])), \n 'biases': 
tf.Variable(tf.random_normal([no_neurons_layer3]))}\n \n layer4_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer3, no_neurons_layer4])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer4]))}\n \n output_layer_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer4, no_classes])), \n 'biases': tf.Variable(tf.random_normal([no_classes]))}\n \n #so uptill now the weights for each layer is initialized\n \n \"\"\"\n Now what will happened in each layer, I will define next. basically the weights are multiplied\n in each layer with the corresponding inputs and then it is passed through activation function \n (relu in this case) and the output is given as input to the other layer.\n sign:B-Jan\n \"\"\"\n \n l1_output= tf.add(tf.matmul(input_data,layer1_param['weights']), layer1_param['biases'])\n l1_output=tf.nn.relu(l1_output)\n \n l2_output= tf.add(tf.matmul(l1_output,layer2_param['weights']), layer2_param['biases'])\n l2_output=tf.nn.relu(l2_output)\n \n \n l3_output= tf.add(tf.matmul(l2_output,layer3_param['weights']), layer3_param['biases'])\n l3_output=tf.nn.relu(l3_output)\n \n l4_output= tf.add(tf.matmul(l3_output,layer4_param['weights']), layer4_param['biases'])\n l4_output=tf.nn.relu(l4_output)\n \n #The final output Layer\n output= tf.matmul(l4_output, output_layer_param['weights'])+output_layer_param['biases']\n \n return output # contains the output of the last output layer", "def AlexNet_ImageNet(sobel, batch_normalization, device, concat_sobel=False):\n n_input_channels = 2 + int(not sobel) if not concat_sobel else 5\n \n alexnet_features_cfg = [\n {\n \"type\": \"convolution\",\n \"out_channels\":96,\n \"kernel_size\":11,\n \"stride\":4,\n \"padding\":2,\n \"activation\":\"ReLU\",\n },\n\n {\n \"type\":\"max_pool\",\n \"kernel_size\":3,\n \"stride\":2,\n },\n\n {\n \"type\": \"convolution\",\n \"out_channels\":256,\n \"kernel_size\":5,\n \"stride\":1,\n \"padding\":2,\n \"activation\":\"ReLU\",\n },\n\n {\n \"type\":\"max_pool\",\n \"kernel_size\":3,\n \"stride\":2,\n },\n\n {\n \"type\": \"convolution\",\n \"out_channels\":384,\n \"kernel_size\":3,\n \"stride\":1,\n \"padding\":1,\n \"activation\":\"ReLU\",\n },\n\n {\n \"type\": \"convolution\",\n \"out_channels\":384,\n \"kernel_size\":3,\n \"stride\":1,\n \"padding\":1,\n \"activation\":\"ReLU\",\n },\n\n {\n \"type\": \"convolution\",\n \"out_channels\":256,\n \"kernel_size\":3,\n \"stride\":1,\n \"padding\":1,\n \"activation\":\"ReLU\",\n },\n\n {\n \"type\":\"max_pool\",\n \"kernel_size\":3,\n \"stride\":2,\n } \n ]\n\n classifier_cfg = [\n {\"type\":\"drop_out\",\n \"drop_ratio\": 0.5},\n\n {\"type\":\"linear\",\n \"out_features\":4096,\n \"activation\":\"ReLU\"},\n\n {\"type\":\"drop_out\",\n \"drop_ratio\": 0.5},\n\n {\"type\":\"linear\",\n \"out_features\":4096}\n ]\n\n model = DeepClusteringNet(input_size=(3,224,224),\n features= stack_convolutional_layers(input_channels= n_input_channels, cfg=alexnet_features_cfg, batch_normalization=batch_normalization),\n classifier= stack_linear_layers(input_features= 256 * 6 * 6, cfg= classifier_cfg),\n top_layer = None,\n with_sobel=sobel,\n device=device)\n return model", "def UNet(input_size=(256, 256, 1)):\n inputs = Input(input_size)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(inputs)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c1)\n p1 = MaxPooling2D((2, 2))(c1)\n\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', 
padding='same')(p1)\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c2)\n p2 = MaxPooling2D((2, 2))(c2)\n\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p2)\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c3)\n p3 = MaxPooling2D((2, 2))(c3)\n\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p3)\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c4)\n p4 = MaxPooling2D(pool_size=(2, 2))(c4)\n\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p4)\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c5)\n\n u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u6)\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c6)\n\n u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u7)\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c7)\n\n u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u8)\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c8)\n\n u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u9)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c9)\n\n outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=['accuracy', dice_coef])\n return model", "def __init__(self, num_1d=None):\n super(Net, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n 
nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n 
nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n 
nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )\n if num_1d is not None:\n self.final_1d = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=1, padding=0),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n 
nn.Conv1d(128, num_1d, kernel_size=1, padding=0),\n nn.Sigmoid(),\n )\n self.num_1d = num_1d", "def create_vision_transformer_model_2d(input_image_size,\n number_of_classification_labels=1000,\n mode='classification',\n patch_size=6,\n number_of_transformer_layers=8,\n transformer_units=[128, 64],\n projection_dimension=64,\n number_of_attention_heads=4,\n mlp_head_units=[2048, 1024],\n dropout_rate=0.5):\n\n inputs = Input(shape=input_image_size)\n\n patches = ExtractPatches2D(patch_size)(inputs)\n number_of_patches = ((input_image_size[0] * input_image_size[1]) // (patch_size ** 2))\n encoded_patches = EncodePatches(number_of_patches,\n projection_dimension)(patches)\n\n for _ in range(number_of_transformer_layers):\n\n x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)\n\n attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,\n key_dim=projection_dimension,\n dropout=dropout_rate/5.0)(x1, x1)\n x2 = Add()([attention_output, encoded_patches])\n x3 = LayerNormalization(epsilon=1e-6)(x2)\n x3 = multilayer_perceptron(x3,\n hidden_units=transformer_units,\n dropout_rate=dropout_rate/5.0)\n encoded_patches = Add()([x3, x2])\n\n representation = LayerNormalization(epsilon=1e-6)(encoded_patches)\n representation = Flatten()(representation)\n representation = Dropout(dropout_rate)(representation)\n\n features = multilayer_perceptron(representation,\n hidden_units=mlp_head_units,\n dropout_rate=dropout_rate)\n\n layer_activation = ''\n if mode == 'classification':\n layer_activation = 'softmax'\n elif mode == 'regression':\n layer_activation = 'linear'\n elif mode == 'sigmoid':\n layer_activation = 'sigmoid'\n else:\n raise ValueError('mode must be either `classification` or `regression`.')\n\n outputs = Dense(number_of_classification_labels,\n activation=layer_activation)(features)\n\n model = Model(inputs=inputs, outputs=outputs)\n\n return model", "def LeNet(input_shape=(28, 28, 1), classes=10):\n model = Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(classes, activation='softmax'))\n return model", "def forward(self, inputs, inputs1):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n # Second Modality\n\n down11, indices_11, unpool_shape11 = self.layer_11(inputs=inputs,\n layer_size=2)\n down12, indices_12, unpool_shape12 = self.layer_12(inputs=down1,\n 
layer_size=2)\n down13, indices_13, unpool_shape13 = self.layer_13(inputs=down2,\n layer_size=3)\n down14, indices_14, unpool_shape14 = self.layer_14(inputs=down3,\n layer_size=3)\n down15, indices_15, unpool_shape15 = self.layer_15(inputs=down4,\n layer_size=3)\n\n up15 = self.layer_16(inputs=down15, indices=indices_15,\n output_shape=unpool_shape15, layer_size=3)\n up14 = self.layer_17(inputs=up15, indices=indices_14,\n output_shape=unpool_shape4, layer_size=3)\n up13 = self.layer_18(inputs=up14, indices=indices_13,\n output_shape=unpool_shape13, layer_size=3)\n up12 = self.layer_19(inputs=up13, indices=indices_12,\n output_shape=unpool_shape12, layer_size=2)\n output1 = self.layer_110(inputs=up12, indices=indices_11,\n output_shape=unpool_shape11, layer_size=2)\n\n # End Pipe\n\n Concat = torch.cat((output, output1), 1)\n\n finalout = self.layer_1110(Concat)\n\n return finalout", "def forward_pass_unet(images, phase_train):\n\n K = 4\n images = tf.expand_dims(images, -1)\n\n # Network blocks\n conv1 = sdn.convolution('Conv1', images, 3, K, 1, phase_train=phase_train)\n down = sdn.convolution('Down128', conv1, 2, K*2, 2, phase_train=phase_train)\n\n conv2 = sdn.convolution('Conv2', down, 3, K*2, 1, phase_train=phase_train)\n conv2 = sdn.residual_layer('Conv2b', conv2, 3, K*2, 1, phase_train=phase_train)\n down = sdn.convolution('Down64', conv2, 2, K*4, 2, phase_train=phase_train)\n\n conv3 = sdn.residual_layer('Conv3', down, 3, K*4, 1, phase_train=phase_train)\n conv3 = sdn.residual_layer('Conv3b', conv3, 3, K*4, 1, phase_train=phase_train)\n down = sdn.convolution('Down32', conv3, 2, K*8, 2, phase_train=phase_train) # Now 32x32\n\n conv4 = sdn.residual_layer('Conv4', down, 3, K*8, 1, phase_train=phase_train)\n conv4 = sdn.residual_layer('Conv4b', conv4, 3, K*8, 1, phase_train=phase_train)\n down = sdn.convolution('Down16', conv4, 2, K*16, 2, phase_train=phase_train)\n\n conv5 = sdn.inception_layer('Conv5', down, K*16, 1, phase_train=phase_train)\n conv5 = sdn.inception_layer('Conv5b', conv5, K*16, 1, phase_train=phase_train)\n down = sdn.convolution('Down8', conv5, 2, K*32, 2, phase_train=phase_train)\n\n conv6 = sdn.inception_layer('Conv6', down, K*32, phase_train=phase_train)\n conv6 = sdn.inception_layer('Conv6b', conv6, K*32, phase_train=phase_train)\n down = sdn.convolution('Down4', conv6, 2, K*64, 2, phase_train=phase_train)\n\n # Bottom of the decoder: 4x4\n conv7 = sdn.inception_layer('Bottom1', down, K*64, phase_train=phase_train)\n conv7 = sdn.residual_layer('Bottom2', conv7, 3, K*64, 1, dropout=FLAGS.dropout_factor, phase_train=phase_train)\n conv7 = sdn.inception_layer('Bottom2', conv7, K*64, phase_train=phase_train)\n\n # Upsample 1\n dconv = sdn.deconvolution('Dconv1', conv7, 2, K*32, S=2, phase_train=phase_train, concat=False, concat_var=conv6, out_shape=[FLAGS.batch_size, 8, 8, K*32])\n dconv = sdn.inception_layer('Dconv1b', dconv, K*32, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv2', dconv, 2, K*16, S=2, phase_train=phase_train, concat=False, concat_var=conv5, out_shape=[FLAGS.batch_size, 16, 16, K*16])\n dconv = sdn.inception_layer('Dconv2b', dconv, K*16, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv3', dconv, 2, K*8, S=2, phase_train=phase_train, concat=False, concat_var=conv4, out_shape=[FLAGS.batch_size, 32, 32, K*8])\n dconv = sdn.inception_layer('Dconv3b', dconv, K*8, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv4', dconv, 2, K*4, S=2, phase_train=phase_train, concat=False, concat_var=conv3, 
out_shape=[FLAGS.batch_size, 64, 64, K*4])\n dconv = sdn.residual_layer('Dconv4b', dconv, 3, K*4, S=1, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv5', dconv, 2, K*2, S=2, phase_train=phase_train, concat=False, concat_var=conv2, out_shape=[FLAGS.batch_size, 128, 128, K*2])\n dconv = sdn.residual_layer('Dconv5b', dconv, 3, K*2, S=1, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv6', dconv, 2, K, S=2, phase_train=phase_train, concat=False, concat_var=conv1, out_shape=[FLAGS.batch_size, 256, 256, K])\n dconv = sdn.convolution('Dconv6b', dconv, 3, K, S=1, phase_train=phase_train, dropout=FLAGS.dropout_factor)\n\n # Output is a 1x1 box with 3 labels\n Logits = sdn.convolution('Logits', dconv, 1, FLAGS.num_classes, S=1, phase_train=phase_train, BN=False, relu=False, bias=False)\n\n return Logits, sdn.calc_L2_Loss(FLAGS.l2_gamma)", "def __init__(\n self,\n numpy_rng,\n train_set_x,\n train_set_y,\n hidden_layers_sizes,\n n_ins=784,\n n_outs=10\n ):\n\n self.sigmoid_layers = []\n self.AE_layers = []\n self.params = []\n self.n_layers = len(hidden_layers_sizes)\n self.train_set_x = train_set_x\n self.train_set_y = train_set_y\n\n assert self.n_layers > 0\n\n self.x = T.matrix('x') # the data is presented as rasterized images\n self.y = T.ivector('y') # the labels are presented as 1D vector of\n\n for i in xrange(self.n_layers): # used to be n layers\n\n # construct the sigmoid layer = encoder stack\n if i == 0:\n layer_input = self.x\n else:\n layer_input = self.sigmoid_layers[-1].output\n\n sigmoid_layer = HiddenLayer(rng=numpy_rng,\n input=layer_input,\n n_in=(n_ins if i == 0 else\n hidden_layers_sizes[i-1]),\n n_out=hidden_layers_sizes[i],\n activation=T.nnet.sigmoid)\n\n # add the layer to our list of layers\n self.sigmoid_layers.append(sigmoid_layer)\n self.params.extend(sigmoid_layer.params)\n\n # init the DA_layer, takes weights from sigmoid layer\n AE_layer = AutoEncoder(\n numpy_rng=numpy_rng,\n input=layer_input,\n n_visible=(n_ins if i == 0 else hidden_layers_sizes[i-1]),\n n_hidden=hidden_layers_sizes[i],\n W=sigmoid_layer.W,\n bhid=sigmoid_layer.b)\n\n self.AE_layers.append(AE_layer)\n\n # on top of the layers\n # log layer for fine-tuning\n self.logLayer = LogisticRegression(\n input=self.sigmoid_layers[-1].output,\n n_in=hidden_layers_sizes[-1],\n n_out=n_outs\n )\n self.params.extend(self.logLayer.params)\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\n self.errors = self.logLayer.errors(self.y)" ]
[ "0.7127509", "0.6960329", "0.6916975", "0.67752737", "0.6708114", "0.66940296", "0.66714424", "0.6586228", "0.6581007", "0.6574923", "0.6569222", "0.6543639", "0.6534254", "0.65283036", "0.64970976", "0.6490959", "0.6476524", "0.6472872", "0.6470503", "0.64555347", "0.64446694", "0.6432964", "0.6430325", "0.64266366", "0.6420732", "0.6419453", "0.6398648", "0.63969404", "0.6396145", "0.6385163", "0.6379969", "0.63785505", "0.63766646", "0.6369808", "0.63696533", "0.63655645", "0.6358101", "0.63512516", "0.6341734", "0.6338745", "0.6337854", "0.6337649", "0.6331491", "0.63304865", "0.6327708", "0.63253987", "0.6320476", "0.63172674", "0.6314293", "0.62975734", "0.62966996", "0.629591", "0.627033", "0.6270166", "0.626979", "0.62587535", "0.6258501", "0.625808", "0.6247757", "0.62439346", "0.62328935", "0.62303096", "0.62302023", "0.62238675", "0.62224483", "0.6221732", "0.62171763", "0.6215132", "0.62135047", "0.6213008", "0.6212162", "0.6207171", "0.6194059", "0.619402", "0.6193177", "0.61890084", "0.6182832", "0.61810964", "0.61799914", "0.6177524", "0.6175946", "0.6173139", "0.6171946", "0.61657983", "0.616085", "0.61587137", "0.6154344", "0.6154236", "0.61530185", "0.61527175", "0.6151323", "0.6149572", "0.6146792", "0.6146289", "0.61401194", "0.6136234", "0.6135718", "0.6134829", "0.61324555", "0.61318517", "0.6129083" ]
0.0
-1
Computes the height and width of the output of a convolution layer.
def conv_output_shape(
    h_w: Tuple[int, int],
    kernel_size: int = 1,
    stride: int = 1,
    pad: int = 0,
    dilation: int = 1,
):
    h = floor(
        ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
    )
    w = floor(
        ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
    )
    return h, w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w", "def convolution_shape(input_shape, n_filters, filter_shape, stride, padding):\n img_height, img_width, _ = input_shape\n height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride) + 1\n width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride) + 1\n\n return int(height), int(width), n_filters", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = 
num_output\n return output_shape", "def conv_dims(self):\n img_w = np.shape(self.image)[0]\n img_h = np.shape(self.image)[1]\n \n x = (img_w - self.size) // self.stride\n y = (img_h - self.size) // self.stride\n \n return x, y", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def stride_height(self):\n\t\treturn self.strides_shape_param('H')", "def get_img_output_length(width, height):\n def get_output_length(input_length):\n return input_length//16\n\n return get_output_length(width), get_output_length(height)", "def num_conv_locations(input_shape, filter_shape, strides, padding):\n if len(input_shape) != 4 and len(input_shape) != 3:\n raise ValueError(\"input_shape must be length 4, corresponding to a Conv2D,\"\n \" or length 3, corresponding to a Conv1D.\")\n if len(input_shape) != len(filter_shape):\n raise ValueError(\"Inconsistent number of dimensions between input and \"\n \"filter for convolution\")\n\n if strides is None:\n if len(input_shape) == 4:\n strides = [1, 1, 1, 1]\n else:\n strides = [1, 1, 1]\n\n # Use negative integer division to implement 'rounding up'.\n # Formula for convolution shape taken from:\n # http://machinelearninguru.com/computer_vision/basics/convolution/convolution_layer.html\n if len(input_shape) == 3:\n if padding is not None and padding.lower() == \"valid\":\n out_width = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n else:\n out_width = -(-input_shape[1] // strides[1])\n\n return out_width\n else:\n if padding is not None and padding.lower() == \"valid\":\n out_height = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n out_width = -(-(input_shape[2] - filter_shape[1] + 1) // strides[2])\n else:\n out_height = -(-input_shape[1] // strides[1])\n out_width = -(-input_shape[2] // strides[2])\n\n return out_height * out_width", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def output_width_2d(layer, width):\n assert isinstance(layer, (torch.nn.Conv2d, torch.nn.MaxPool2d))\n padding = _value_at_axis(layer.padding, 1)\n dilation = _value_at_axis(layer.dilation, 1)\n kernel_size = _value_at_axis(layer.kernel_size, 1)\n stride = _value_at_axis(layer.stride, 1)\n return math.floor((width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)", "def output_shape_conv_and_pool_layer(rows: int,\n columns: int,\n kernel: int,\n stride: int = 1,\n padding: int = 0,\n dilatation: float = 1.) 
-> Tuple[int, int]:\n return (\n int((rows + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n int((columns + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n )", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def conv(input, inch, outch, filter_h, filter_w, stride_h, stride_w, padding='SAME', name='conv_layer'):\n with tf.name_scope(name) as scope:\n layer = tf.layers.conv2d(input, outch, filter_h, strides=(stride_h, stride_w), padding=\"same\",\n activation=tf.nn.relu)\n return layer", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_dim(self) -> int:\n return 2 * self._hidden_dim", "def output_height_2d(layer, height):\n assert isinstance(layer, (torch.nn.Conv2d, torch.nn.MaxPool2d))\n padding = _value_at_axis(layer.padding, 0)\n dilation = _value_at_axis(layer.dilation, 0)\n kernel_size = _value_at_axis(layer.kernel_size, 0)\n stride = _value_at_axis(layer.stride, 0)\n return math.floor((height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n weight_init=weight, has_bias=False, pad_mode=\"valid\")", "def outputSize(in_size, kernel_size, stride, padding):\n output = int((in_size - kernel_size + 2 * padding) / stride) + 1\n return output", "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(\n 
in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n weight_init=weight,\n has_bias=False,\n pad_mode=\"valid\",\n )", "def input_image_size(interpreter):\n _, height, width, channels = interpreter.get_input_details()[0]['shape']\n return width, height, channels", "def conv(n_inputs, n_filters, kernel_size=3, stride=1, bias=False) -> torch.nn.Conv2d:\n return nn.Conv2d(n_inputs, n_filters,\n kernel_size=kernel_size, stride=stride,\n padding=kernel_size//2, bias=bias)", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):\n conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding=\"same\")\n output = conv_layer(conv1d_placeholder)\n output_width = output.axes.find_by_name(\"W\")[0].length\n assert output_width == np.ceil(width / float(stride)), (\"Same convolution output width != \"\n \"ceil(input_width / stride): {} != \"\n \"ceil({} / {})\").format(output_width,\n width,\n stride)", "def conv2d(x, n_filters,\n n_in = 0,\n k_h=5, k_w=5,\n stride_h=2, stride_w=2,\n stddev=0.02,\n activation=lambda x: x,\n bias=False,\n padding='VALID',\n name=\"Conv2D\"):\n with tf.variable_scope(name):\n with tf.name_scope('weights'):\n if(n_in == 0):\n w = tf.get_variable(\n 'w', [k_h, k_w, x.get_shape()[-1], n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n else:\n w = tf.get_variable(\n 'w', [k_h, k_w, n_in, n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(w, name + '/weights')\n with tf.name_scope('conv'): \n conv = tf.nn.conv2d(\n x, w, strides=[1, stride_h, stride_w, 1], padding=padding)\n if bias:\n with tf.name_scope('biases'):\n b = tf.get_variable(\n 'b', [n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(b, name + '/bias')\n with tf.name_scope('conv'): \n conv = conv + b\n \n with tf.name_scope('conv'): \n tf.histogram_summary(name + '/conv', conv) \n return conv", "def compute_size(h, w, n):\n\n res = []\n for x in [h, w]:\n for i in range(n):\n x = compute_conv(x, 3, 1, 1)\n x = compute_pool(x)\n res.append(x)\n return res", "def get_dim(self, name):\n \n if name==\"input\":\n return (self.num_channels,)+self.fm_size;\n elif name==\"output\":\n if self.border_mode==\"same\":\n return (self.num_channels,)+self.fm_size;\n else:\n return ((self.num_filters,)+\n conv.ConvOp.getOutputShape(self.fm_size, self.filter_size,\n self.step, self.border_mode));", "def stride_width(self):\n\t\treturn self.strides_shape_param('W')", "def complexity_conv2d(cx, w_in, w_out, k, stride, padding, groups=1, bias=False):\n h, w, flops, params, acts = cx[\"h\"], cx[\"w\"], cx[\"flops\"], cx[\"params\"], cx[\"acts\"]\n h = (h + 2 * padding - k) // stride + 1\n w = (w + 2 * padding - k) // stride + 1\n flops += k * k * w_in * w_out * h * w // groups\n params += k * k * w_in * w_out // groups\n flops += w_out if bias else 0\n params += w_out if bias else 0\n acts += w_out * h * w\n return {\"h\": h, \"w\": w, \"flops\": flops, \"params\": params, \"acts\": acts}", "def output_size(self) -> int:\n return self.output_dim", "def conv(dims, inplanes, outplanes, kernel_size, stride, dilation, bias):\n padding = math.floor((dilation * (kernel_size - 1) + 2 - stride) / 2)\n if dims == 2:\n return nn.Conv2d(inplanes, outplanes, kernel_size, stride,\n padding, dilation, bias=bias)\n elif dims == 3:\n return nn.Conv3d(inplanes, outplanes, kernel_size, stride,\n padding, 
dilation, bias=bias)\n else:\n raise ValueError('dimension of conv must be 2 or 3')", "def clConvolution(self, size, mask):", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def get_conv_rows_cols(height, width):\n dims = [height, width]\n for i in range(len(dims)):\n # (3, 3) zeropad\n dims[i] += 6\n for filter_size in [7, 3, 1, 1]:\n # all strides use valid padding, formula is (W - F + 2P) / S + 1\n dims[i] = (dims[i] - filter_size) // 2 + 1\n\n return dims", "def conv_out_shape(dims, conv):\n kernel_size, stride, pad, dilation = conv.kernel_size, conv.stride, conv.padding, conv.dilation\n return tuple(int(((dims[i] + (2 * pad[i]) - (dilation[i]*(kernel_size[i]-1))-1)/stride[i])+1) for i in range(len(dims)))", "def Conv2d(X, size, stride, init, name, padding, activation):\n print(name, size, size[-1])\n w = get_weights(shape=size, name='W_' + name, init=init)\n b = get_weights(shape=[size[-1]], name='b_' + name, init=init)\n \n conv = tf.nn.conv2d(X, w, strides=[1, stride, stride, 1], \n padding=padding) + b\n \n ## Applying activation\n\n if activation == 'relu':\n h_conv = tf.nn.relu(conv)\n elif activation == 'sigmoid':\n h_conv = tf.nn.sigmoid(conv)\n elif activation == 'leaky_relu':\n h_conv = tf.nn.leaky_relu(conv)\n \n return h_conv", "def conv2d(x, w, stride=1, b=None):\n h = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')\n if b is not None:\n h += b\n return h", "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=True)) # bias=False\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "def conv_to_fc_size(\n input_shape, conv_depth, pools,\n stride=[2, 2, 2], padding='SAME',\n dropout_keep_prob=1.0):\n h, w, d = input_shape\n if padding == 'SAME':\n for i in range(pools):\n h = math.ceil(float(h) / float(stride[0]))\n w = math.ceil(float(w) / float(stride[1]))\n d = math.ceil(float(d) / float(stride[2])) \n else:\n # 'VALID' padding\n pass\n \n return conv_depth * h * w * d", "def _conv_bn_layer(cnn_input, filters, kernel_size, strides, layer_id):\n output = tf.keras.layers.Conv2D(\n filters=filters, kernel_size=kernel_size, strides=strides, padding=\"same\",\n activation=\"linear\", name=\"cnn_{}\".format(layer_id))(cnn_input)\n output = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(output)\n return output", "def output_dim(self):\n return self._output_dim", "def convnet_layers( inputs, widths, mode ):\n\n training = (mode == \"train\")\n \n with tf.variable_scope( \"convnet\" ): # h,w\n \n #print(inputs.shape)\n x = conv_layer( inputs, layer_params[0], training ) \n #print(x.shape)\n x = conv_layer( x, layer_params[1], training ) \n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool2' )\n #print(x.shape)\n x = conv_layer( x, layer_params[2], training ) \n x = conv_layer( x, layer_params[3], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool4' )\n #print(x.shape)\n x = conv_layer( x, layer_params[4], training ) \n x = conv_layer( x, layer_params[5], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool6') \n #print(x.shape)\n x = conv_layer( x, layer_params[6], training ) \n x = conv_layer( x, layer_params[7], training )\n \n x = tf.layers.max_pooling2d( x, [2, 1], [2, 1], \n padding='valid', \n name='pool8' ) \n\n #print(x.shape)\n\n # squeeze row dim\n x = tf.squeeze( x, axis=1, name='features' )\n\n #print(x.shape)\n\n sequence_length = 
get_sequence_lengths( widths ) \n\n return x, sequence_length", "def convolve_grayscale_same(images, kernel):\n\n # num images\n n_images = images.shape[0]\n\n # input_width and input_height\n i_h = images.shape[1]\n i_w = images.shape[2]\n\n # kernel_width and kernel_height\n\n k_h = kernel.shape[0]\n k_w = kernel.shape[1]\n\n # pad_h ⊛ = int (k_h - 1)/2\n # pad_w ⊛ = int (k_w - 1)/2\n p_h = int((k_h - 1) / 2)\n p_w = int((k_w - 1) / 2)\n\n if k_h % 2 == 0:\n p_h = int(k_h / 2)\n\n if k_w % 2 == 0:\n p_w = int(k_w / 2)\n\n # output_height and output_width\n # H = i_h + 2pad - k_h + 1, W = i_w + 2pad - k_w + 1\n o_h = i_h + 2 * p_h - k_h + 1\n o_w = i_w + 2 * p_w - k_w + 1\n\n if k_h % 2 == 0:\n o_h = i_h + 2 * p_h - k_h\n\n if k_w % 2 == 0:\n o_w = i_w + 2 * p_w - k_w\n\n # creating outputs of size: n_images, o_h x o_w\n outputs = np.zeros((n_images, o_h, o_w))\n\n # creating pad of zeros around the output images\n padded_imgs = np.pad(images,\n pad_width=((0, 0), (p_h, p_h), (p_w, p_w)),\n mode=\"constant\",\n constant_values=0)\n\n # vectorizing the n_images into an array\n imgs_arr = np.arange(0, n_images)\n\n # iterating over the output array and generating the convolution\n for x in range(o_h):\n for y in range(o_w):\n x1 = x + k_h\n y1 = y + k_w\n outputs[imgs_arr, x, y] = np.sum(np.multiply(\n padded_imgs[imgs_arr, x: x1, y: y1], kernel), axis=(1, 2))\n\n return outputs", "def kernel_height(self):\n\t\treturn self.kernel_shape_param('H')", "def conv2d_config(input_shape, output_shape, filter_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if len(input_shape) == 4:\n batch_size = input_shape[0]\n else:\n batch_size = None\n\n input_shape = np.array(input_shape[-3:])\n output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])\n\n # Determine what kind of convolution to use\n if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):\n conv_type = \"NORMAL\"\n elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):\n conv_type = 'FULL'\n # swap input and output shape\n input_shape, output_shape = output_shape, input_shape\n else:\n raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')\n\n filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])\n stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)\n padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1\n\n # Determine what type of padding can be used\n if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):\n padding_type = 'SAME'\n else:\n padding_type = 'VALID'\n\n # get padded input shape\n input_shape[:2] = input_shape[:2] + padding.astype(np.int)\n padded_shape = [batch_size] + input_shape.tolist()\n\n left_padding = np.ceil(padding / 2).astype(np.int)\n right_padding = np.floor(padding / 2).astype(np.int)\n\n padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]\n stride = [1, stride[0], stride[1], 1]\n\n return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type", "def conv(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1, stride=stride, bias=False)", "def conv_block(\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n stride,\r\n dilation=1,\r\n):\r\n pad_mode = 'same'\r\n padding = 0\r\n\r\n dbl = nn.SequentialCell(\r\n [\r\n nn.Conv2d(\r\n in_channels,\r\n out_channels,\r\n 
kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n dilation=dilation,\r\n pad_mode=pad_mode,\r\n ),\r\n nn.BatchNorm2d(out_channels, momentum=0.1),\r\n nn.ReLU(),\r\n ]\r\n )\r\n init_cov(dbl[0])\r\n init_bn(dbl[1])\r\n return dbl", "def cnn(channels=(4, 16, 32), kernel_sizes=(8, 4), strides=(4, 2), in_size=84):\n cnn_layers = []\n output_size = in_size\n\n for i in range(len(channels)-1):\n in_channels, out_channels = channels[i], channels[i+1]\n kernel_size, stride = kernel_sizes[i], strides[i]\n conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n activation = nn.ReLU()\n cnn_layers += [conv, activation]\n output_size = (output_size - kernel_size) / stride + 1\n\n cnn_layers = nn.Sequential(*cnn_layers)\n output_size = int(out_channels * (output_size**2))\n return cnn_layers, output_size", "def out_conv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n\n return (spatial + p2 - k)//s + 1", "def conv_transpose_output_shape(h_w, kernel_size=1, stride=1, pad=0, output_padding=0):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = (h_w[0] - 1) * stride - (2 * pad) + kernel_size[0] + output_padding\n w = (h_w[1] - 1) * stride - (2 * pad) + kernel_size[1] + output_padding\n return h, w", "def get_params(img, output_size):\n c, h, w = img.shape\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = (h - th)//2\n j = (w - tw)//2\n return i, j, th, tw", "def _conv(self, indim, outdim, ksize, stride, padding):\n\n return nn.Sequential(\n nn.BatchNorm2d(indim),\n nn.Conv2d(indim, outdim, ksize, stride, padding),\n self.activ(),\n )", "def conv2d(X,W,b,strides=1):\n \"\"\"\n If the padding = 'SAME', the input and output images are of the same size by implementing\n zero padding on the input. 
(TF will compute using the padding equation from notes 4-12-2018) \n If the padding = 'VALID', the input is not padded and the output image size will be less \n than the input image.\n \"\"\"\n net = tf.nn.conv2d(X,W,strides=[1,strides,strides,1],padding='SAME')\n net = tf.nn.bias_add(net,b) #add bias to each convolved value, but all get the same bias value\n return tf.nn.relu(net) #return the output of the detection layer", "def FCN_grainsize(img_input, bins=22, output_scalar=False):\n\n x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), name='conv1', padding='same')(img_input)\n x = BatchNormalization(axis=3, name='bn_conv1')(x)\n x = Activation('relu')(x)\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(2, 2))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', strides=(1, 1))\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', strides=(2, 2))\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', strides=(1, 1))\n\n x = conv_block(x, 3, [128, 128, 512], stage=4, block='a', strides=(2, 2))\n x = identity_block(x, 3, [128, 128, 512], stage=4, block='b', strides=(1, 1))\n\n if not output_scalar:\n # output (invariant to input size)\n x = Conv2D(filters=bins, kernel_size=(1, 1), strides=(1, 1), name='conv3', padding='same')(x)\n x = GlobalAveragePooling2D()(x)\n x = Activation('softmax', name='histogram_prediction')(x)\n else:\n x = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), name='conv3', padding='same')(x)\n x = GlobalAveragePooling2D()(x)\n\n return Model(inputs=img_input, outputs=x)", "def conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def _conv2d(self, x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def output_dim(self) -> int:\n return (\n self.mlp_hidden_dims[-1]\n if self.mlp_hidden_dims is not None\n else self.blocks_dims[-1]\n )", "def convolution(image, kernel):\n\n #Se encuentra la dimencion de la imagen\n if len(image.shape) == 3: #De 3 dimenciones\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Se cambia a dos dimenciones\n print(\"Nuevas dimenciones: {}\".format(image.shape))\n else:\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n kernel_row, kernel_col = kernel.shape #asigna alto y ancho del filtro\n\n output_x = (image_col - (kernel_col / 2) * 2) + 1 #asigna el ancho del output\n output_y = (image_row - (kernel_row / 2) * 2) + 1 #asigna el alto del output\n \n output = np.zeros([int(output_y), int(output_x)]) #matriz donde se guarda el resultado\n\n padded_size = int((kernel_row - 1) / 2) #Tamaño de padding\n\n #Obtenemos la imagen con padding\n padded_image = padding(image,padded_size)\n \n for row in range(int(output_y)):\n for col in range(int(output_x)):\n output[row, col] = conv_helper(\n padded_image[row:row + kernel_row, \n col:col + kernel_col], kernel)\n \n # Se muestra la imagen en pantalla\n plt.imshow(output, cmap='gray')\n plt.title(\"Edge detection\")\n plt.show()\n\n return output", "def deconv2d(input_, \n output_dims,\n k_h=5, \n k_w=5,\n d_h=2,\n d_w=2,\n stddev=0.02,\n name='deconv2d',\n with_w=False):\n \n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w',\n [k_h, k_w, output_dims[-1], input_.get_shape()[-1]],\n 
initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_dims[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv", "def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)", "def output_dims(self) -> Optional[Tuple[int]]:\n return None", "def snconv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, training=True,\n name='snconv2d'):\n with tf.variable_scope(\n name,\n custom_getter=sn_gettr(training=training, equality_constrained=False)):\n return tf.layers.conv2d(\n input_,\n filters=output_dim,\n kernel_size=(k_h, k_w),\n strides=(d_h, d_w),\n padding='same',\n activation=None,\n use_bias=True,\n kernel_initializer=tf.keras.initializers.VarianceScaling(\n scale=1.0, mode='fan_avg', distribution='uniform'),\n bias_initializer=tf.initializers.zeros(),\n name=name)", "def conv(in_planes,\n out_planes,\n kernel_size=3,\n stride=1,\n padding=1,\n dilation=1,\n groups=1):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=False)", "def l_out_conv(layer_num, kernel_size, pool=False):\n l_out_list = []\n l_in = constants.SHAPE_OF_ONE_DATA_POINT[1]\n for i in range(layer_num):\n l_out = l_out_conv1d(l_in, kernel_size, stride=2)\n l_out = l_out_conv1d(l_out, kernel_size, stride=2)\n\n l_out_list.append(l_out)\n\n if pool:\n pool_size = 3\n l_out = l_out_pool(l_out, pool_size)\n l_out_list.append(l_out)\n l_in = l_out\n\n # make a copy and reverse for decoder size def\n\n l_out_list_copy = copy.deepcopy(l_out_list)\n l_out_list.append(32)\n encoder_sizes = l_out_list\n l_out_list_copy.reverse()\n l_out_list_copy.append(constants.SHAPE_OF_ONE_DATA_POINT[1])\n decoder_sizes = l_out_list_copy\n return encoder_sizes, decoder_sizes", "def conv(input, output, size, stride,\n reuse=False,\n norm=instance_norm,\n activation=leaky_relu,\n dropout=1.0,\n padding='VALID',\n pad_size=None,\n is_training=True,\n name='conv'):\n with tf.variable_scope(name, reuse=reuse):\n dropout = 1.0 if dropout is None else dropout\n # Pre pad the input feature map\n x = pad(input, pad_size)\n # Apply convolution\n x = slim.conv2d(x, output, size, stride,\n activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev=0.02),\n padding=padding)\n # Apply dropout\n x = tf.nn.dropout(x, dropout)\n # Apply activation\n x = activation(x) if activation else x\n # Apply normalization\n x = norm(x, is_training) if norm else x\n return x", "def conv(h,x):\n\n final_conv_dim=(512,512) ## dimension of the convolution result before cropping\n x_dim=(x.size(2),x.size(3)) ## dimension of x\n h_dim=(h.size(2),h.size(3)) ## dimension of h\n crop_dim=x_dim ## image obtained after cropping is the same dimension as the image x\n\n padding=(final_conv_dim[0]-(x_dim[0]-h_dim[0]+1),final_conv_dim[1]-(x_dim[1]-h_dim[1]+1)) ## calculate the amount 
of padding required given final_conv_dim, x_dim and h_dim\n\n x_pad=F.pad(x,(padding[0]//2,padding[0]//2+1,padding[1]//2,padding[1]//2+1)) ## pad x\n y=F.conv2d(x_pad,h.flip(2,3),padding=0) ## convolve x_pad with h\n \n ## starting and ending values along the column and the rows for cropping\n starti=(final_conv_dim[0]-crop_dim[0])//2 \n endi=crop_dim[0]+starti\n startj=(final_conv_dim[1]-crop_dim[1])//2\n endj=crop_dim[1]+startj\n\n ## Cropping\n y=y[:,:,starti:endi,startj:endj]\n \n return y", "def get_convolution_op(input_shape, output_shape, kernel_shape):\n filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)\n if conv_type == 'NORMAL':\n def conv_op(inputs, weight, name='generic_convolution'):\n with tf.name_scope(name):\n if padding_type=='VALID' and np.sum(padding) > 0:\n inputs = tf.pad(inputs, padding, name='padding')\n return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')\n\n else:\n def conv_op(inputs, weight, name='generic_convolution'):\n if padding_type=='SAME':\n padded_output = [padded_shape[0]] + output_shape[-3:]\n else:\n padded_output = padded_shape\n with tf.name_scope(name):\n if padded_output[0] is None:\n batch_size = tf.shape(inputs)[0]\n padded_output = [batch_size] + padded_output[1:]\n\n output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')\n if padding_type=='VALID' and np.sum(padding) > 0:\n output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],\n [-1] + output_shape[-3:], name='cropping')\n return output\n\n return filter_shape, conv_op", "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def _calculate_output_image_size(input_image_size: list[int], stride: int | tuple[int]):\n\n # checks to extract integer stride in case tuple was received\n if isinstance(stride, tuple):\n all_strides_equal = all(stride[0] == s for s in stride)\n if not all_strides_equal:\n raise ValueError(f\"unequal strides are not possible, got {stride}\")\n\n stride = stride[0]\n\n # return output image size\n return [int(math.ceil(im_sz / stride)) for im_sz in input_image_size]", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def __find_net_dims(self):\n\n input_params = INPUT_CHANNELS * INPUT_SIZE ** 2\n net_dims = [input_params]\n for w in self._conv_weights + self._lin_weights:\n net_dims.append(w.shape[0])", "def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def 
make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def _conv2d_block(in_f, out_f, *args, **kwargs):\n return nn.Sequential(\n nn.Conv2d(in_f, out_f, *args, **kwargs),\n nn.BatchNorm2d(out_f),\n nn.ReLU(),\n nn.Dropout2d(p=0.2)\n )", "def calculate_output_image_size(input_image_size, stride):\n def get_width_and_height_from_size(x):\n \"\"\"Obtain height and width from x.\n Args:\n x (int, tuple or list): Data size.\n Returns:\n size: A tuple or list (H,W).\n \"\"\"\n if isinstance(x, int):\n return x, x\n if isinstance(x, list) or isinstance(x, tuple):\n return x\n else:\n raise TypeError()\n\n if input_image_size is None:\n return None\n image_height, image_width = get_width_and_height_from_size(input_image_size)\n stride = stride if isinstance(stride, int) else stride[0]\n image_height = int(math.ceil(image_height / stride))\n image_width = int(math.ceil(image_width / stride))\n return [image_height, image_width]", "def conv(self, inputs, filters, kernel_size, strides, padding='SAME', name='conv_layer'):\n input_channels = inputs[-1]\n kernel = tf.Variable(tf.random.truncated_normal(shape=[kernel_size, kernel_size, input_channels, filters]),\n dtype=tf.float32, name='kernel')\n bias = tf.Variable(tf.zeros(shape=[filters]), name='bias')\n conv = tf.nn.conv2d(inputs, filter=kernel,\n strides=[1, strides, strides, 1],\n padding=padding, name='conv')\n out = tf.nn.relu(conv + bias, name='relu')\n return out", "def get_output_shape(self):\n return self.shape", "def conv2d_block(input_tensor, n_filters, kernel_size = 3, batchnorm = True):\n # first layer\n x = layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), kernel_initializer = 'random_uniform', padding = 'same')(input_tensor)\n if batchnorm:\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n \n # second layer\n x = layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\\\n kernel_initializer = 'he_normal', padding = 'same')(x)\n if batchnorm:\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n \n return x", "def conv2d(x_tensor, conv_num_outputs, conv_ksize, conv_strides, name = \"conv\"):\n with tf.name_scope(name):\n # Create weight and bias\n with tf.name_scope(\"weights\"):\n W = tf.Variable(tf.truncated_normal(list(conv_ksize) + 
[x_tensor.get_shape().as_list()[3], conv_num_outputs], stddev=0.1))\n variable_summaries(W)\n with tf.name_scope(\"biases\"):\n b = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]))\n variable_summaries(b)\n\n # Apply convolution and add bias\n with tf.name_scope('Wx_plus_b'):\n conv = tf.nn.conv2d(x_tensor, W, strides=[1] + list(conv_strides) + [1], padding='SAME') + b\n tf.summary.histogram('pre_activations', conv)\n \n\n return conv", "def conv_layer(self, input_data, out_dims, name):\n\n with tf.variable_scope(name_or_scope=name):\n [_, _, _, channel_num] = input_data.get_shape().as_list()\n w = tf.get_variable(\"w\", [3, 3, channel_num, out_dims],\n initializer=tf.contrib.layers.variance_scaling_initializer(),\n trainable=self.is_train())\n conv = tf.nn.conv2d(input_data, w, [1, 1, 1, 1], \"SAME\", name=\"conv\")\n bn = tf.contrib.layers.batch_norm(conv, scope=\"bn\", trainable=self.is_train())\n relu = tf.nn.relu(bn, name=\"relu\")\n return relu", "def get_pad_size(img_h, img_w, out_size):\n out_h, out_w = out_size if isinstance(out_size, tuple) else (out_size, out_size)\n assert out_h >= img_h, f'out_h must be < img_h. img_h is {img_h}'\n assert out_w >= img_w, f'out_w must be < img_w. img_w is {img_w}'\n \n nx_pad = max(out_w - img_w, 0)\n ny_pad = max(out_h - img_h, 0)\n return ny_pad, nx_pad" ]
[ "0.7580664", "0.72124845", "0.72124845", "0.7204811", "0.72040206", "0.71981996", "0.71090627", "0.7094966", "0.70632535", "0.6922401", "0.6799323", "0.6729911", "0.66794556", "0.6671254", "0.66672844", "0.66361433", "0.66361433", "0.66361433", "0.660082", "0.6562536", "0.65303624", "0.6505865", "0.6442873", "0.6404379", "0.639757", "0.63920754", "0.63692856", "0.636866", "0.63665175", "0.6319103", "0.6307988", "0.6304073", "0.6300316", "0.6300081", "0.62947965", "0.6292601", "0.62708855", "0.62438065", "0.6242514", "0.62296724", "0.62242854", "0.62033606", "0.61982065", "0.61757857", "0.61453414", "0.6139367", "0.61326957", "0.61118144", "0.6091966", "0.6066268", "0.6060966", "0.60555875", "0.6038998", "0.6028681", "0.6026162", "0.6006148", "0.6005082", "0.6002833", "0.6000791", "0.59995115", "0.5991748", "0.59843725", "0.5981266", "0.5976795", "0.59682757", "0.5962989", "0.59429866", "0.59429866", "0.5942431", "0.5932407", "0.59282327", "0.5923877", "0.5906105", "0.5901775", "0.5899744", "0.58979875", "0.5891306", "0.588995", "0.58811724", "0.5873833", "0.58676463", "0.58676463", "0.58676463", "0.58676463", "0.58661985", "0.586341", "0.5856703", "0.5854656", "0.58544207", "0.5853057", "0.5845803", "0.5840491", "0.582839", "0.58252543", "0.58193046", "0.58133656", "0.5801944", "0.5801145", "0.57991517", "0.5797808" ]
0.7131408
6
Given a Unity Environment and a QNetwork, this method will generate a buffer of Experiences obtained by running the Environment with the Policy derived from the QNetwork.
def generate_trajectories( env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float ): # Create an empty Buffer buffer: Buffer = [] # Reset the environment env.reset() # Read and store the Behavior Name of the Environment behavior_name = list(env.behavior_specs)[0] # Read and store the Behavior Specs of the Environment spec = env.behavior_specs[behavior_name] # Create a Mapping from AgentId to Trajectories. This will help us create # trajectories for each Agents dict_trajectories_from_agent: Dict[int, Trajectory] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_obs_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_action_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to cumulative reward (Only for reporting) dict_cumulative_reward_from_agent: Dict[int, float] = {} # Create a list to store the cumulative rewards obtained so far cumulative_rewards: List[float] = [] while len(buffer) < buffer_size: # While not enough data in the buffer # Get the Decision Steps and Terminal Steps of the Agents decision_steps, terminal_steps = env.get_steps(behavior_name) # For all Agents with a Terminal Step: for agent_id_terminated in terminal_steps: # Create its last experience (is last because the Agent terminated) last_experience = Experience( obs=dict_last_obs_from_agent[agent_id_terminated].copy(), reward=terminal_steps[agent_id_terminated].reward, done=not terminal_steps[agent_id_terminated].interrupted, action=dict_last_action_from_agent[agent_id_terminated].copy(), next_obs=terminal_steps[agent_id_terminated].obs[0], ) # Clear its last observation and action (Since the trajectory is over) dict_last_obs_from_agent.pop(agent_id_terminated) dict_last_action_from_agent.pop(agent_id_terminated) # Report the cumulative reward cumulative_reward = ( dict_cumulative_reward_from_agent.pop(agent_id_terminated) + terminal_steps[agent_id_terminated].reward ) cumulative_rewards.append(cumulative_reward) # Add the Trajectory and the last experience to the buffer buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated)) buffer.append(last_experience) # For all Agents with a Decision Step: for agent_id_decisions in decision_steps: # If the Agent does not have a Trajectory, create an empty one if agent_id_decisions not in dict_trajectories_from_agent: dict_trajectories_from_agent[agent_id_decisions] = [] dict_cumulative_reward_from_agent[agent_id_decisions] = 0 # If the Agent requesting a decision has a "last observation" if agent_id_decisions in dict_last_obs_from_agent: # Create an Experience from the last observation and the Decision Step exp = Experience( obs=dict_last_obs_from_agent[agent_id_decisions].copy(), reward=decision_steps[agent_id_decisions].reward, done=False, action=dict_last_action_from_agent[agent_id_decisions].copy(), next_obs=decision_steps[agent_id_decisions].obs[0], ) # Update the Trajectory of the Agent and its cumulative reward dict_trajectories_from_agent[agent_id_decisions].append(exp) dict_cumulative_reward_from_agent[agent_id_decisions] += ( decision_steps[agent_id_decisions].reward ) # Store the observation as the new "last observation" dict_last_obs_from_agent[agent_id_decisions] = ( decision_steps[agent_id_decisions].obs[0] ) # Generate an action for all the Agents that requested a decision # Compute the values for each action given the observation actions_values = ( q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy() ) # Pick 
the best action using argmax print("ACTION VALS", actions_values) actions_values += epsilon * ( np.random.randn(actions_values.shape[0], actions_values.shape[1]) ).astype(np.float32) actions = np.argmax(actions_values, axis=1) actions.resize((len(decision_steps), 1)) # Store the action that was picked, it will be put in the trajectory later for agent_index, agent_id in enumerate(decision_steps.agent_id): dict_last_action_from_agent[agent_id] = actions[agent_index] # Set the actions in the environment # Unity Environments expect ActionTuple instances. action_tuple = ActionTuple() action_tuple.add_discrete(actions) env.set_actions(behavior_name, action_tuple) # Perform a step in the simulation env.step() return buffer, np.mean(cumulative_rewards)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n \n model_results0 = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n \n dist0 = model_results0['dist'] ### NOTE\n value0 = model_results0['value']\n memory0 = model_results0['memory']\n msg0 = model_results0['message']\n dists_speaker0 = model_results0['dists_speaker']\n extra_predictions0 = model_results0['extra_predictions']\n #self.rng_states0[i] = model_results0['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states0[i] = model_results0['cuda_rng_states']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n model_results1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(msg0.transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1)) ### NOTE\n \n dist1 = model_results1['dist']\n value1 = model_results1['value']\n memory1 = model_results1['memory']\n msg1 = model_results1['message']\n dists_speaker1 = model_results1['dists_speaker']\n extra_predictions1 = model_results1['extra_predictions']\n #self.rng_states1[i] = model_results1['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states1[i] = model_results1['cuda_rng_states']\n \n #state = torch.get_rng_state()\n action0 = dist0.sample()\n \n #torch.set_rng_state(state)\n action1 = dist1.sample()\n\n obs0, reward0, done0, env_info0 = self.env0.step(action0.cpu().numpy())\n \n obs1, reward1, done1, env_info1 = self.env1.step(action1.cpu().numpy())\n \n # mask any rewards based on (previous) been_done\n rewardos0 = [0] * self.num_procs\n rewardos1 = [0] * self.num_procs\n for j in range(self.num_procs):\n rewardos0[j] = reward0[j] * (1 - self.been_done0[j].item())\n rewardos1[j] = reward1[j] * (1 - self.been_done1[j].item())\n \n reward0 = tuple(rewardos0)\n reward1 = tuple(rewardos1)\n \n #reward0 = tuple(0.5*r0 + 0.5*r1 for r0, r1 in zip(reward0, reward1)) ### NOTE\n #reward1 = reward0\n \n # reward sender agent (0) equally for success of receiver agent (1) ### NOTE\n reward0 = reward1\n \n self.been_done0 = (1 - (1 - self.been_done0) * (1 - torch.tensor(done0, device=self.device, dtype=torch.float)))\n self.been_done1 = (1 - (1 - self.been_done1) * (1 - torch.tensor(done1, device=self.device, dtype=torch.float)))\n both_done = self.been_done0 * self.been_done1\n \n # reset if receiver agent (1) is done ### NOTE\n both_done = self.been_done1\n \n obs0 = self.env0.sync_reset(both_done, obs0)\n obs1 = self.env1.sync_reset(both_done, obs1)\n \n if self.aux_info:\n env_info0 = self.aux_info_collector0.process(env_info0)\n # env_info0 = self.process_aux_info0(env_info0)\n \n env_info1 = self.aux_info_collector1.process(env_info1)\n # env_info1 = self.process_aux_info1(env_info1)\n\n # Update experiences values\n\n self.obss0[i] = self.obs0\n self.obs0 = obs0\n \n self.obss1[i] = self.obs1\n self.obs1 = obs1\n\n self.memories0[i] = self.memory0\n self.memory0 = memory0\n \n self.memories1[i] = self.memory1\n self.memory1 = memory1\n \n self.msgs0[i] = self.msg0\n self.msg0 = msg0\n \n self.msgs1[i] = self.msg1\n self.msg1 = msg1\n \n self.msgs_out0[i] = msg0\n \n self.msgs_out1[i] = msg1\n\n self.masks0[i] = self.mask0\n #self.mask0 = 1 - torch.tensor(done0, device=self.device, dtype=torch.float)\n self.mask0 = 1 - 
both_done\n self.actions0[i] = action0\n self.values0[i] = value0\n if self.reshape_reward is not None:\n self.rewards0[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs0, action0, reward0, done0)\n ], device=self.device)\n else:\n self.rewards0[i] = torch.tensor(reward0, device=self.device)\n self.log_probs0[i] = dist0.log_prob(action0)\n self.speaker_log_probs0[i] = self.acmodel0.speaker_log_prob(dists_speaker0, msg0)\n \n self.masks1[i] = self.mask1\n #self.mask1 = 1 - torch.tensor(done1, device=self.device, dtype=torch.float)\n self.mask1 = 1 - both_done\n self.actions1[i] = action1\n self.values1[i] = value1\n if self.reshape_reward is not None:\n self.rewards1[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs1, action1, reward1, done1)\n ], device=self.device)\n else:\n self.rewards1[i] = torch.tensor(reward1, device=self.device)\n self.log_probs1[i] = dist1.log_prob(action1)\n self.speaker_log_probs1[i] = self.acmodel1.speaker_log_prob(dists_speaker1, msg1)\n\n if self.aux_info:\n self.aux_info_collector0.fill_dictionaries(i, env_info0, extra_predictions0)\n \n self.aux_info_collector1.fill_dictionaries(i, env_info1, extra_predictions1)\n\n # Update log values\n\n self.log_episode_return0 += torch.tensor(reward0, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return0 += self.rewards0[i]\n \n self.log_episode_return1 += torch.tensor(reward1, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return1 += self.rewards1[i]\n \n self.log_episode_num_frames0 += torch.ones(self.num_procs, device=self.device)\n self.log_episode_num_frames1 += torch.ones(self.num_procs, device=self.device)\n \n #for i, done_ in enumerate(done0):\n for i in range(self.num_procs):\n #if done_:\n if both_done[i]:\n self.log_done_counter0 += 1\n self.log_return0.append(self.log_episode_return0[i].item())\n self.log_reshaped_return0.append(self.log_episode_reshaped_return0[i].item())\n self.log_num_frames0.append(self.log_episode_num_frames0[i].item())\n \n #for i, done_ in enumerate(done1):\n #if done_:\n self.log_done_counter1 += 1\n self.log_return1.append(self.log_episode_return1[i].item())\n self.log_reshaped_return1.append(self.log_episode_reshaped_return1[i].item())\n self.log_num_frames1.append(self.log_episode_num_frames1[i].item())\n\n # if both are done, reset both to not done\n self.been_done0 *= (1 - both_done)\n self.been_done1 *= (1 - both_done)\n\n self.log_episode_return0 *= self.mask0\n self.log_episode_reshaped_return0 *= self.mask0\n self.log_episode_num_frames0 *= self.mask0\n\n self.log_episode_return1 *= self.mask1\n self.log_episode_reshaped_return1 *= self.mask1\n self.log_episode_num_frames1 *= self.mask1\n\n # Add advantage and return to experiences\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n tmp = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n next_value0 = tmp['value']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n next_value1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(tmp['message'].transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1))['value'] ### NOTE\n\n for i in reversed(range(self.num_frames_per_proc)):\n next_mask0 = self.masks0[i+1] if i < 
self.num_frames_per_proc - 1 else self.mask0\n next_value0 = self.values0[i+1] if i < self.num_frames_per_proc - 1 else next_value0\n next_advantage0 = self.advantages0[i+1] if i < self.num_frames_per_proc - 1 else 0\n \n next_mask1 = self.masks1[i+1] if i < self.num_frames_per_proc - 1 else self.mask1\n next_value1 = self.values1[i+1] if i < self.num_frames_per_proc - 1 else next_value1\n next_advantage1 = self.advantages1[i+1] if i < self.num_frames_per_proc - 1 else 0\n\n delta0 = self.rewards0[i] + self.discount * next_value0 * next_mask0 - self.values0[i]\n self.advantages0[i] = delta0 + self.discount * self.gae_lambda * next_advantage0 * next_mask0\n \n delta1 = self.rewards1[i] + self.discount * next_value1 * next_mask1 - self.values1[i]\n self.advantages1[i] = delta1 + self.discount * self.gae_lambda * next_advantage1 * next_mask1\n\n # Flatten the data correctly, making sure that\n # each episode's data is a continuous chunk\n\n exps0 = DictList()\n exps0.obs = [self.obss0[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n exps1 = DictList()\n exps1.obs = [self.obss1[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n # In commments below T is self.num_frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n\n # T x P x D -> P x T x D -> (P * T) x D\n exps0.memory = self.memories0.transpose(0, 1).reshape(-1, *self.memories0.shape[2:])\n \n exps1.memory = self.memories1.transpose(0, 1).reshape(-1, *self.memories1.shape[2:])\n \n exps0.message = self.msgs0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message = self.msgs1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n exps0.message_out = self.msgs_out0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message_out = self.msgs_out1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n #exps0.rng_states = self.rng_states0.transpose(0, 1).reshape(-1, *self.rng_states0.shape[2:])\n #if torch.cuda.is_available():\n # exps0.cuda_rng_states = self.cuda_rng_states0.transpose(0, 1).reshape(-1, *self.cuda_rng_states0.shape[2:])\n \n #exps1.rng_states = self.rng_states1.transpose(0, 1).reshape(-1, *self.rng_states1.shape[2:])\n #if torch.cuda.is_available():\n # exps1.cuda_rng_states = self.cuda_rng_states1.transpose(0, 1).reshape(-1, *self.cuda_rng_states1.shape[2:])\n \n # T x P -> P x T -> (P * T) x 1\n exps0.mask = self.masks0.transpose(0, 1).reshape(-1).unsqueeze(1)\n \n exps1.mask = self.masks1.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps0.action = self.actions0.transpose(0, 1).reshape(-1)\n exps0.value = self.values0.transpose(0, 1).reshape(-1)\n exps0.reward = self.rewards0.transpose(0, 1).reshape(-1)\n exps0.advantage = self.advantages0.transpose(0, 1).reshape(-1)\n exps0.returnn = exps0.value + exps0.advantage\n exps0.log_prob = self.log_probs0.transpose(0, 1).reshape(-1)\n exps0.speaker_log_prob = self.speaker_log_probs0.transpose(0, 1).reshape(-1)\n \n exps1.action = self.actions1.transpose(0, 1).reshape(-1)\n exps1.value = self.values1.transpose(0, 1).reshape(-1)\n exps1.reward = self.rewards1.transpose(0, 1).reshape(-1)\n exps1.advantage = self.advantages1.transpose(0, 1).reshape(-1)\n exps1.returnn = exps1.value + exps1.advantage\n exps1.log_prob = 
self.log_probs1.transpose(0, 1).reshape(-1)\n exps1.speaker_log_prob = self.speaker_log_probs1.transpose(0, 1).reshape(-1)\n\n if self.aux_info:\n exps0 = self.aux_info_collector0.end_collection(exps0)\n \n exps1 = self.aux_info_collector1.end_collection(exps1)\n\n # Preprocess experiences\n\n exps0.obs = self.preprocess_obss(exps0.obs, device=self.device)\n\n exps1.obs = self.preprocess_obss(exps1.obs, device=self.device)\n\n # Log some values\n\n keep0 = max(self.log_done_counter0, self.num_procs)\n\n keep1 = max(self.log_done_counter1, self.num_procs)\n\n log0 = {\n \"return_per_episode\": self.log_return0[-keep0:],\n \"reshaped_return_per_episode\": self.log_reshaped_return0[-keep0:],\n \"num_frames_per_episode\": self.log_num_frames0[-keep0:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter0,\n }\n\n log1 = {\n \"return_per_episode\": self.log_return1[-keep1:],\n \"reshaped_return_per_episode\": self.log_reshaped_return1[-keep1:],\n \"num_frames_per_episode\": self.log_num_frames1[-keep1:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter1,\n }\n\n self.log_done_counter0 = 0\n self.log_return0 = self.log_return0[-self.num_procs:]\n self.log_reshaped_return0 = self.log_reshaped_return0[-self.num_procs:]\n self.log_num_frames0 = self.log_num_frames0[-self.num_procs:]\n\n self.log_done_counter1 = 0\n self.log_return1 = self.log_return1[-self.num_procs:]\n self.log_reshaped_return1 = self.log_reshaped_return1[-self.num_procs:]\n self.log_num_frames1 = self.log_num_frames1[-self.num_procs:]\n\n return exps0, log0, exps1, log1", "def collect_experience(env_, agent_, size):\n env_.reset()\n state, reward, done, _ = env_.step(env_.action_space.sample())\n for data in range(size):\n action = env_.action_space.sample()\n next_state, reward, done, _ = env_.step(action)\n # penalize reward based on the position of the cart\n reward = max(0, reward * (1 - abs(next_state[0]/2.4)))\n if done:\n next_state = np.zeros(state.shape)\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n env_.reset()\n state, reward, done, _ = env_.step(env.action_space.sample())\n else:\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n state = next_state", "def append(self, experience: Experience) -> None:\n self.buffer.append(experience)", "def SendExperiences(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def explorer(global_rb, queue, trained_steps, n_transition,\n is_training_done, lock, env_fn, policy_fn,\n buffer_size=1024, max_transition=None,\n episode_max_steps=1000):\n env = env_fn()\n policy = policy_fn(env, \"Explorer\", global_rb.get_buffer_size())\n local_rb = ReplayBuffer(obs_shape=env.observation_space.shape,\n act_dim=env.action_space.low.size,\n size=buffer_size)\n\n s = env.reset()\n episode_steps = 0\n total_reward = 0.\n total_rewards = []\n start = time.time()\n sample_at_start = 0\n\n while not is_training_done.is_set():\n # Periodically copy weights of explorer\n if not queue.empty():\n actor_weights, critic_weights, critic_target_weights = queue.get()\n update_target_variables(policy.actor.weights, actor_weights, tau=1.)\n update_target_variables(policy.critic.weights, critic_weights, tau=1.)\n update_target_variables(policy.critic_target.weights, critic_target_weights, tau=1.)\n\n n_transition.value += 1\n 
episode_steps += 1\n a = policy.get_action(s)\n s_, r, done, _ = env.step(a)\n done_flag = done\n if episode_steps == env._max_episode_steps:\n done_flag = False\n total_reward += r\n local_rb.add(s, a, r, s_, done_flag)\n\n s = s_\n if done or episode_steps == episode_max_steps:\n s = env.reset()\n total_rewards.append(total_reward)\n total_reward = 0\n episode_steps = 0\n\n # Add collected experiences to global replay buffer\n if local_rb.get_stored_size() == buffer_size - 1:\n temp_n_transition = n_transition.value\n samples = local_rb.sample(local_rb.get_stored_size())\n states, next_states, actions, rewards, done = samples[\"obs\"], samples[\"next_obs\"], samples[\"act\"], samples[\"rew\"], samples[\"done\"]\n done = np.array(done, dtype=np.float64)\n td_errors = policy.compute_td_error(\n states, actions, next_states, rewards, done)\n print(\"Grad: {0: 6d}\\tSamples: {1: 7d}\\tTDErr: {2:.5f}\\tAveEpiRew: {3:.3f}\\tFPS: {4:.2f}\".format(\n trained_steps.value, n_transition.value, np.average(np.abs(td_errors).flatten()),\n sum(total_rewards) / len(total_rewards), (temp_n_transition - sample_at_start) / (time.time() - start)))\n total_rewards = []\n lock.acquire()\n global_rb.add(\n states, actions, rewards, next_states, done,\n priorities=np.abs(td_errors)+1e-6)\n lock.release()\n local_rb.clear()\n start = time.time()\n sample_at_start = n_transition.value\n\n if max_transition is not None and n_transition.value >= max_transition:\n is_training_done.set()", "def accumulate_experience(teacher, exp_replay: Supervised_ExperienceReplay, config=student_config):\n\n env = gym.make(\"PongNoFrameskip-v4\")\n env = wrap_deepmind(env, frame_stack=True)\n steps = 0\n while 1:\n state = env.reset()\n state = np.asarray(state)\n done = False\n while not done:\n steps += 1\n teacher_q_value = teacher.get_q(state=np.reshape(state, (1, state.shape[0], state.shape[1], state.shape[2])))\n action = teacher.select_action(teacher_q_value)\n next_state, reward, done, _ = env.step(action + 1)\n next_state = np.asarray(next_state)\n exp_replay.add_memory(state, teacher_q_value, action) # feeding the experience replay\n state = next_state\n if steps > config.OBSERVE: # we have OBSERVE number of exp in exp_replay\n try:\n del env\n except ImportError:\n pass\n break", "def sample(self):\n experiences = random.sample(self.memory, k=self.batch_size)\n \n states = torch.from_numpy(np.vstack([exp.state for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n actions = torch.from_numpy(np.vstack([exp.action for exp in experiences if exp is not None])).long()#.to(deepQAgent.device)\n\n rewards = torch.from_numpy(np.vstack([exp.reward for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n next_states = torch.from_numpy(np.vstack([exp.next_state for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n\n return (states, actions, rewards, next_states)", "def sample_trajectory(self, env, animate_this_episode, is_evaluation):\n # Using current task with meta inside\n env.reset_task(is_evaluation=is_evaluation)\n stats = []\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n ep_steps = 0\n steps = 0\n\n num_samples = max(self.history, self.max_path_length + 1)\n meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))\n rewards = []\n\n while True:\n if animate_this_episode:\n 
env.render()\n time.sleep(0.1)\n\n if ep_steps == 0:\n ob = env.reset()\n # first meta ob has only the observation\n # set a, r, d to zero, construct first meta observation in meta_obs\n # YOUR CODE HERE\n ac = np.zeros(self.ac_dim); rew = np.zeros(self.reward_dim); done = np.zeros(self.terminal_dim)\n meta_obs[steps, :] = np.concatenate((ob, ac, rew, done))\n steps += 1\n\n # index into the meta_obs array to get the window that ends with the current timestep\n # please name the windowed observation `in_` for compatibilty with the code that adds to the replay buffer (lines 418, 420)\n # YOUR CODE HERE\n # padding for input obs size\n sample_action_in_ = meta_obs[steps-self.history:steps, :] if steps>=self.history else np.squeeze(np.concatenate(([meta_obs[0,:], ] * (self.history - steps), meta_obs[:steps, :]), axis=0))\n # need to clear hidden size, in order to avoid previous hidden state as it may be generated by the other totally different task (env setting may be changed)\n hidden = np.zeros((1, self.gru_size), dtype=np.float32)\n\n # get action from the policy\n # YOUR CODE HERE\n # Tensor(\"ob:0\", shape=(?, 1, 10), dtype=float32)\n # print(self.sy_ob_no)\n # Tensor(\"hidden:0\", shape=(?, 32), dtype=float32)\n # print(self.sy_hidden)\n ac = self.sess.run(self.sy_sampled_ac, feed_dict={\n self.sy_ob_no: sample_action_in_.reshape(-1, self.history, self.meta_ob_dim),\n self.sy_hidden: hidden,\n })\n assert len(ac) == 1\n ac = ac[0]\n\n # step the environment\n # YOUR CODE HERE\n ob, rew, done, _= env.step(ac)\n\n ep_steps += 1\n\n done = bool(done) or ep_steps == self.max_path_length\n # construct the meta-observation and add it to meta_obs\n # YOUR CODE HERE\n meta_obs[steps, :] = np.concatenate((ob, ac, [rew], [done]))\n\n rewards.append(rew)\n steps += 1\n\n in_ = meta_obs[steps, :]\n # add sample to replay buffer\n if is_evaluation:\n self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n else:\n self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n\n # start new episode\n if done:\n # compute stats over trajectory\n s = dict()\n s['rewards']= rewards[-ep_steps:]\n s['ep_len'] = ep_steps\n stats.append(s)\n ep_steps = 0\n\n if steps >= num_samples:\n break\n\n return steps, stats", "def __init__(self, gym_env: gym.Env) -> None:\n super().__init__()\n self._queue: Queue = Queue()\n self._action_counter: int = 0\n self.gym_address = str(GYM_CONNECTION_PUBLIC_ID)\n self._agent = ProxyAgent(\n name=\"proxy\", gym_env=gym_env, proxy_env_queue=self._queue\n )\n self._agent_thread = Thread(target=self._agent.start)\n self._active_dialogue = None # type: Optional[GymDialogue]\n self.gym_skill = \"fetchai/gym:0.1.0\"\n self.gym_dialogues = GymDialogues(self.gym_skill, role_from_first_message)", "def Collecting_experiences(self)-> None:\n for epoch_no in range(self.epochs):\n print(\"EPOCH %d\", epoch_no + 1)\n \n #beam_dqn = self.beam_min + int(self.beam_max * epoch_no/self.epochs)\n #egreed = self.egreed_max*(1 - epoch_no/(1.1*self.epochs))\n #self.gamma = self.gamma_max*(1 - epoch_no/(2*self.epochs))\n\n beam_dqn = 1\n egreed = 0.5\n #self.gamma = self.gamma_max\n self.gamma = 0.6\n\n self.tb_writer.add_scalar(\"parameters/beam_dqn\",\n beam_dqn, epoch_no)\n self.tb_writer.add_scalar(\"parameters/egreed\",\n egreed, epoch_no)\n self.tb_writer.add_scalar(\"parameters/gamma\",\n self.gamma, epoch_no)\n if beam_dqn > self.actions_size:\n print(\"The beam_dqn cannot exceed the action size!\")\n print(\"then the beam_dqn = action size\")\n beam_dqn = 
self.actions_size\n\n print(' beam_dqn, egreed, gamma: ', beam_dqn, egreed, self.gamma)\n for _, data_set in self.data_to_train_dqn.items():\n \n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n #valid_sources_raw = data_set.src\n # disable dropout\n #self.model.eval()\n\n i_sample = 0\n for valid_batch in iter(valid_iter):\n freeze_model(self.model)\n batch = Batch(valid_batch\n , self.pad_index, use_cuda=self.use_cuda)\n \n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n # if maximum output length is not globally specified, adapt to src len\n \n if self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n \n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # print(\"Source_raw: \", batch.src)\n # print(\"Target_raw: \", batch.trg_input)\n # print(\"y0: \", prev_y)\n \n \n \n exp_list = []\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n if t != 0:\n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n if t == 0:\n state = hidden[0].squeeze(1).detach().cpu().numpy()[0]\n else:\n state = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n \n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # logits: batch x time=1 x vocab (logits)\n if t != 0:\n if self.state_type == 'hidden':\n state_ = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n state_ = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n # if t == 0:\n # print('states0: ', state, state_)\n\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if random.uniform(0, 1) < egreed:\n i_ran = random.randint(0,beam_dqn-1)\n next_word = torch.argsort(logits, descending=True)[:, :, i_ran]\n else:\n next_word = torch.argmax(logits, dim=-1) # batch x time=1\n # if t != 0:\n a = prev_y.squeeze(1).detach().cpu().numpy()[0]\n #a = next_word.squeeze(1).detach().cpu().numpy()[0]\n \n # print(\"state \",t,\" : \", state )\n # print(\"state_ \",t,\" : \", state_ )\n # print(\"action \",t,\" : \", a )\n # print(\"__________________________________________\")\n\n output.append(next_word.squeeze(1).detach().cpu().numpy())\n\n #tup = (self.memory_counter, state, a, state_)\n \n \n prev_y = next_word\n # check if previous symbol was <eos>\n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n if t != 0:\n self.memory_counter += 1\n tup = (self.memory_counter, state, a, state_, 1)\n exp_list.append(tup)\n \n #print(t)\n # stop predicting if <eos> reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, np.zeros([self.state_size]), 0)\n exp_list.append(tup)\n #print('break')\n break\n 
if t == self.max_output_length-1:\n #print(\"reach the max output\")\n a = 0\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, -1*np.ones([self.state_size]), 1)\n exp_list.append(tup)\n \n \n \n \n #Collecting rewards\n hyp = np.stack(output, axis=1) # batch, time\n\n if epoch_no == 0:\n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n #print(i_sample)\n r = self.Reward(batch.trg, hyp, show=True) # 1 , time-1 \n else:\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n else:\n #print(\"aaaa - \",i_sample)\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n \n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"\\n Sample Collected: \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n # print(\"Target: \", batch.trg, decoded_valid_out_trg)\n # print(\"Eval : \", stacked_output, decoded_valid_out)\n # print(\"Reward: \", r, \"\\n\")\n \n i_sample += 1\n self.store_transition(exp_list, r)\n \n #Learning.....\n if self.memory_counter > self.mem_cap - self.max_output_length:\n self.learn()\n \n self.tb_writer.close()", "def add(self, experience):\n self.buffer.append(experience)", "def eval(self) -> None:\n\n config = self.config.clone()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.NUM_ENVIRONMENTS = 1\n config.freeze()\n\n logger.info(f\"env config: {config}\")\n with construct_envs(config, get_env_class(config.ENV_NAME)) as envs:\n observations = envs.reset()\n batch = batch_obs(observations, device=self.device)\n\n current_episode_reward = torch.zeros(\n envs.num_envs, 1, device=self.device\n )\n ppo_cfg = self.config.RL.PPO\n test_recurrent_hidden_states = torch.zeros(\n config.NUM_ENVIRONMENTS,\n self.actor_critic.net.num_recurrent_layers,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.long,\n )\n not_done_masks = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.bool,\n )\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_ENVIRONMENTS)\n ] # type: List[List[np.ndarray]]\n\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n self.actor_critic.eval()\n\n for _i in range(config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS):\n current_episodes = envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n\n observations, rewards, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(observations, device=self.device)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=\"cpu\",\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=self.device\n ).unsqueeze(1)\n\n current_episode_reward += rewards\n\n # episode ended\n if not not_done_masks[0].item():\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[0],\n episode_id=current_episodes[0].episode_id,\n checkpoint_idx=0,\n metrics=self._extract_scalars_from_info(infos[0]),\n tb_writer=None,\n )\n\n print(\"Evaluation Finished.\")\n print(\"Success: 
{}\".format(infos[0][\"episode_success\"]))\n print(\n \"Reward: {}\".format(current_episode_reward[0].item())\n )\n print(\n \"Distance To Goal: {}\".format(\n infos[0][\"object_to_goal_distance\"]\n )\n )\n\n return\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[0], infos[0])\n rgb_frames[0].append(frame)\n\n not_done_masks = not_done_masks.to(device=self.device)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]", "def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)", "def append(self, experience):\n self.buffer.append(experience)\n self.number += 1", "def render_single(env, policy, render = False, n_episodes=100):\n total_rewards = 0\n for _ in range(n_episodes):\n ob = env.reset() # initialize the episode\n done = False\n while not done:\n if render:\n env.render() # render the game\n ############################\n # YOUR IMPLEMENTATION HERE #\n #env.step(np.where(policy[0]==1)[0].tolist()[0])\n agent_next_step=env.step(np.argmax(policy[ob,:]))\n ob=agent_next_step[0]\n reward= agent_next_step[1]\n done= agent_next_step[2]\n total_rewards+=reward\n if done:\n break\n return total_rewards", "def __init__(self, env: CityLearnEnv, **kwargs: Any):\n\n super().__init__(env, **kwargs)\n\n # internally defined\n self.normalized = [False for _ in self.action_space]\n self.soft_q_criterion = nn.SmoothL1Loss()\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.replay_buffer = [ReplayBuffer(int(self.replay_buffer_capacity)) for _ in self.action_space]\n self.soft_q_net1 = [None for _ in self.action_space]\n self.soft_q_net2 = [None for _ in self.action_space]\n self.target_soft_q_net1 = [None for _ in self.action_space]\n self.target_soft_q_net2 = [None for _ in self.action_space]\n self.policy_net = [None for _ in self.action_space]\n self.soft_q_optimizer1 = [None for _ in self.action_space]\n self.soft_q_optimizer2 = [None for _ in self.action_space]\n self.policy_optimizer = [None for _ in self.action_space]\n self.target_entropy = [None for _ in self.action_space]\n self.norm_mean = [None for _ in self.action_space]\n self.norm_std = [None for _ in self.action_space]\n self.r_norm_mean = [None for _ in self.action_space]\n self.r_norm_std = [None for _ in self.action_space]\n self.set_networks()", "def __init__(self, agent, make_env=lambda:gym.make(\"SpaceInvaders-v0\"), n_games=1, max_size=None,\n preprocess_observation = lambda obs:obs,agent_step=None):\n if not isinstance(make_env, function):\n env_name = make_env\n make_env = lambda: gym.make(env_name)\n\n #create atari games\n self.make_env = make_env\n self.envs = [self.make_env() for _ in range(n_games)]\n self.preprocess_observation = preprocess_observation\n\n\n #initial observations\n self.prev_observations = [self.preprocess_observation(make_env.reset()) for make_env in self.envs]\n\n #agent memory variables (if you use recurrent networks\n self.prev_memory_states = [np.zeros((n_games,)+tuple(mem.output_shape[1:]),\n dtype=get_layer_dtype(mem))\n for mem in agent.agent_states]\n\n #save agent\n self.agent = agent\n 
self.agent_step = agent_step or agent.get_react_function()\n\n # Create experience replay environment\n self.experience_replay = SessionPoolEnvironment(observations=agent.observation_layers,\n actions=agent.action_layers,\n agent_memories=agent.agent_states)\n self.max_size = max_size\n\n #whether particular session has just been terminated and needs restarting\n self.just_ended = [False] * len(self.envs)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)", "def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):\n\n utils.EzPickle.__init__(self, game, obs_type)\n assert obs_type in ('ram', 'image')\n\n self.game_path = atari_py.get_game_path(game)\n if not os.path.exists(self.game_path):\n raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))\n self._obs_type = obs_type\n self.frameskip = frameskip\n self.ale = ALEInterface()\n self.viewer = None\n\n # Tune (or disable) ALE's action repeat:\n # https://github.com/openai/gym/issues/349\n assert isinstance(repeat_action_probability, (float, int)), \"Invalid repeat_action_probability: {!r}\".format(repeat_action_probability)\n self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)\n\n self._seed()\n\n (screen_width, screen_height) = self.ale.getScreenDims()\n self._buffer = np.empty((screen_height, screen_width, 3), dtype=np.uint8)\n\n self._action_set = self.ale.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n\n (screen_width,screen_height) = self.ale.getScreenDims()\n if self._obs_type == 'ram':\n self.observation_space = spaces.Box(low=np.zeros(128), high=np.zeros(128)+255)\n elif self._obs_type == 'image':\n self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))\n else:\n raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))", "def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')", "def test_cloned_policy(env, cloned_policy, num_episodes=50, render=True):\n total_rewards = []\n\n for i in range(num_episodes):\n print('Starting episode {}'.format(i))\n total_reward = 0\n state = env.reset()\n if render:\n env.render()\n time.sleep(.01)\n is_done = False\n while not is_done:\n action = np.argmax(\n cloned_policy.predict_on_batch(state[np.newaxis, ...])[0])\n state, reward, is_done, _ = env.step(action)\n total_reward += reward\n if render:\n env.render()\n time.sleep(.1)\n print(\n 'Total reward: {}'.format(total_reward))\n total_rewards.append(total_reward)\n\n mean = np.mean(total_rewards)\n std = np.std(total_rewards)\n print('Average total reward: {} (std: {})'.format(\n mean, std))\n\n return total_rewards", "def 
__init__(self, environment):\n self.env = environment\n self.cumreward = 0 # tracking cumulative reward\n self.samples = 0 # tracking the number of samples\n\n self.sensor_limits = None\n self.actor_limits = None\n self.clipping = True\n\n self.current_action = 0 # Saving current action\n self.prev_action = -1 # Saving previous action", "def store(self, experience):\n\n self.memory.store(experience)", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def collect_episode(environment, policy, num_episodes, replay_buffer_observer):\n initial_time_step = environment.reset()\n\n driver = py_driver.PyDriver(\n environment,\n py_tf_eager_policy.PyTFEagerPolicy(policy, use_tf_function=True),\n [replay_buffer_observer],\n max_episodes=num_episodes,\n )\n initial_time_step = environment.reset()\n driver.run(initial_time_step)", "def new_gen(self,agents,probs,p_c,p_mut_div,p_mut_fit,tour_size,elite):\n new_agents = []\n n_layers = len(agents[0].get_weights())\n # carrying over elite agent\n new_agents.append(AtariNet(\n self.obs_shape,\n self.action_shape,\n self.net_conf))\n new_agents[-1].set_weights(agents[elite].get_weights())\n exploration_size = 0\n for _ in range(len(agents)-1):\n n_parent = np.random.choice([1,2],1,p=[1-p_c,p_c])[0] # selecting whether to use crossover\n exploration_size += int(2-n_parent) # counting members of exploration population\n parent = self._tournament(probs,n_parent,tour_size)\n p_mut = self._calc_p_mut(parent,p_mut_div,p_mut_fit)\n offspring = self._create_offspring(agents,parent,n_layers,p_mut)\n new_agents.append(offspring)\n return new_agents, exploration_size", "def render_episode(env, policy):\n\n episode_reward = 0\n ob = env.reset()\n for t in range(100):\n env.render()\n time.sleep(0.5) \n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n assert done\n env.render()\n print(\"Episode reward: %f\" % episode_reward)", "def retrieve_capabilities(self, url, urlchain=[], pool=None, identity=None):\n\n # detect loops in capability links\n if url in urlchain:\n return\n\n if not self._default_url:\n self.set_default_url(url)\n\n if isinstance(url, str):\n url = urllib3.util.parse_url(url)\n\n if identity is None:\n identity = self._tls_state.extract_peer_identity(url)\n\n if pool is None:\n if url.host is not None:\n pool = self._tls_state.pool_for(url.scheme, url.host, url.port)\n else:\n raise ValueError(\"HttpInitiatorClient capability retrieval missing connection pool\")\n\n if url.path is not None:\n path = url.path\n else:\n path = \"/\"\n res = pool.request('GET', path)\n\n if res.status == 200:\n ctype = res.getheader(\"Content-Type\")\n if ctype == \"application/x-mplane+json\":\n\n # Probably an envelope. 
Process the message.\n self.handle_message(\n mplane.model.parse_json(res.data.decode(\"utf-8\")), identity)\n elif ctype == \"text/html\":\n # Treat as a list of links to capability messages.\n parser = CrawlParser()\n parser.feed(res.data.decode(\"utf-8\"))\n parser.close()\n for capurl in parser.urls:\n self.retrieve_capabilities(url=capurl,\n urlchain=urlchain + [url],\n pool=pool, identity=identity)", "def render(env, path, policy = None, num_episodes = 1):\n\n if policy is None:\n \n def policy(state):\n return env.action_space.sample()\n \n else:\n \n policy.custom_load(path)\n \n state = env.reset()\n frames = []\n ep_num = 0 \n \n frames.append(Image.fromarray(env.render(mode = 'rgb_array')))\n env.render()\n \n while ep_num < num_episodes:\n action = policy.select_clipped_action(state, exploration_noise = 0)\n state, _, done, _ = env.step(action)\n frames.append(Image.fromarray(env.render(mode = 'rgb_array')))\n env.render()\n\n if done:\n with open(path + 'test_' + str(ep_num) + '.gif', 'wb') as fp:\n im = Image.new('RGB', frames[0].size)\n im.save(fp, save_all = True, append_images = frames, duration = 100, loop = 1)\n ep_num += 1 \n \n state = env.reset()\n frames = []\n \n frames.append(Image.fromarray(env.render(mode = 'rgb_array')))\n env.render()", "def run_episode(env, policy, experience_observers=None, test=False):\n # Optimization: rendering takes a lot of time.\n def maybe_render(env, action, reward, timestep):\n if test:\n render = env.render()\n render.write_text(\"Action: {}\".format(str(action)))\n render.write_text(\"Reward: {}\".format(reward))\n render.write_text(\"Timestep: {}\".format(timestep))\n return render\n return None\n\n if experience_observers is None:\n experience_observers = []\n\n episode = []\n state = env.reset()\n timestep = 0\n renders = [maybe_render(env, None, 0, timestep)]\n hidden_state = None\n while True:\n action, next_hidden_state = policy.act(\n state, hidden_state, test=test)\n next_state, reward, done, info = env.step(action)\n timestep += 1\n renders.append(\n maybe_render(env, grid.Action(action), reward, timestep))\n experience = rl.Experience(\n state, action, reward, next_state, done, info, hidden_state,\n next_hidden_state)\n episode.append(experience)\n for observer in experience_observers:\n observer(experience)\n\n state = next_state\n hidden_state = next_hidden_state\n if done:\n return episode, renders", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def collect(self) -> int:\n self.get_logger().info('Generating experiences...')\n # Decay epsilon if requred\n self._wp.decay_epsilon(self.episode, 0.001)\n\n # Generate experiences\n trajectory_length, total_reward = self.step()\n self.publish(total_reward)\n self.flag.shift_to_compute_cycle()\n\n # Total reward section\n self._total_reward += total_reward\n if (self.episode + 1) % self._update == 0:\n self.get_logger().error(\n f'Expected Reward: {self._total_reward / self._update}')\n self._total_reward = 0\n\n return trajectory_length", "def evaluate(agent, env, n_games=1):\n # env.render()\n game_rewards = []\n for _ in 
range(n_games):\n states = env.reset()\n\n total_reward = 0\n i = 0\n while True:\n i += 1\n actions = agent.sample_actions(agent.step(states))\n states, rewards, dones, infos = env.step(actions)\n total_reward += sum(rewards)\n if dones[0]:\n break\n\n # We rescale the reward back to ensure compatibility\n # with other evaluations.\n game_rewards.append(total_reward / env.num_envs)\n # env.render('disable')\n return game_rewards", "def __init__(self, env_config, test_mode=False, render_mode='2d', verbose=False):\n\n self.test_mode = test_mode\n self.render_mode = render_mode\n self.verbose = verbose\n self.config = env_config\n\n # Setting dimension of observation vector\n self.n_observations = len(Vessel.NAVIGATION_FEATURES) + 3*self.config[\"n_sectors\"] + ColavRewarder.N_INSIGHTS\n\n self.episode = 0\n self.total_t_steps = 0\n self.t_step = 0\n self.history = []\n\n\n # Declaring attributes\n #self.obstacles = None\n self.main_vessel = None\n #self.agent = None\n\n #self.path = None\n\n self.reached_goal = None\n self.collision = None\n self.progress = None\n self.cumulative_reward = None\n self.last_reward = None\n self.last_episode = None\n self.rng = None\n self._tmp_storage = None\n\n self._action_space = gym.spaces.Box(\n low=np.array([-1, -1]),\n high=np.array([1, 1]),\n dtype=np.float32\n )\n self._observation_space = gym.spaces.Box(\n low=np.array([-1]*self.n_observations),\n high=np.array([1]*self.n_observations),\n dtype=np.float32\n )\n\n # Initializing rendering\n self._viewer2d = None\n self._viewer3d = None\n if self.render_mode == '2d' or self.render_mode == 'both':\n render2d.init_env_viewer(self)\n if self.render_mode == '3d' or self.render_mode == 'both':\n render3d.init_env_viewer(self, autocamera=self.config[\"autocamera3d\"])\n\n # self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1589625657ppo/6547288.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590746004ppo/2927552.pkl')\n # self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590827849ppo/4070808.pkl')\n #'C:/Users/amalih/OneDrive - NTNU/github/logs/agents/MultiAgentPPO-v0/1064190.pkl'\n\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590705511ppo/4425456.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/gym-auv-master/logs/agents/MovingObstacles-v0/1589130704ppo/6916896.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/gym-auv-master/logs/agents/MovingObstacles-v0/1589031909ppo/1760568.pkl')\n self.agent = PPO2.load('C:/Users/amalih/OneDrive - NTNU/github/logs/agents/MultiAgentPPO-v0/1591171914ppo/79288.pkl')\n\n self.rewarder_dict = {}\n\n self.reset()\n print('Init done')", "def start(self) -> UnityEnvironment:\n self.env = self.get_env(self.file)\n time.sleep(2)\n self.brain_name = self.env.brain_names[0]\n\n brain = self.env.brains[self.brain_name]\n self.action_size = brain.vector_action_space_size\n env_info = self.env.reset(train_mode=True)[self.brain_name]\n states = env_info.vector_observations\n self.n_agents, self.state_size = states.shape\n return self.env", "def remember(self, experience):\n self.memory.append(experience)", "def _env_runner(env, policy, num_local_steps, horizon, obs_filter):\n last_observation = obs_filter(env.reset())\n try:\n horizon = horizon if horizon else env.spec.tags.get(\n \"wrapper_config.TimeLimit.max_episode_steps\")\n 
except Exception:\n print(\"Warning, no horizon specified, assuming infinite\")\n if not horizon:\n horizon = 999999\n if hasattr(policy, \"get_initial_features\"):\n last_features = policy.get_initial_features()\n else:\n last_features = []\n features = last_features\n length = 0\n rewards = 0\n rollout_number = 0\n\n while True:\n terminal_end = False\n rollout = PartialRollout(extra_fields=policy.other_output)\n\n for _ in range(num_local_steps):\n action, pi_info = policy.compute(last_observation, *last_features)\n if policy.is_recurrent:\n features = pi_info[\"features\"]\n del pi_info[\"features\"]\n observation, reward, terminal, info = env.step(action)\n observation = obs_filter(observation)\n\n length += 1\n rewards += reward\n if length >= horizon:\n terminal = True\n\n # Concatenate multiagent actions\n if isinstance(action, list):\n action = np.concatenate(action, axis=0).flatten()\n\n # Collect the experience.\n rollout.add(observations=last_observation,\n actions=action,\n rewards=reward,\n terminal=terminal,\n features=last_features,\n **pi_info)\n\n last_observation = observation\n last_features = features\n\n if terminal:\n terminal_end = True\n yield CompletedRollout(length, rewards)\n\n if (length >= horizon or\n not env.metadata.get(\"semantics.autoreset\")):\n last_observation = obs_filter(env.reset())\n if hasattr(policy, \"get_initial_features\"):\n last_features = policy.get_initial_features()\n else:\n last_features = []\n rollout_number += 1\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.last_r = policy.value(last_observation, *last_features)\n\n # Once we have enough experience, yield it, and have the ThreadRunner\n # place it on a queue.\n yield rollout", "def run_episode(env, policy, gamma=1.0, render = False):\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(int(policy[obs]))\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n #print(total_reward)\n return total_reward", "def run_episode(env, policy, GAMMA=1.0, render=False):\r\n obs= env.reset()\r\n total_reward = 0\r\n step_idx = 0\r\n while True:\r\n if render:\r\n env.render()\r\n obs, reward, done, _ = env.step(policy[obs])\r\n total_reward += ((GAMMA ** step_idx)*reward)\r\n step_idx +=1\r\n if done:\r\n break\r\n return total_reward", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def run(self, agent_host, test_knowledge):\n\n total_reward = 0\n current_r = 0\n tol = 0.01\n\n self.drawQ_reward_history = defaultdict(str)\n self.state = None\n self.action = None\n self.next_state = None\n \n for i in range(self.movement_memory*9): #9 for vision radius\n self.moves_temp.append(0)\n self.moves = self.one_hot(torch.tensor(self.moves_temp), len(self.block_list)).flatten()\n # wait for a valid observation\n world_state = agent_host.peekWorldState()\n while world_state.is_mission_running and all(e.text == '{}' for e in world_state.observations):\n world_state = agent_host.peekWorldState()\n # wait for a frame to arrive after that\n num_frames_seen = world_state.number_of_video_frames_since_last_state\n while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:\n world_state = agent_host.peekWorldState()\n world_state = agent_host.getWorldState()\n for err in world_state.errors:\n print(err)\n\n if not world_state.is_mission_running:\n 
return 0 # mission already ended\n\n assert len(world_state.video_frames) > 0, 'No video frames!?'\n\n obs = json.loads(world_state.observations[-1].text)\n prev_x = obs[u'XPos']\n prev_z = obs[u'ZPos']\n print('Initial position:', prev_x, ',', prev_z)\n\n self.drawQ_reward_history[str(prev_x)+','+str(prev_z)] = 0.0\n\n if save_images:\n # save the frame, for debugging\n frame = world_state.video_frames[-1]\n image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))\n iFrame = 0\n self.rep = self.rep + 1\n image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '.png')\n\n # take first action\n require_move = True\n check_expected_position = True\n\n count = 0\n\n # main loop:\n while world_state.is_mission_running:\n\n state, action = self.act(world_state, agent_host)\n #input(\"Press Enter to continue...\")\n # wait for the position to have changed and a reward received\n print('Waiting for data...', end=' ')\n while True:\n world_state = agent_host.peekWorldState()\n if not world_state.is_mission_running:\n print('mission ended.')\n break\n if len(world_state.rewards) > 0 and not all(e.text == '{}' for e in world_state.observations):\n obs = json.loads(world_state.observations[-1].text)\n curr_x = obs[u'XPos']\n curr_z = obs[u'ZPos']\n if require_move:\n if math.hypot(curr_x - prev_x, curr_z - prev_z) > tol:\n print('received.')\n break\n else:\n print('received.')\n break\n # wait for a frame to arrive after that\n num_frames_seen = world_state.number_of_video_frames_since_last_state\n while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:\n world_state = agent_host.peekWorldState()\n\n num_frames_before_get = len(world_state.video_frames)\n\n world_state = agent_host.getWorldState()\n for err in world_state.errors:\n print(err)\n current_r += sum(r.getValue() for r in world_state.rewards)\n\n if save_images:\n # save the frame, for debugging\n if world_state.is_mission_running:\n assert len(world_state.video_frames) > 0, 'No video frames!?'\n frame = world_state.video_frames[-1]\n image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels))\n iFrame = iFrame + 1\n image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '_after_' +\n self.actions[self.prev_a] + '.png')\n\n if world_state.is_mission_running:\n assert len(world_state.video_frames) > 0, 'No video frames!?'\n num_frames_after_get = len(world_state.video_frames)\n assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'\n frame = world_state.video_frames[-1]\n obs = json.loads(world_state.observations[-1].text)\n curr_x = obs[u'XPos']\n curr_z = obs[u'ZPos']\n print('New position from observation:', curr_x, ',', curr_z, 'after action:', self.actions[self.prev_a],\n end=' ') # NSWE\n\n if check_expected_position:\n expected_x = prev_x + [0, 0, -1, 1][self.prev_a]\n expected_z = prev_z + [-1, 1, 0, 0][self.prev_a]\n if math.hypot(curr_x - expected_x, curr_z - expected_z) > tol:\n print(' - ERROR DETECTED! 
Expected:', expected_x, ',', expected_z)\n input(\"Press Enter to continue...\")\n else:\n print('as expected.')\n curr_x_from_render = frame.xPos\n curr_z_from_render = frame.zPos\n print('New position from render:', curr_x_from_render, ',', curr_z_from_render, 'after action:',\n self.actions[self.prev_a], end=' ') # NSWE\n if math.hypot(curr_x_from_render - expected_x, curr_z_from_render - expected_z) > tol:\n print(' - ERROR DETECTED! Expected:', expected_x, ',', expected_z)\n input(\"Press Enter to continue...\")\n else:\n print('as expected.')\n else:\n print()\n #input(\"Press Enter to continue...\")\n\n vision = obs['vision']\n encode = encode_observations(vision)\n\n emb = self.one_hot(torch.tensor(encode), len(self.block_list))\n next_state = torch.cat((emb.flatten(), self.moves))\n prev_x = curr_x\n prev_z = curr_z\n\n\n # place move into memory and update NN if necessary\n total_reward += current_r\n\n agent.step(state, action, total_reward, next_state)\n\n # save the total reward in the drawQ_reward_history dictionary\n # for drawQ\n self.drawQ_reward_history[str(curr_x)+\",\"+str(curr_z)] = total_reward\n\n ### SPECIAL ###\n # Here, we can replace our current spot with a normal block\n # to indicate that the item has been picked up.\n# print(current_r)\n if obs[u'vision'][floor(len(obs[u'vision']) / 2)] == \"grass\":\n temp = prev_x, prev_z\n result = \"chat /fill \" + str(temp[0]) + \" 45 \" + str(temp[1]) + \" \" + str(temp[0]) + \" 45 \" + str(\n temp[1]) \\\n + \" minecraft:sandstone 0 replace minecraft:grass\"\n agent_host.sendCommand(result)\n\n ### END ###\n\n current_r = 0\n\n count += 1\n\n\n\n # process final reward\n self.logger.debug(\"Final reward: %d\" % current_r)\n print(\"Final reward: %d\" % current_r)\n total_reward += current_r\n\n x = obs[u'XPos']\n z = obs[u'ZPos']\n\n action = int(action)\n if action == 0:\n z -= 1\n elif action == 1:\n z += 1\n elif action == 2:\n x -= 1\n elif action == 3:\n x += 1\n\n obs[u'XPos'] = x\n obs[u'ZPos'] = z\n state, action = self.act(world_state, agent_host, old_obs=obs)\n agent.step(state, action, total_reward, state)\n self.drawQ_reward_history[str(obs[u'XPos']) + \",\" + str(obs[u'ZPos'])] = total_reward\n\n # update epsilon for next run but don't let epsilon get below 0.01\n if self.epsilon > 0.01 or True: #override for test\n self.epsilon -= self.epsilon_decay\n if self.epsilon < 0:\n self.epsilon = 0\n\n # stochastic means we must lower alpha to 0 over time\n #if self.learning_rate > 0:\n # self.learning_rate -= 0.00003\n #if self.learning_rate < 0:\n # self.learning_rate = 0\n\n #suggestion: raise gamma over time\n #if self.gamma < 0.99:\n # self.gamma += 0.00003\n \n print()\n print('updated epsilon: ', self.epsilon)\n print('updated alpha: ', self.learning_rate)\n print('updated gamma: ', self.gamma)\n print()\n\n self.drawQ(curr_x = int(obs[u'XPos']), curr_y = int(obs[u'ZPos']))\n\n return total_reward", "def initiate_agent(self, env):\n from keras import Sequential\n from keras.optimizers import Adam\n from keras.layers import Dense, Dropout\n from rl.memory import SequentialMemory\n from rl.agents import DQNAgent\n\n self.env = env\n\n nb_actions = self.env.action_space.n\n\n model = Sequential()\n model.add(Dense(512, activation='relu', input_shape=env.observation_space))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(nb_actions, activation='linear'))\n\n # Finally, we 
configure and compile our agent. You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=memory_limit, window_length=window_length)\n policy = TrumpPolicy()\n from rl.core import Processor\n\n class CustomProcessor(Processor):\n \"\"\"he agent and the environment\"\"\"\n\n def process_state_batch(self, batch):\n \"\"\"\n Given a state batch, I want to remove the second dimension, because it's\n useless and prevents me from feeding the tensor into my CNN\n \"\"\"\n return np.squeeze(batch, axis=1)\n\n def process_info(self, info):\n processed_info = info['player_data']\n if 'stack' in processed_info:\n processed_info = {'x': 1}\n return processed_info\n\n nb_actions = env.action_space.n\n\n self.dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=nb_steps_warmup,\n target_model_update=1e-2, policy=policy,\n processor=CustomProcessor(),\n batch_size=batch_size, train_interval=train_interval, enable_double_dqn=enable_double_dqn)\n self.dqn.compile(Adam(lr=1e-3), metrics=['mae'])", "def interact(env, agent, num_episodes=30, window=1):\r\n # initialize average rewards\r\n average_reward_per_100_episodes = []\r\n best_average_reward_per_100_episodes = []\r\n avg_rewards = deque(maxlen=num_episodes)\r\n # initialize best average reward\r\n best_avg_reward = -math.inf\r\n # initialize monitor for most recent rewards\r\n samp_rewards = deque(maxlen=window)\r\n\r\n\r\n answer = input(\"Load QTable? (y/n) ?\")\r\n if answer == \"y\":\r\n # Do this.\r\n an = input(\"Name ?\")\r\n agent.q_table.readQ(an)\r\n agent.q_table.readStateList(an)\r\n agent.observation_space = np.size(agent.q_table.q, 0)\r\n agent.q_table.stateCount = len(agent.q_table.stateList)\r\n #print(\"files not found\")\r\n elif answer == \"n\":\r\n # Do that.\r\n pass\r\n else:\r\n print(\"Please enter y or n\")\r\n\r\n start_time = time.time()\r\n # for each episode\r\n for i_episode in range(1, num_episodes+1):\r\n # begin the episode\r\n state_array,state, state_with_act = env.reset()\r\n #if state <= agent.q_table.observation_space -1:\r\n #print('next state is in q table')\r\n y = agent.q_table.addStateList(state_with_act[state][4], state_with_act[state][1], state_with_act[state][2])\r\n if y != -1:\r\n print (\"state is in the stateList\")\r\n else : \r\n #agent.addState(state)\r\n y= agent.q_table.stateCount\r\n # initialize the sampled reward\r\n samp_reward = 0\r\n while True:\r\n #print(\"new action starts\")\r\n # agent selects an action\r\n action = agent.select_action(y)\r\n # agent performs the selected action\r\n next_state, reward,done, indices, stateENV, action_count = env.step(action)\r\n time.sleep(0.3)\r\n #print(\"indices =\")\r\n #print(indices)\r\n # agent performs internal updates based on sampled experience\r\n \r\n #if next_state <= agent.q_table.observation_space -1:\r\n #print('next state is in q table')\r\n x = agent.q_table.addStateList(stateENV[next_state][4], stateENV[next_state][1], stateENV[next_state][2])\r\n if x != -1:\r\n print (\"state is in the stateList\")\r\n else : \r\n #agent.addState(state)\r\n x= agent.q_table.stateCount\r\n #print (\"next state =\"+str(next_state))\r\n #print (\"observation_space\" + str(agent.q_table.observation_space-1))\r\n #agent.step(state, action, reward, next_state, indices)\r\n agent.step(y, action, reward, x, indices)\r\n #print('stateList = ')\r\n #print(agent.q_table.stateList)\r\n # update the sampled reward\r\n samp_reward += reward\r\n # update the state (s <- s') to next time 
step\r\n y = x\r\n agent.update_epsilon()\r\n if done:\r\n # save final sampled reward\r\n samp_rewards.append(samp_reward)\r\n break\r\n \r\n lenStateCOunt = len(agent.q_table.stateList)\r\n time_end = time.time()\r\n saveEpisodeResult(start_time,time_end,i_episode, lenStateCOunt)\r\n\r\n\r\n \r\n\r\n if (i_episode >= 100):\r\n # get average reward from last 100 episodes\r\n avg_reward = np.mean(samp_rewards)\r\n # append to deque\r\n avg_rewards.append(avg_reward)\r\n # update best average reward\r\n print('episode average reward {}'.format(avg_reward))\r\n average_reward_per_100_episodes.append(avg_reward)\r\n best_average_reward_per_100_episodes.append(best_avg_reward)\r\n if avg_reward > best_avg_reward:\r\n best_avg_reward = avg_reward\r\n print (\"State with activities =\")\r\n for key,value in stateENV.items():\r\n print(str(key) + '. ')\r\n print(str(stateENV[key][0]) + ', ' + str(stateENV[key][1])+ ', ' + str(stateENV[key][2]) + ', ' + str(stateENV[key][4]))\r\n \r\n # monitor progress\r\n print(\"\\rEpisode {}/{} || Best average reward {} || eps {} \".format(i_episode, num_episodes, best_avg_reward, agent.epsilon), end=\"\")\r\n sys.stdout.flush()\r\n \r\n # check if task is solved (according to OpenAI Gym)\r\n if best_avg_reward >= 9.7:\r\n print('\\nEnvironment solved in {} episodes.'.format(i_episode), end=\"\")\r\n agent.q_table.saveQ(best_avg_reward,appName)\r\n agent.q_table.saveStateList(appName)\r\n print(\"width = \"+ str(action_count))\r\n saveEpToCSV()\r\n save_rewards_csv(average_reward_per_100_episodes, best_average_reward_per_100_episodes)\r\n break\r\n if i_episode == num_episodes: \r\n agent.q_table.saveQ(best_avg_reward,appName)\r\n agent.q_table.saveStateList(appName)\r\n print(\"width = \"+ str(action_count))\r\n saveEpToCSV()\r\n save_rewards_csv(average_reward_per_100_episodes, best_average_reward_per_100_episodes)\r\n print('\\n')\r\n\r\n end_time = time.time()\r\n time_lapsed = end_time - start_time\r\n time_convert(time_lapsed)\r\n return avg_rewards, best_avg_reward\r\n print(\"width = \"+ str(action_count))", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def render_single(env, policy, render=False, n_episodes=100):\n total_rewards = 0\n\n for _ in range(n_episodes):\n ob = env.reset() # initialize the episode\n done = False\n while not done:\n if render:\n env.render() # render the game\n action_id = np.argmax(policy[ob, :])\n ob, reward, done, info = env.step(action_id)\n total_rewards += reward\n if done:\n break\n return total_rewards", "def get_experience(self):\n return self.experience_set.all()", "def run_episode(env, policy, T=5000, render=False):\n obs = env.reset()\n total_reward = 0\n for t in range(T):\n if render:\n env.render()\n action = policy_to_action(policy, obs)\n obs, reward, done, _ = env.step(action)\n total_reward += reward\n if done:\n break\n return total_reward", "def make_atari_RAM(env, num_frames, device, action_stack=False,max_frames=int(108e3)):\n\n # env.env.frame_skip = (0,1)\n #exit()\n\n print(\"MAX FRAMES:\",max_frames)\n\n env = AtariSkips(CombineRamPixel(env),max_frames)\n env = MaxAndSkipEnv(env, 4)\n env = FrameStack(env, num_frames, device)\n env.reset()\n # env = TorchTensorObservation(env, device)\n return env", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 
0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def render_single(env, policy, seed_feed=99, if_render=True, iter_tot=100):\n\n episode_reward = 0\n ob_list = []\n ob = env.reset()\n ob_list.append(ob)\n env.seed(seed_feed)\n for t in range(iter_tot):\n if if_render:\n env.render()\n time.sleep(0.5) # Seconds between frames. 
Modify as you wish.\n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n ob_list.append(ob)\n episode_reward += rew\n if done:\n break\n # assert done\n if if_render:\n env.render()\n print(\"Episode cost: %f\" % episode_reward)\n return episode_reward, ob_list", "def __init__(self, buffer_size, batch_size):\n # Internal memory\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\",\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])", "def collect_episode(env, policy, buffer, render_option):\n state = env.reset()\n done = False\n while not done:\n if render_option == 'collect':\n env.render()\n action = policy(state)\n next_state, reward, done, _ = env.step(action)\n if done:\n reward = -1.0\n buffer.record(state, reward, next_state, action, done)\n state = next_state", "def run_episode(env, policy, gamma = 1.0, render = False):\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(int(policy[obs]))\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n return total_reward", "def __init__(self, state_size, action_size, seed, batch_size=BATCH_SIZE, \n train_mode=True, create_model=None, double_dqn=False):\n if create_model:\n local_model = create_model(state_size, action_size, seed)\n target_model = create_model(state_size, action_size, seed)\n else:\n local_model = QNetwork(state_size, action_size, seed)\n target_model = QNetwork(state_size, action_size, seed)\n \n super(AgentExperienceReplay, self).__init__(state_size, \n action_size, \n seed,\n local_model,\n target_model,\n batch_size=batch_size,\n train_mode=train_mode,\n double_dqn=double_dqn)\n\n\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, self.batch_size, seed)", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\"])", "def sample_episode(env, policy):\n states = []\n actions = []\n rewards = []\n dones = []\n \n # YOUR CODE HERE\n done = False\n state = env.reset() # Could also use env._get_obs(), but codegrade seems to expect this\n while done == False:\n states.append(state)\n \n action = policy.sample_action(state)\n actions.append(action)\n \n state, reward, done, _ = env.step(action)\n \n rewards.append(reward)\n dones.append(done)\n\n return states, actions, rewards, dones", "def build_replay_buffer(agent, batch_size, steps_per_loop):\n buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.policy.trajectory_spec,\n batch_size=batch_size,\n max_length=steps_per_loop)\n return buf", "def step(self, states, actions, rewards, next_states, dones):\n \n states = states.reshape(1, -1)\n next_states = next_states.reshape(1, -1)\n self.memory.add(states, actions, rewards, next_states, dones)\n\n # for each agent, sample experiences from the shared buffer and learn\n if len(self.memory) > self.batch_size:\n experiences = [self.memory.sample() for _ in range(self.n_agents)]\n self.learn(experiences, self.gamma)", "def __init__(self, env, skip=4, blend=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((blend,) + env.observation_space.shape, 
dtype=env.observation_space.dtype)\n self._skip = skip\n self._blend = blend", "def RunEpisode(env, policy, eps):\n\n obs = env.reset()\n memory = []\n R = 0\n for t in range(1000):\n action = policy(obs.astype('float32').reshape(1, 4))[0]\n# pdb.set_trace()\n r = np.random.rand()\n if r<eps:\n action = np.random.random_integers(0,1, ()).tolist()\n\n new_obs, reward, done, info = env.step(action)\n memory.append((obs, action, new_obs, reward, done))\n obs = new_obs\n if done:\n break\n\n return memory", "def experiment(config):\n with tf.Session() as sess:\n\n seed = config.pop('seed')\n\n if seed:\n seed = int(seed)\n random.seed(seed)\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env_id = config.pop('env_id')\n LOGGER.info('using {} env'.format(env_id))\n\n env = gym.make(env_id)\n\n global_rewards = []\n global_step, episode = 0, 0\n\n config['env'] = env\n config['env_repr'] = repr(env)\n config['sess'] = sess\n\n render = int(config.pop('render'))\n\n agent = Agent(**config)\n\n rl_writer = tf.summary.FileWriter('./results/rl')\n save_args(config, 'results/args.txt')\n\n while global_step < config['total_steps']:\n episode += 1\n done = False\n rewards, actions = [], []\n observation = env.reset()\n\n while not done:\n global_step += 1\n\n # if episode % 1 == render:\n env.render()\n action = agent.act(observation)\n next_observation, reward, done, info = env.step(action)\n agent.remember(observation, action, reward, next_observation, done)\n train_info = agent.learn()\n\n rewards.append(reward)\n actions.append(action)\n observation = next_observation\n\n ep_rew = sum(rewards)\n global_rewards.append(ep_rew)\n avg_reward = sum(global_rewards[-100:]) / len(global_rewards[-100:])\n\n if episode % 10 == 0:\n log_str =' step {:.0f} ep {:.0f} reward {:.1f} avg {:.1f}'\n logging.info(log_str.format(global_step,\n episode,\n ep_rew,\n avg_reward))\n\n summary = tf.Summary(value=[tf.Summary.Value(tag='episode_reward',\n simple_value=ep_rew)])\n rl_writer.add_summary(summary, episode)\n avg_sum = tf.Summary(value=[tf.Summary.Value(tag='avg_last_100_ep',\n simple_value=avg_reward)])\n rl_writer.add_summary(avg_sum, episode)\n rl_writer.flush()\n \n return config", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n 
loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def add_new_experience(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done, self.max_priority)\n self.memory.append(e)", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.episode = 0", "def __init__(self, venv, policy, dims, logger, active, T, rollout_batch_size=1,\n exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,\n random_eps=0, history_len=100, render=False, monitor=False,\n exploration='eps_greedy', compute_root_Q=False, **kwargs):\n\n assert self.T > 0\n\n self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]\n\n self.success_history = deque(maxlen=history_len)\n self.hit_time_mean_history = deque(maxlen=history_len)\n self.hit_time_std_history = deque(maxlen=history_len)\n self.Q_history = deque(maxlen=history_len)\n\n self.n_episodes = 0\n self.reset_all_rollouts()\n self.clear_history()", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n 
self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):\n\n info = new_info[self.brain_name]\n for l in range(len(info.agents)):\n agent_actions = self.training_buffer[info.agents[l]]['actions']\n if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])\n and len(agent_actions) > 0):\n agent_id = info.agents[l]\n if info.local_done[l] and not info.max_reached[l]:\n value_next = 0.0\n else:\n if info.max_reached[l]:\n bootstrapping_info = self.training_buffer[agent_id].last_brain_info\n idx = bootstrapping_info.agents.index(agent_id)\n else:\n bootstrapping_info = info\n idx = l\n value_next = self.policy.get_value_estimate(bootstrapping_info, idx)\n\n self.training_buffer[agent_id]['advantages'].set(\n get_gae(\n rewards=self.training_buffer[agent_id]['rewards'].get_batch(),\n value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),\n value_next=value_next,\n gamma=self.trainer_parameters['gamma'],\n lambd=self.trainer_parameters['lambd']))\n self.training_buffer[agent_id]['discounted_returns'].set(\n self.training_buffer[agent_id]['advantages'].get_batch()\n + self.training_buffer[agent_id]['value_estimates'].get_batch())\n\n self.training_buffer.append_update_buffer(agent_id, batch_size=None,\n training_length=self.policy.sequence_length)\n\n self.training_buffer[agent_id].reset_agent()\n if info.local_done[l]:\n self.stats['cumulative_reward'].append(\n self.cumulative_rewards.get(agent_id, 0))\n self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))\n self.stats['episode_length'].append(\n self.episode_steps.get(agent_id, 0))\n self.cumulative_rewards[agent_id] = 0\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n self.stats['intrinsic_reward'].append(\n self.intrinsic_rewards.get(agent_id, 0))\n self.intrinsic_rewards[agent_id] = 0", "def render_single(env, policy, max_steps=100):\n\n episode_reward = 0\n ob = env.reset()\n for t in range(max_steps):\n env.render()\n time.sleep(0.25)\n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n env.render();\n if not done:\n print(\"The agent didn't reach a terminal state in {} steps.\".format(max_steps))\n else:\n \tprint(\"Episode reward: %f\" % episode_reward)", "def render_single(env, policy, max_steps=100):\n\n episode_reward = 0\n ob = env.reset()\n for t in range(max_steps):\n env.render()\n time.sleep(0.25)\n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n env.render();\n if not done:\n print(\"The agent didn't reach a terminal state in {} steps.\".format(max_steps))\n else:\n \tprint(\"Episode reward: %f\" % episode_reward)", "def run_policy(self, policy, max_steps=1000, render_during=False):\n policy = np.argmax(policy, axis=1)\n\n rewards = []\n\n # Clone the environment to get a fresh one\n env = self.get_environment().new_instance()\n state = env.reset()\n\n done = False\n steps = 0\n while not done and steps < max_steps:\n if render_during:\n env.render()\n\n action = policy[state]\n state, reward, done, info = env.step(action)\n rewards.append(reward)\n steps += 1\n\n if render_during:\n env.render()\n\n return np.array(rewards)", "async def sequence(self, frames: Union[int, np.ndarray]):\r\n await self.configure_acquisition(frames)\r\n\r\n # prepare the buffer\r\n if isinstance(frames, np.ndarray):\r\n n_frames = frames.shape[0]\r\n 
logger.info(f\"acquire {n_frames} frames to user buffer\")\r\n else:\r\n n_frames = int(frames)\r\n frames = np.empty((n_frames,) + self.buffer.shape, dtype=self.buffer.dtype)\r\n logger.info(f\"requested {n_frames} frames\")\r\n\r\n self.start_acquisition()\r\n for i in range(n_frames):\r\n yield await self.get_image(mode=BufferRetrieveMode.Next, out=frames[i, ...])\r\n self.stop_acquisition()\r\n\r\n await self.unconfigure_acquisition()", "def __init__(self, observation_space, action_space, config, unsupType='action', envWrap=False, designHead='universe', noReward=False):\n self.unsup = unsupType is not None\n self.cur_batch = None\n\n predictor = None\n numaction = action_space.n\n\n config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)\n self.config = config\n self.sess = tf.get_default_session()\n\n # Setup the policy\n # =====================================================================\n self.observations = tf.placeholder(tf.float32, [None] + list(observation_space.shape))\n dist_class, logit_dim = ModelCatalog.get_action_dist(action_space, self.config[\"model\"])\n\n # NOTE: value function and trainable variables are defined in self.model\n # Define the policy network\n self.model = pi = ModelCatalog.get_model(self.observations, logit_dim, self.config[\"model\"])\n action_dist = dist_class(self.model.outputs)\n\n # Define S/S+A predictor network\n if self.unsup:\n with tf.variable_scope(\"predictor\"):\n if 'state' in unsupType:\n self.local_ap_network = predictor = StatePredictor(observation_space.shape, numaction, designHead, unsupType)\n else:\n self.local_ap_network = predictor = StateActionPredictor(observation_space.shape, numaction, designHead)\n\n # Setup the policy loss\n # =====================================================================\n if isinstance(action_space, gym.spaces.Box):\n ac_size = action_space.shape[0]\n actions = tf.placeholder(tf.float32, [None, ac_size], name=\"ac\")\n elif isinstance(action_space, gym.spaces.Discrete):\n actions = tf.placeholder(tf.int64, [None], name=\"ac\")\n else:\n raise UnsupportedSpaceException(\n \"Action space {} is not supported for A3C.\".format(\n action_space))\n advantages = tf.placeholder(tf.float32, [None], name=\"advantages\")\n self.v_target = tf.placeholder(tf.float32, [None], name=\"v_target\")\n\n # compute policy loss and predictor loss\n self.loss = A3CLoss(action_dist, actions, advantages, self.v_target,\n self.model.vf, unsupType, predictor, self.config[\"vf_loss_coeff\"],\n self.config[\"entropy_coeff\"])\n\n # Initialize TFPolicyGraph\n loss_in = [\n (\"obs\", self.observations),\n (\"actions\", actions),\n (\"advantages\", advantages),\n (\"value_targets\", self.v_target),\n ]\n LearningRateSchedule.__init__(self, self.config[\"lr\"],\n self.config[\"lr_schedule\"])\n TFPolicyGraph.__init__(\n self,\n observation_space,\n action_space,\n self.sess,\n obs_input=self.observations,\n action_sampler=action_dist.sample(),\n loss=self.loss.total_loss,\n loss_inputs=loss_in,\n state_inputs=self.model.state_in,\n state_outputs=self.model.state_out,\n seq_lens=self.model.seq_lens,\n max_seq_len=self.config[\"model\"][\"max_seq_len\"])\n\n self.stats_fetches = {\n \"stats\": {\n \"cur_lr\": tf.cast(self.cur_lr, tf.float64),\n \"policy_loss\": self.loss.pi_loss,\n \"policy_entropy\": self.loss.entropy,\n \"grad_gnorm\": tf.global_norm(self._grads),\n \"var_gnorm\": tf.global_norm(self.model.var_list),\n \"vf_loss\": self.loss.vf_loss,\n \"vf_explained_var\": explained_variance(self.v_target, 
self.model.vf),\n },\n }\n\n self.sess.run(tf.global_variables_initializer())", "def nchain_extras(env, gamma=0.99):\n\n # How to handle <TimeLimit<______>> and other Wrappers?\n # assert isinstance(env, gym.envs.toy_text.nchain.NChainEnv)\n\n # Action constants\n A_FORWARD = 0\n A_BACKWARD = 1\n\n states = np.arange(env.observation_space.n)\n actions = np.arange(env.action_space.n)\n\n p0s = np.zeros(env.observation_space.n)\n p0s[0] = 1.0\n\n # Populate dynamics\n t_mat = np.zeros(\n (env.observation_space.n, env.action_space.n, env.observation_space.n)\n )\n\n # Backward action moves to 0th state if it doesn't fail, forward if it does\n t_mat[:, A_BACKWARD, 0] = 1.0 - env.slip\n for s1 in states:\n t_mat[s1, A_BACKWARD, min(s1 + 1, env.observation_space.n - 1)] = env.slip\n\n # Forward action moves to next state if it doesn't fail, 0th if it does\n for s1 in states:\n t_mat[s1, A_FORWARD, min(s1 + 1, env.observation_space.n - 1)] = 1.0 - env.slip\n t_mat[:, A_FORWARD, 0] = env.slip\n\n terminal_state_mask = np.zeros(env.observation_space.n)\n\n xtr = DiscreteExplicitExtras(\n states, actions, p0s, t_mat, terminal_state_mask, gamma=gamma,\n )\n\n phi = Indicator(Indicator.Type.OBSERVATION_ACTION, xtr)\n\n state_action_rewards = np.zeros((env.observation_space.n, env.action_space.n))\n state_action_rewards[:, A_BACKWARD] = env.small\n state_action_rewards[env.observation_space.n - 1, A_FORWARD] = env.large\n reward = Linear(state_action_rewards.flatten())\n\n return (xtr, phi, reward)", "def gen_obs(self):\n\n #grid, vis_mask = self.gen_obs_grid()\n\n # Encode the partially observable view into a numpy array\n image = self.grid.encode(self.agent_pos,self.drone_pos)\n\n #assert hasattr(self, 'mission'), \"environments must define a textual mission string\"\n\n # Observations are dictionaries containing:\n # - an image (partially observable view of the environment)\n # - the agent's direction/orientation (acting as a compass)\n # - a textual mission string (instructions for the agent)\n obs = {\n 'image': image,\n #'direction': self.agent_dir,\n 'mission': self.mission\n }\n obs=image\n #print(obs.shape)\n return self.render(mode='rgb_array')\n #return obs", "def render_single(env, policy, max_steps=100):\n\n episode_reward = 0\n ob = env.reset()\n for t in range(max_steps):\n env.render()\n time.sleep(0.25)\n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n env.render();\n if not done:\n print(\"The agent didn't reach a terminal state in {} steps.\".format(max_steps))\n else:\n print(\"Episode reward: %f\" % episode_reward)", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n \n # create the buffer of two frame sizes\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(\n self,\n env_spec,\n policy,\n qf,\n replay_buffer,\n use_target=False,\n discount=0.99,\n n_epoch_cycles=20,\n max_path_length=None,\n n_train_steps=50,\n buffer_batch_size=64,\n min_buffer_size=int(1e4),\n rollout_batch_size=1,\n reward_scale=1.,\n input_include_goal=False,\n smooth_return=True,\n exploration_strategy=None,\n ):\n self.env_spec = env_spec\n self.policy = policy\n self.qf = qf\n self.replay_buffer = replay_buffer\n self.n_epoch_cycles = n_epoch_cycles\n self.n_train_steps = n_train_steps\n self.buffer_batch_size = buffer_batch_size\n self.use_target = use_target\n self.discount = discount\n self.min_buffer_size = min_buffer_size\n self.rollout_batch_size = rollout_batch_size\n 
self.reward_scale = reward_scale\n self.evaluate = False\n self.input_include_goal = input_include_goal\n self.smooth_return = smooth_return\n self.max_path_length = max_path_length\n self.es = exploration_strategy\n self.init_opt()", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def _preprocess_experience(self):\n observed_inputs = []\n observed_reward = []\n predicted_outputs = []\n distance_from_reward = []\n next_state = []\n # process inputs and outputs to train the net\n for episode in self.examples:\n episode_match, example_reward = episode\n last_step = True\n for n, step in enumerate(reversed(episode_match)):\n this_state = state_from_hash(step.state_t)\n next_state.append(state_from_hash(step.action_t))\n observed_inputs.append(np.hstack((this_state,\n this_state != next_state[-1]))\n .flatten())\n distance_from_reward.append(n)\n # now we have to evaluate max_{s'}[Q(a',s')]\n # let's see all possible actions two steps ahead\n two_ahead = []\n for possible_action in self.state_space[step.action_t].actions:\n possible_action = state_from_hash(possible_action)\n two_ahead.append(np.hstack((next_state[-1],\n next_state[-1] != possible_action))\n .flatten())\n if not two_ahead:\n # if it's a terminal state, no two-ahead, so set the max to 0\n max_next_state = 0\n else:\n # evaluate Q on the two-ahead actions\n two_ahead = np.array(two_ahead)\n two_ahead[two_ahead == 2] = -1\n max_next_state = self.sess.run(\n self.output,\n feed_dict={self.input: two_ahead}).flatten()\n\n # calc the maximum\n max_next_state = np.max(max_next_state)\n predicted_outputs.append(max_next_state)\n if last_step:\n # because we start from last step, `last_step` will be true\n observed_reward.append(example_reward)\n # then set it to false so non-last steps get reward 0\n last_step = False\n else:\n observed_reward.append(0)\n # Q-network output from the inputs\n predicted_outputs = self.discount * np.vstack(predicted_outputs).flatten()\n observed_inputs = np.array(observed_inputs)\n # possible max value in a state is 2, set all 2's to -1's\n observed_inputs[observed_inputs == 2] = -1\n observed_reward = np.vstack(observed_reward).flatten()\n return observed_inputs, observed_reward, predicted_outputs, distance_from_reward", "def run_one_step(self):\n state = 
copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward", "def experience_replay(batch_size):\n memory = []\n while True:\n experience = yield rsample(memory, batch_size) if batch_size <= len(memory) else None\n memory.append(experience)", "def __init__(\n self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id\n ):\n super().__init__(brain, trainer_parameters, training, run_id, reward_buff_cap)\n self.param_keys = [\n \"batch_size\",\n \"beta\",\n \"buffer_size\",\n \"epsilon\",\n \"hidden_units\",\n \"lambd\",\n \"learning_rate\",\n \"max_steps\",\n \"normalize\",\n \"num_epoch\",\n \"num_layers\",\n \"time_horizon\",\n \"sequence_length\",\n \"summary_freq\",\n \"use_recurrent\",\n \"summary_path\",\n \"memory_size\",\n \"model_path\",\n \"reward_signals\",\n ]\n self.check_param_keys()\n\n # Make sure we have at least one reward_signal\n if not self.trainer_parameters[\"reward_signals\"]:\n raise UnityTrainerException(\n \"No reward signals were defined. At least one must be used with {}.\".format(\n self.__class__.__name__\n )\n )\n\n self.step = 0\n self.policy = PPOPolicy(seed, brain, trainer_parameters, self.is_training, load)\n\n stats = defaultdict(list)\n # collected_rewards is a dictionary from name of reward signal to a dictionary of agent_id to cumulative reward\n # used for reporting only. 
We always want to report the environment reward to Tensorboard, regardless\n # of what reward signals are actually present.\n self.collected_rewards = {\"environment\": {}}\n for _reward_signal in self.policy.reward_signals.keys():\n self.collected_rewards[_reward_signal] = {}\n\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.episode_steps = {}", "def generate_episode(env, args, render=False, test_mode=False):\n episode = []\n state, done = env.reset(), False\n observations = transform_obs(env.get_all_observations())\n n_steps = 0\n\n for agent in env.agents: # for agents where it matters,\n agent.set_hidden_state() # set the init hidden state of the RNN\n\n while not done:\n unavailable_actions = env.get_unavailable_actions()\n \n # compute action, keep record of hidden state of the agents to store in experience\n actions, hidden, next_hidden = {}, [], []\n for idx, agent in enumerate(env.agents):\n hidden.append(agent.get_hidden_state())\n actions[agent] = agent.act(observations[idx, :], test_mode=test_mode)\n next_hidden.append(agent.get_hidden_state())\n\n if render:\n print(f\"Step {n_steps}\")\n env.render()\n print([action.name for action in actions.values()])\n\n next_state, rewards, done, _ = env.step(actions)\n next_obs = transform_obs(env.get_all_observations())\n \n # episodes that take long are not allowed and penalized for both agents\n n_steps += 1\n if n_steps > args.max_episode_length:\n done = True\n rewards = {'blue': -1, 'red': -1}\n\n actions = torch.tensor([action.id for action in actions.values()])\n unavail_actions = torch.zeros((args.n_agents, args.n_actions), dtype=torch.long)\n for idx, agent in enumerate(env.agents):\n act_ids = [act.id for act in unavailable_actions[agent]]\n unavail_actions[idx, act_ids] = 1.\n \n episode.append(Experience(transform_state(state), actions, rewards, \n transform_state(next_state), done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\"\n episode.append(Experience(None, actions, rewards, \n None, done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\" \n state = next_state\n observations = next_obs\n \n if render:\n print(f\"Game won by team {env.terminal(next_state)}\")\n return episode", "def _forwardImplementation(self, inbuf, outbuf):\n assert self.module\n \n values = self.module.getActionValues(self.state) \n n_values = self.n_values.getActionValues(self.state)\n values = map(lambda x, y: x + self.exploration * (sqrt(2 * log(self.experiment.stepid, 2) / y) if y > 0 else 1000), values, n_values);\n \n actions = []\n for i in range(self.shield_options):\n new_action = where(values == max(values))[0]\n new_action = choice(new_action) \n values[new_action] = -10000\n actions.append(new_action)\n \n while len(actions) < self.outdim:\n actions.append(-1)\n \n outbuf[:] = actions", "def reward_buffer(self):\n return self._reward_buffer", "def collect_samples(self):\n # TODO refactor this to not to duplicate collect from DDPG\n # - not so easy due to logger :(\n collected = 0\n while collected < self.acm_pre_train_samples:\n obs = self.env.reset()\n end = False\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n prev_idx = self.replay_buffer.add_obs(obs)\n ep_len = 0\n\n while not end:\n acm_action = AcMTrainer.initial_act(self, obs)\n self.replay_buffer.add_acm_action(acm_action)\n obs, rew, done, _ = self.env.step(acm_action)\n ep_len += 1\n\n obs = 
torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n end = done\n done = False if ep_len == self.max_ep_len else done\n\n next_idx = self.replay_buffer.add_obs(obs)\n self.replay_buffer.add_timestep(prev_idx, next_idx, obs, rew, done, end)\n prev_idx = next_idx\n collected += 1", "def build_experiments(self):\n\n # width=500, height=350, pos_x= 2.0, pos_y=0.0, pos_z= 1.4, angle=-30.0\n cameraRGB = Camera('Camera', PostProcessing='SceneFinal')\n cameraRGB.set_image_size(500, 350)\n cameraRGB.set_position(2.0, 0.0, 1.4)\n cameraRGB.set_rotation(-30.0, 0.0, 0.)\n cameraRGB.set(FOV=100)\n\n camera = Camera('CameraSem', PostProcessing='SemanticSegmentation')\n camera.set_image_size(320, 180)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-30.0, 0.0, 0.)\n camera.set(FOV=100)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = []\n pedestrians_tasks = []\n for i in range(len(poses_tasks)):\n vehicles_tasks.append(0)\n pedestrians_tasks.append(0)\n\n experiment_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather,\n QualityLevel=1\n )\n\n conditions.set(SynchronousMode=True)\n conditions.set(DisableTwoWheeledVehicles=True)\n\n conditions.add_sensor(camera)\n conditions.add_sensor(cameraRGB)\n\n experiment = Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n\n experiment_vector.append(experiment)\n\n return experiment_vector", "def __init__(self, buffer_size, batch_size, seed):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experiences\", field_names=[\"state\", \"state_full\", \"action\", \"reward\",\n \"next_state\", \"next_state_full\", \"done\"])\n self.seed = random.seed(seed)", "def process_experiences(\n self, current_info: AllBrainInfo, new_info: AllBrainInfo\n ) -> None:\n info = new_info[self.brain_name]\n for l in range(len(info.agents)):\n agent_actions = self.training_buffer[info.agents[l]][\"actions\"]\n if (\n info.local_done[l]\n or len(agent_actions) > self.trainer_parameters[\"time_horizon\"]\n ) and len(agent_actions) > 0:\n agent_id = info.agents[l]\n if info.max_reached[l]:\n bootstrapping_info = self.training_buffer[agent_id].last_brain_info\n idx = bootstrapping_info.agents.index(agent_id)\n else:\n bootstrapping_info = info\n idx = l\n value_next = self.policy.get_value_estimates(\n bootstrapping_info,\n idx,\n info.local_done[l] and not info.max_reached[l],\n )\n\n tmp_advantages = []\n tmp_returns = []\n for name in self.policy.reward_signals:\n bootstrap_value = value_next[name]\n\n local_rewards = self.training_buffer[agent_id][\n \"{}_rewards\".format(name)\n ].get_batch()\n local_value_estimates = self.training_buffer[agent_id][\n \"{}_value_estimates\".format(name)\n ].get_batch()\n local_advantage = get_gae(\n rewards=local_rewards,\n value_estimates=local_value_estimates,\n value_next=bootstrap_value,\n gamma=self.policy.reward_signals[name].gamma,\n lambd=self.trainer_parameters[\"lambd\"],\n )\n local_return = local_advantage + local_value_estimates\n # This is later use as target for the different value estimates\n 
self.training_buffer[agent_id][\"{}_returns\".format(name)].set(\n local_return\n )\n self.training_buffer[agent_id][\"{}_advantage\".format(name)].set(\n local_advantage\n )\n tmp_advantages.append(local_advantage)\n tmp_returns.append(local_return)\n\n global_advantages = list(np.mean(np.array(tmp_advantages), axis=0))\n global_returns = list(np.mean(np.array(tmp_returns), axis=0))\n self.training_buffer[agent_id][\"advantages\"].set(global_advantages)\n self.training_buffer[agent_id][\"discounted_returns\"].set(global_returns)\n\n self.training_buffer.append_update_buffer(\n agent_id,\n batch_size=None,\n training_length=self.policy.sequence_length,\n )\n\n self.training_buffer[agent_id].reset_agent()\n if info.local_done[l]:\n self.stats[\"Environment/Episode Length\"].append(\n self.episode_steps.get(agent_id, 0)\n )\n self.episode_steps[agent_id] = 0\n for name, rewards in self.collected_rewards.items():\n if name == \"environment\":\n self.cumulative_returns_since_policy_update.append(\n rewards.get(agent_id, 0)\n )\n self.stats[\"Environment/Cumulative Reward\"].append(\n rewards.get(agent_id, 0)\n )\n self.reward_buffer.appendleft(rewards.get(agent_id, 0))\n rewards[agent_id] = 0\n else:\n self.stats[\n self.policy.reward_signals[name].stat_name\n ].append(rewards.get(agent_id, 0))\n rewards[agent_id] = 0", "def genetic_algorithm(env: RailEnv):\r\n # Determine number of agent and number of combinations of randomized order\r\n n_agents = len(env.agents)\r\n n_combination = math.factorial(n_agents)\r\n agent_ids= list(range(n_agents))\r\n # Compute number of iteration for genetic algo\r\n # Run ten iterations\r\n n_iterations = 10\r\n\r\n # create an initial population. The population contains three combinations of randomized order\r\n population = []\r\n final_reward = dict()\r\n for i in range(3):\r\n randomized_order = random.sample(agent_ids, len(agent_ids))\r\n if not population.__contains__(randomized_order):\r\n population.append(randomized_order)\r\n\r\n highest_reward_of_each_iteration=[]\r\n action_plan = dict()\r\n # Run the genetic algorithm in n number of iteration\r\n for iteration in range(n_iterations):\r\n # Pass each randomized order to the modified a_star\r\n i=0\r\n reward = dict()\r\n print(\"*******************************************************New Population***********************************\")\r\n print(\"Iteration \",iteration)\r\n # For each planning order in population we run it to get the cumulative reward\r\n for randomized_order in population:\r\n schedules = search_a_for_genetic(env,randomized_order)\r\n action_plan[i] = schedules\r\n test_env = deepcopy(env)\r\n success = False\r\n sumreward = 0\r\n # apply to the copied environment to calculate the sumreward for fitness score\r\n for action in schedules:\r\n _, _reward_dict, _done, _ = test_env.step(action)\r\n success = all(_done.values())\r\n sumreward = sumreward + sum(_reward_dict.values())\r\n reward[i] = sumreward\r\n #print(\"Total Time Step: \",test_env._elapsed_steps)\r\n #print(\"Randomize order: \", randomized_order)\r\n #print(\"Sum reward: \",sumreward)\r\n i += 1\r\n final_reward = reward.copy()\r\n highest_reward_of_each_iteration.append(final_reward)\r\n # find the randomized order give us the min reward\r\n key_min = min(reward.keys(), key=(lambda k: reward[k]))\r\n min_randomized_order = population[key_min]\r\n del reward[key_min]\r\n\r\n # Crossover\r\n # Select the parent\r\n # Find two combinations of randomized order that give us highest reward\r\n max_key_a = 
max(reward.keys(), key=(lambda k: reward[k]))\r\n del reward[max_key_a]\r\n randomized_order_a = population[max_key_a]\r\n\r\n max_key_b = max(reward.keys(), key=(lambda k: reward[k]))\r\n del reward[max_key_b]\r\n randomized_order_b = population[max_key_b]\r\n\r\n # Crossover\r\n # Generate a random crossover\r\n cross_point = random.randrange(1,n_agents-1)\r\n first_part_order_a = randomized_order_a[:cross_point]\r\n second_part_order_a = randomized_order_a[cross_point:]\r\n\r\n first_part_order_b = randomized_order_b[:cross_point]\r\n second_part_order_b = randomized_order_b[cross_point:]\r\n\r\n # Create two children based on the genes of parent\r\n child_a = first_part_order_a + second_part_order_b\r\n child_b = first_part_order_b + second_part_order_a\r\n\r\n # Mutation\r\n mutation(child_a)\r\n mutation(child_b)\r\n\r\n # remove the two combinations in the population that have lower reward\r\n population.remove(min_randomized_order)\r\n population.remove(randomized_order_b)\r\n\r\n # add two new children to the population\r\n\r\n population.append(child_a)\r\n population.append(child_b)\r\n\r\n\r\n # Find the best schedule\r\n max_key_reward = max(final_reward.keys(), key=(lambda k: final_reward[k]))\r\n best_order = population[max_key_reward]\r\n\r\n print(\"--------------------------------Result-------------------------------------\")\r\n print(\"Best Order: \", best_order)\r\n print(\"Reward: \",highest_reward_of_each_iteration)\r\n return best_order,action_plan[max_key_reward]", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def run_policy(env, policy, scaler, logger, episodes, task_reward_weight, imitation_reward_weight):\n total_steps = 0\n trajectories = []\n for e in range(episodes):\n observes, phases, actions, rewards, unscaled_obs, task_r, imitation_r, imitation_r_logs = run_episode(\n env, policy, scaler, task_reward_weight, imitation_reward_weight)\n total_steps += observes.shape[0]\n trajectory = {'observes': observes,\n 'phases': phases,\n 'actions': actions,\n 'rewards': rewards,\n 'unscaled_obs': unscaled_obs,\n 'task_r': task_r,\n 'imitation_r': imitation_r,\n 'imitation_r_logs': imitation_r_logs}\n trajectories.append(trajectory)\n unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])\n # update running statistics for scaling observations\n scaler.update(unscaled)\n logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),\n 'Steps': total_steps})\n\n return trajectories", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, idxs, weights = experiences\n \n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss MSE\n loss = (Q_expected - Q_targets.detach()).pow(2)\n # Add weights to loss\n loss = loss * weights\n # Add noise to loss to arrive at prior weights\n prios = loss + 1e-6\n # Take mean\n loss = loss.mean()\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update buffer priorities\n self.memory.update_priorities(zip(idxs, prios.data.cpu().numpy()))\n\n\n\n # ------------------- update target network ------------------- #\n 
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def _forwardImplementation(self, inbuf, outbuf):\n assert self.module\n \n values = self.module.getActionValues(self.state) \n \n actions = []\n if random() <= self.exploration:\n for i in range(self.shield_options):\n new_action = choice(range(len(values))) \n np.delete(values, new_action)\n actions.append(new_action)\n else:\n for i in range(self.shield_options):\n new_action = where(values == max(values))[0]\n new_action = choice(new_action) \n np.delete(values, new_action)\n actions.append(new_action)\n \n while len(actions) < self.outdim:\n actions.append(-1)\n \n outbuf[:] = actions", "async def main():\n #launching the browser in headless mode\n browser = await launch({'headless': True})\n page = await browser.newPage()\n #removing the timeout\n page.setDefaultNavigationTimeout(100000)\n #adding the stealth mode to be undetected\n await stealth(page)\n global userAgent\n userAgent = await page.evaluate('navigator.userAgent')\n #capture the response of every request and save the ones we want\n page.on('response', lambda response: asyncio.ensure_future(interceptResponse(response)))\n await page.goto(urlChallenge)\n await page.waitFor(1000)\n #scroll down to 
trigger the requests to get video data\n for _ in range(5):\n await page.evaluate(\"\"\"{window.scrollBy(0, document.body.scrollHeight);}\"\"\")\n await page.waitFor(1000)\n await page.waitFor(3000)\n await browser.close()", "def __init__(self, buffer_size, batch_size):\n\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n # set.experience = namedtuple(\"Experience\", field_names=['state', 'action', 'reward', 'next_state', 'done'])" ]
[ "0.5506977", "0.5496875", "0.514291", "0.5113934", "0.49886903", "0.4943921", "0.49165386", "0.49162146", "0.48796564", "0.48758185", "0.48588508", "0.48554084", "0.4835768", "0.48339865", "0.4816758", "0.48112303", "0.47756678", "0.47557953", "0.47492647", "0.4732167", "0.47211218", "0.47182503", "0.47160736", "0.47087047", "0.46735516", "0.46719927", "0.4670823", "0.46672094", "0.4664535", "0.46585825", "0.46573445", "0.4653074", "0.46500686", "0.46484753", "0.46458322", "0.46395472", "0.4633052", "0.46133447", "0.4586223", "0.45784605", "0.45737484", "0.45731956", "0.45714766", "0.45654237", "0.45580274", "0.45533952", "0.45533141", "0.45461223", "0.45410284", "0.45390153", "0.4529935", "0.45218176", "0.45157975", "0.45101532", "0.45078203", "0.4499534", "0.449913", "0.44980872", "0.44975594", "0.44728398", "0.446676", "0.4465887", "0.4459768", "0.44597232", "0.4454962", "0.44516662", "0.44472042", "0.44468963", "0.44463617", "0.4445567", "0.4445427", "0.4445427", "0.44427073", "0.44415724", "0.44375038", "0.44365877", "0.4436019", "0.44333148", "0.4427478", "0.44212767", "0.44207102", "0.4420465", "0.44178697", "0.44177103", "0.44154555", "0.44128725", "0.44115865", "0.441022", "0.44079447", "0.44022903", "0.44004327", "0.43933487", "0.43877703", "0.43867645", "0.43777922", "0.43743357", "0.43742126", "0.43741712", "0.43724653", "0.4368419" ]
0.63224953
0
Performs an update of the QNetwork using the provided optimizer and buffer
def update_q_net(
    q_net: VisualQNetwork,
    optimizer: torch.optim,
    buffer: Buffer,
    action_size: int
):
    BATCH_SIZE = 1000
    NUM_EPOCH = 3
    GAMMA = 0.9
    batch_size = min(len(buffer), BATCH_SIZE)
    random.shuffle(buffer)
    # Split the buffer into batches
    batches = [
        buffer[batch_size * start : batch_size * (start + 1)]
        for start in range(int(len(buffer) / batch_size))
    ]
    for _ in range(NUM_EPOCH):
        for batch in batches:
            # Create the Tensors that will be fed in the network
            obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
            reward = torch.from_numpy(
                np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
            )
            done = torch.from_numpy(
                np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
            )
            action = torch.from_numpy(np.stack([ex.action for ex in batch]))
            next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
            # Use the Bellman equation to update the Q-Network
            target = (
                reward
                + (1.0 - done)
                * GAMMA
                * torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
            )
            mask = torch.zeros((len(batch), action_size))
            mask.scatter_(1, action, 1)
            prediction = torch.sum(q_net(obs) * mask, dim=1, keepdim=True)
            criterion = torch.nn.MSELoss()
            loss = criterion(prediction, target)
            # Perform the backpropagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_optimizer(self, context, optimizer, host):\n pass", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def _refresh_buffers(self) -> None:", "def reload(self,offline_buffer):\n #loading online buffer from offline buffer by sampling (online_buffer.buffer_size) samples \n self.buffer = SumTree(self.buffer_size)\n names, idxs = offline_buffer.sample_batch(self.buffer_size)\n self.offline_idxs = idxs\n state , action , reward, done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[0])\n #loop on names and load in the online buffer\n for i in range(len(names)-1):\n next_state , next_action , next_reward , done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[i+1])\n #done = 0\n self.memorize(state, action, reward, done, next_state, error=[1])\n state , action , reward = next_state , next_action , next_reward", "def update(src):", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()", "def updateRPC(loc,weight): #status: Done, not tested\r\n pass", "def _update_optimizer(self, hyperparameters, score, fit=True):\n if self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)", "def update(self, params):", "def update_parameters(parameters, grads, learning_rate):\n pass", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def 
update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)", "def __init__(self, state_size, action_size, fc1_units, fc2_units, buffer_size, batch_size, alpha, gamma, tau,\n local_update_every, target_update_every, seed, a, b, b_increase, b_end, dbl_dqn=False, priority_rpl=False, duel_dqn=False):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Hyperparameters\n self.alpha = alpha # Learning rate\n self.gamma = gamma # Discount parameter\n self.tau = tau # Interpolation parameter\n self.local_update_every = local_update_every # Number of actions to take before updating local net weights\n self.target_update_every = target_update_every # Number of actions to take before updating target net weights\n self.batch_size = batch_size # Number of experiences to sample during learning\n self.buffer_size = buffer_size # Size of memory buffer\n self.a = a # Sampling probability (0=random | 1=priority)\n self.b = b # Influence of importance sampling weights over learning\n self.b_increase = b_increase # Amount to increase b by every learning step\n self.b_end = b_end # Maximum value for b\n\n # Agent modifications\n self.dbl_dqn = dbl_dqn # Double Q Learning\n self.priority_rpl = priority_rpl # Prioritised Experience Replay\n self.duel_dqn = duel_dqn # Duelling Q Networks\n\n # Q-Network\n if self.duel_dqn:\n self.qnetwork_local = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n else:\n self.qnetwork_local = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.alpha)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, buffer_size, batch_size, seed, priority_rpl)\n # Initialize time step (for updating every local_update_every/target_update_every steps)\n self.t_step = 0", "def update_params(self, learning_rate=0.1):\n\n self.params['W'] = self.params['W'] - learning_rate * self.dW # update weights\n self.params['b'] = self.params['b'] - learning_rate * self.db # update bias(es)", "def updateGraphs(self):\n # first update all three buffers\n tuiBufferName = self.dataClient.recv() # receive 'error'\n while tuiBufferName != 'end buffers':\n tuiData = self.dataClient.recv()\n self.logger.debug(f'Appending {tuiData} to buffer {tuiBufferName}')\n\n if(tuiBufferName == 'error'):\n self.model.errorBuffer.append([float(tuiData.flat[0])])\n 
if(tuiBufferName == 'output'):\n self.model.outputBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'reference'):\n self.model.referenceBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output-error'):\n self.model.errorPercentage = tuiData.flat[0]\n\n tuiBufferName = self.dataClient.recv()", "def update(self, ex):\r\n if not self.optimizer:\r\n raise RuntimeError('No optimizer set.')\r\n\r\n # Train mode\r\n self.network.train()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n # Run forward\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n # Compute loss and accuracies\r\n loss = self.criterion(score, label)\r\n\r\n if self.args.gradient_accumulation_steps > 1:\r\n loss = loss / self.args.gradient_accumulation_steps\r\n\r\n if self.args.fp16:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n if (self.updates + 1) % self.args.gradient_accumulation_steps == 0:\r\n if self.args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clipping)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\r\n\r\n self.optimizer.step()\r\n self.scheduler.step() # Update learning rate schedule\r\n self.optimizer.zero_grad()\r\n\r\n self.updates += 1\r\n\r\n return loss.item()", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def _update(self, nbrs, nbrs_y, query, query_y):\n\n # Set up the graph for our shared memory variables\n new_K, new_A, new_V = self.K, self.A, self.V\n\n # Condition (1): First returned neighbour shares the same query 
label\n correct_query = T.eq(nbrs_y[:, 0], query_y).nonzero()[0]\n correct_mem = nbrs[correct_query, 0] # Idx to memory keys\n\n normed_keys = tensor_norm(query[correct_query] + new_K[correct_mem])\n new_K = T.set_subtensor(new_K[correct_mem], normed_keys)\n new_A = T.set_subtensor(new_A[correct_mem], 0.)\n\n # Condition (2): First returned neighbour does not share query label.\n # Add the key and label from query to memory\n incorrect_mask = T.neq(nbrs_y[:, 0], query_y)\n incorrect_query = incorrect_mask.nonzero()[0]\n\n # We need to find len(incorrect_query) locations in memory to write to.\n # Noise is added to randomize selection.\n age_mask = T.ge(new_A, T.max(new_A) - self.C) #1d\n oldest_idx = tensor_choose_k(age_mask, self.rng,\n k=T.sum(incorrect_mask),\n random=True).flatten()\n\n new_K = T.set_subtensor(new_K[oldest_idx], query[incorrect_query])\n new_V = T.set_subtensor(new_V[oldest_idx], query_y[incorrect_query])\n new_A = T.set_subtensor(new_A[oldest_idx], 0.)\n\n # Increment the age of all non-updated indices by 1\n new_A = new_A + 1.\n new_A = T.inc_subtensor(new_A[correct_mem], -1.)\n new_A = T.inc_subtensor(new_A[oldest_idx], -1.)\n\n return OrderedDict({(self.K, new_K), (self.V, new_V), (self.A, new_A)})", "def update(self, niter, expert_gen, pq_buffer, batch_size, num_grad_steps):\n self.train()\n pqb_gen = pq_buffer.data_gen_infinite(min(batch_size, len(pq_buffer)))\n\n if niter <= self.warmup:\n num_grad_steps *= (self.warmup + 1 - niter)\n\n loss_val = 0\n n = 0\n for _ in range(num_grad_steps):\n\n expert_batch = next(expert_gen)\n pqb_batch = next(pqb_gen)\n\n expert_state = expert_batch[0]\n pqb_state = pqb_batch[0]\n\n pqb_out = self.tower(pqb_state)\n expert_out = self.tower(expert_state)\n\n reward_bias = - torch.clamp(pqb_out, max=0).mean(0) - torch.clamp(expert_out, max=0).mean(0)\n loss = pqb_out.mean(0) - expert_out.mean(0) + 2*reward_bias\n\n loss_val += loss.item()\n n += 1\n\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.parameters(), max_norm=10.)\n self.optimizer.step()\n\n # weight clamping to enforce the Lipchitz constraint\n for p in self.parameters():\n p.data.clamp_(-self.clip, self.clip)\n\n return loss_val / n", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in 
self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def update(self, sess, batch, *args, **kwargs):\n # Calculated target Q values using target estimator\n assert \"state\" in batch and \"action\" in batch and \\\n \"reward\" in batch and \"next_state\" in batch and \\\n \"episode_done\" in batch\n target_q_val = self._target_estimator.estimate(\n batch[\"state\"], batch[\"action\"], batch[\"reward\"],\n batch[\"next_state\"], batch[\"episode_done\"])\n\n # Prepare data and fit Q network\n feed_dict = {self._input_target_q: target_q_val,\n self._input_action: batch[\"action\"]}\n if \"_weight\" in batch:\n feed_dict[self._input_sample_weight] = batch[\"_weight\"]\n feed_dict.update(self._q.input_dict(batch[\"state\"]))\n fetch_dict = {\n \"action\": batch[\"action\"], \"reward\": batch[\"reward\"],\n \"done\": batch[\"episode_done\"],\n \"q\": self.selected_q, \"target_q\": target_q_val,\n \"optimizer_loss\": self._sym_loss,\n \"td\": self._op_td,\n \"td_losses\": self._op_losses,\n \"td_losses_weighted\": self._op_losses_weighted}\n update_run = network.UpdateRun(feed_dict=feed_dict, fetch_dict=fetch_dict)\n\n return update_run", "def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def update_send(self, parameters, loss): #parameters为训练网络的参数\n # Increase the clock value\n self.clock += 1\n\n # Serve the new parameters\n state = {'clock': self.clock, 'loss': loss}\n # 在rx线程中保存此时的loss和模型参数等\n self.rx.set_current_state(state, parameters)\n\n self.fetching = True\n self.tx.fetch_send()", "def update(self):\n self.arest.update()", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * 
features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def update(oformat, param, stream, year, month, timestep, back, queue):\n ####I'm separating this in update and , so eventually update can check if no yr/mn passed or only yr passed which was the last month downloaded\n \n update = True\n if back:\n print('You cannot use the backwards option with update')\n sys.exit()\n if queue:\n dump_args(update, oformat, stream, list(param), year, list(month), timestep, back)\n else: \n api_request(update, oformat, stream, list(param), year, list(month), timestep, back)", "def update(self, gradient, optimizer=None, relink=None):\n # Recover the defaults, if missing\n optimizer = self._resolve_defaults(optimizer=optimizer)[0]\n # Set the gradient\n self.set_gradient(gradient, relink=(self._config.relink if relink is None else relink))\n # Perform the update step\n optimizer.step()", "def update_Q(self, reward):\n old_estimate = self.q_estimates[self.prev_action]\n self.q_estimates[self.prev_action] = old_estimate + 1/self.N[self.prev_action] * (reward - old_estimate)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def update(self, reward):\n raise NotImplementedError", "def update(self, state, action, nextState, reward):\n # print \"Update\"\n difference = (reward + self.discount*self.compValFromState(nextState)) - self.getQValue(state, action)\n features = self.featExtractor.getFeatures(state, self.index)\n #print \"features\", features, \"difference\", difference, \"weights\", self.weights\n for key in self.weights:\n self.weights[key] = self.alpha * difference * features[key]", "def update(self, now=None):\n if self.protocol == \"udp\":\n # UDP only have 1 way attribute\n result = self._run_test(ATTR_DOWNLOAD)\n self.data[ATTR_DOWNLOAD] = self.data[ATTR_UPLOAD] = getattr(\n result, \"Mbps\", None\n )\n self.data[ATTR_VERSION] = getattr(result, \"version\", None)\n else:\n result = self._run_test(ATTR_DOWNLOAD)\n self.data[ATTR_DOWNLOAD] = getattr(result, \"received_Mbps\", None)\n self.data[ATTR_VERSION] = getattr(result, \"version\", None)\n self.data[ATTR_UPLOAD] = getattr(\n self._run_test(ATTR_UPLOAD), \"sent_Mbps\", None\n )\n\n dispatcher_send(self._hass, DATA_UPDATED, self.host)", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)", "def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = 
True\r\n query_cache.set(self.iden, self.data)", "def update(self, idx, new_error):\n self.buffer.update(idx, self.priority(new_error))", "def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n self.get_unet_model()", "def update():", "def update():", "def _update(self):\n if (len(self.buffer) < self.batch_size):\n return\n self.training_iter += 1\n # Make sure actor_target and critic_target are in eval mode\n assert not self.model.q_target_1.training\n assert not self.model.q_target_2.training\n\n assert self.model.q_1.training\n assert self.model.q_2.training\n transitions = self.buffer.sample(self.batch_size)\n batch = self.buffer.transition(*zip(*transitions))\n state_batch = torch.tensor(batch.state, device=self.device).float()\n action_batch = torch.tensor(batch.action,\n device=self.device).unsqueeze(-1).long()\n reward_batch = torch.tensor(batch.reward,\n device=self.device).unsqueeze(-1).float()\n next_state_batch = torch.tensor(batch.next_state,\n device=self.device).float()\n is_done_batch = torch.tensor(batch.done,\n device=self.device).unsqueeze(-1).bool()\n with torch.no_grad():\n Q_next_1 = ((~is_done_batch)\n * (self.model.q_target_1(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))\n Q_next_2 = ((~is_done_batch)\n * (self.model.q_target_2(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))\n\n # Use max want to avoid underestimation bias\n Q_next = torch.max(Q_next_1, Q_next_2)\n Q_expected = reward_batch + self.gamma * Q_next\n\n Q_1 = self.model.q_1(state_batch).gather(-1, action_batch)\n Q_2 = self.model.q_2(state_batch).gather(-1, action_batch)\n L_1 = nn.MSELoss()(Q_1, Q_expected)\n L_2 = nn.MSELoss()(Q_2, Q_expected)\n self.loss.append([L_1.item(), L_2.item()])\n self.model.q_optimizer_1.zero_grad()\n self.model.q_optimizer_2.zero_grad()\n L_1.backward()\n L_2.backward()\n self.model.q_optimizer_1.step()\n self.model.q_optimizer_2.step()\n self.store_Q.append([Q_1.tolist(), Q_2.tolist(), Q_expected.tolist()])\n if (self.training_iter % self.update_freq) == 0:\n self.model.update_target_nn()", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def update(self, next_state, reward):\n pass", "def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()", "def get_updater(optimizer):\n return Updater(optimizer)", "def update_target(self, target, pred, update_rate):\n for target_param, pred_param in zip(target.parameters(), pred.parameters()):\n target_param.data.copy_((1.0 - update_rate)\n * 
target_param.data + update_rate * pred_param.data)", "def update(self, state, action, nextState, reward):\n candidateQ = reward + self.discount * \\\n self.computeValueFromQValues(nextState)\n currentQ = self.getQValue(state, action)\n difference = candidateQ - currentQ\n features = self.featExtractor.getFeatures(state, action)\n for feat in features:\n self.weights[feat] += self.alpha * difference * features[feat]", "def initialize(self, *args, **kwargs): \n super().initialize(*args, **kwargs)\n self.updates_per_optimize = 1", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update_weights(text):\n soup = bs4.BeautifulSoup(text, 'lxml')\n if ord_metric == OrderingMetric.BACK_LINK_COUNT:\n for l in soup.find_all('a', href=True):\n base = req_obj.get_base_url()\n link = urljoin(base, l['href'])\n if re.match(pattern, link) is not None:\n if link not in link_weights:\n link_weights[link] = 1\n else:\n link_weights[link] += 1\n\n elif ord_metric == OrderingMetric.FORWARD_LINK_COUNT:\n for l in soup.find_all('a', href=True):\n base = req_obj.get_base_url()\n link = urljoin(base, l['href'])\n if re.match(pattern, link) is not None:\n if link not in link_weights:\n link_weights[link] = get_num_forward_link(link)\n\n elif ord_metric == OrderingMetric.VSM:\n vsmObj = VectorSpaceModel()\n vsmObj.fetch_and_rank_summary(queue, link_weights)", "def update_params(self):\n for layer in range(self.num_layers):\n for direction in range(self.num_directions):\n for param_name in self._get_param_names(direction, layer):\n param = getattr(self.module_to_quantize, param_name)\n param.data = getattr(self, param_name).data\n\n _logger.debug('Updated params for QcQuantizeRNN')", "def update(self):\n try:\n with requests.Session() as sess:\n response = sess.send(self._request, timeout=10)\n self.raw_data = response.json()\n self.data_format()\n self.available = True\n except (ValueError, requests.exceptions.ConnectionError):\n _LOGGER.warning(\"Unable to fetch data from Google Wifi\")\n self.available = False\n self.raw_data = None", "def _update(self, count=True, forced=False):", "def query_str(self, new_query_str):\n self.query_buffer.text = new_query_str", "def buffer_update(buffer):\n\n\thdata = weechat.hdata_get(\"buffer\")\n\n\tbuffers = \",\".join(get_merged_buffers(buffer))\n\tname = \"%s_%s\" % (SCRIPT_NAME, buffers)\n\n\tif buffer_searching(buffer):\n\t\tif buffer_filtering(buffer):\n\t\t\tfilter_addreplace(name, buffers, \"*\", buffer_build_regex(buffer))\n\t\telif not buffer_filtering(buffer) and filter_exists(name):\n\t\t\tfilter_del(name)\n\telif filter_exists(name):\n\t\tfilter_del(name)\n\n\twhere = weechat.hdata_integer(hdata, buffer, \"text_search_where\")\n\tweechat.buffer_set(buffer, \"localvar_set_%s_warn\" % SCRIPT_LOCALVAR, \"1\" if where == 3 else \"0\") # warn about incorrect filter\n\n\tweechat.bar_item_update(SCRIPT_BAR_ITEM)", "def 
update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)", "def optimize(self):\n self.check_is_ready()\n self.check_infeasibility()\n solution_graph, obj_val = self.find_shortest_network_with_ADH((self.old_network_graph is not None))\n self.solution_graph = gnx.GeoMultiGraph(solution_graph, crs=self.optimization_graph.crs)", "def update(self, ob_no, next_ob_no, re_n, terminal_n):\n\n # TODO: Implement the pseudocode below:\n\n # do the following (self.num_grad_steps_per_target_update * self.num_target_updates) times:\n # every self.num_grad_steps_per_target_update steps (which includes the first step),\n # recompute the target values by\n #a) calculating V(s') by querying this critic network (ie calling 'forward') with next_ob_no\n #b) and computing the target values as r(s, a) + gamma * V(s')\n # HINT: don't forget to use terminal_n to cut off the V(s') (ie set it to 0) when a terminal state is reached\n # every time,\n # update this critic using the observations and targets\n # HINT1: need to sess.run the following:\n #a) critic_update_op\n #b) critic_loss\n # HINT2: need to populate the following (in the feed_dict):\n #a) sy_ob_no with ob_no\n #b) sy_target_n with target values calculated above\n\n for i in range(self.num_grad_steps_per_target_update * self.num_target_updates):\n if i % self.num_grad_steps_per_target_update == 0:\n next_V_n = self.forward(next_ob_no) * (1 - terminal_n)\n target_n = re_n + self.gamma * next_V_n\n loss, _ = self.sess.run([self.critic_loss, self.critic_update_op], feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n})\n\n return loss", "def update(\n self,\n batch: ModelInput,\n optimizer: torch.optim.Optimizer,\n target: Optional[torch.Tensor] = None,\n idx=None,\n next_obs=None,\n ) -> float:\n optimizer = cast(torch.optim.Optimizer, optimizer)\n self.train()\n optimizer.zero_grad()\n loss = self.loss(batch, target=target, idx=idx, next_obs=next_obs)\n loss.backward()\n optimizer.step()\n return loss.detach().item()", "def optimize_agent(self, itr, samples=None, sampler_itr=None, offline_samples=None):\n itr = itr if sampler_itr is None else sampler_itr # Async uses sampler_itr.=\n if samples is not None:\n if self.save_data:\n self.data_writer.write(samples)\n samples_to_buffer = self.samples_to_buffer(samples)\n self.replay_buffer.append_samples(samples_to_buffer)\n opt_info = ModelOptInfo(*([] for _ in range(len(ModelOptInfo._fields))))\n if not self.offline and itr < self.min_itr_learn:\n return opt_info\n for _ in range(1 if self.offline else self.updates_per_optimize):\n start = time.time()\n samples_from_replay = self.sample_batch()\n\n end = time.time()\n sample_time = end - start\n\n forward_time = time.time()\n rl_loss, td_abs_errors, goal_loss,\\\n t0_spr_loss, model_spr_loss, \\\n diversity, inverse_model_loss, bc_loss, \\\n goal_abs_errors \\\n = self.loss(samples_from_replay, self.offline)\n forward_time = time.time() - forward_time\n\n total_loss = self.rl_weight * rl_loss\n total_loss += self.spr_weight * model_spr_loss\n total_loss += self.goal_weight * goal_loss\n total_loss += self.inverse_model_weight * inverse_model_loss\n total_loss += self.bc_weight * bc_loss\n\n 
self.optimizer.zero_grad()\n total_loss.backward()\n stem_params, model_params = self.model.split_stem_model_params()\n if self.clip_grad_norm > 0:\n grad_norm = torch.nn.utils.clip_grad_norm_(stem_params,\n self.clip_grad_norm)\n else:\n grad_norm = 0\n if self.clip_model_grad_norm > 0:\n model_grad_norm = torch.nn.utils.clip_grad_norm_(model_params,\n self.clip_model_grad_norm)\n else:\n model_grad_norm = 0\n\n cnn_weight_norm = find_weight_norm(self.model.conv.parameters())\n\n self.optimizer.step()\n\n if not self.offline and self.prioritized_replay:\n self.replay_buffer.update_batch_priorities(td_abs_errors)\n opt_info.loss.append(rl_loss.item())\n opt_info.gradNorm.append(torch.tensor(grad_norm).item()) # grad_norm is a float sometimes, so wrap in tensor\n opt_info.GoalLoss.append(goal_loss.item())\n opt_info.modelGradNorm.append(torch.tensor(model_grad_norm).item())\n opt_info.T0SPRLoss.append(t0_spr_loss.item())\n opt_info.InverseModelLoss.append(inverse_model_loss.item())\n opt_info.BCLoss.append(bc_loss.item())\n opt_info.CNNWeightNorm.append(cnn_weight_norm.item())\n opt_info.SampleTime.append(sample_time)\n opt_info.ForwardTime.append(forward_time)\n opt_info.Diversity.append(diversity.item())\n opt_info.ModelSPRLoss.append(model_spr_loss.item())\n opt_info.tdAbsErr.extend(td_abs_errors[::8].cpu().numpy()) # Downsample.\n opt_info.GoalError.extend(goal_abs_errors[::8].cpu().numpy()) # Downsample.\n self.update_counter += 1\n if self.update_counter % self.target_update_interval == 0:\n self.agent.update_target(self.target_update_tau)\n self.update_itr_hyperparams(itr)\n return opt_info", "def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}", "def updatelearningrate(self, epoch):\n self.lr = getlearningrate(epoch=epoch, opt=self.opt)\n # update learning rate of model optimizer\n if isinstance(self.model, list):\n count = 0\n for param_group in self.optimzer.param_groups:\n # if type(model) is <list> then update modules with different learning rate\n param_group['lr'] = self.lr\n count += 1\n # print \">>> count is:\", count-1\n else:\n for param_group in self.optimzer.param_groups:\n param_group['lr'] = self.lr", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q 
#calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def update(self, *args, **kw):\n pass", "def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network", "def update(self, batch, batch_index):\n super(BayesianOptimization, self).update(batch, batch_index)\n self.state['n_evidence'] += self.batch_size\n\n params = batch_to_arr2d(batch, self.target_model.parameter_names)\n self._report_batch(batch_index, params, batch[self.target_name])\n\n optimize = self._should_optimize()\n self.target_model.update(params, batch[self.target_name], optimize)\n if optimize:\n self.state['last_GP_update'] = self.target_model.n_evidence", "def update(self, x_dict, y_dict, weight):\n assert len(x_dict) == len(y_dict), \"invalid # of qids\"\n \n qids = self.__get_shuffled_qids(x_dict, y_dict, weight.epoch)\n w = weight.get_dense_weight()\n for qid in tqdm(qids):\n w = approx_ap(x_dict[qid].toarray(), y_dict[qid], w, self.eta, self.alpha, self.beta)\n weight.set_weight(sp.csr_matrix(w.reshape((1, weight.dims))))\n weight.epoch += 1", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def update(self):\n self.brain.update()", "def update(self, BackoffIteration=None, CalculateJitter=None, CalculateLatency=None, ContentInformation=None, CountRandomLoadRate=None, DelayAfterTransmit=None, Duration=None, DynamicRateList=None, EnableBackoffIteration=None, EnableDataIntegrity=None, EnableExtraIterations=None, EnableFastConvergence=None, EnableLayer1Rate=None, EnableMinFrameSize=None, EnableOldStatsForReef=None, EnableSaturationIteration=None, EnableStopTestOnHighLoss=None, ExtraIterationOffsets=None, FastConvergenceDuration=None, FastConvergenceThreshold=None, FixedIteration=None, FixedLoadUnit=None, ForceRegenerate=None, FrameLossUnit=None, FramesPerBurstGap=None, Framesize=None, FramesizeFixedValue=None, FramesizeList=None, Gap=None, GenerateTrackingOptionAggregationFiles=None, IncrementLoadUnit=None, InitialIncrementLoadRate=None, InitialStepLoadRate=None, Ipv4rate=None, Ipv6rate=None, LatencyBins=None, LatencyBinsEnabled=None, LatencyType=None, LoadRateList=None, LoadRateValue=None, LoadType=None, MapType=None, MaxIncrementLoadRate=None, MaxRandomLoadRate=None, MaxStepLoadRate=None, MinFpsRate=None, MinKbpsRate=None, MinRandomLoadRate=None, Numtrials=None, PercentMaxRate=None, PortDelayEnabled=None, PortDelayUnit=None, PortDelayValue=None, ProtocolItem=None, RandomLoadUnit=None, RateSelect=None, ReportSequenceError=None, ReportTputRateUnit=None, Resolution=None, Rfc2889ordering=None, SaturationIteration=None, SkipDefaultPassFailEvaluation=None, StaggeredStart=None, StepFrameLossUnit=None, 
StepIncrementLoadRate=None, StepLoadUnit=None, StepStepLoadRate=None, StepTolerance=None, StopTestOnHighLoss=None, SupportedTrafficTypes=None, TestTrafficType=None, TimelineRateList=None, Tolerance=None, TxDelay=None, UsePercentOffsets=None):\n\t\tself._update(locals())", "def perform_update(self, gradient):\n w = sys.modules[self.shared_mem_name].__dict__[\"w\"]\n w -= self.learning_rate * gradient", "def update( ):\r\n pass", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def update(self, ex):\n if not self.optimizer:\n raise RuntimeError('No optimizer set.')\n\n # Train mode\n self.network.train()\n\n if self.use_cuda:\n for key in ex:\n #if isinstance(ex[key], torch.Tensor):\n try:\n ex[key] = ex[key].cuda(non_blocking=True)\n except:\n pass\n\n # Run forward\n net_loss = self.network(ex)\n\n loss = net_loss[\"total_loss\"]\n\n loss.backward()\n\n clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.updates += 1\n return {\n 'loss': loss,\n \"loc_loss\": net_loss[\"loc_loss\"],\n \"fix_loss\": net_loss[\"target_loss\"],\n }", "def update(self, updateObj):\n #if we've allocated all free entries in tlb\n if len(self._allocatedQ) == self._maxSize:\n #remove the old entries from the tlb (fifo order)\n oldUpdateObj = self._allocatedQ.popleft()\n del self._addressMap[oldUpdateObj.requestAddr]\n\n reqAddr,tranAddr = updateObj.requestAddr, updateObj.translatedAddr\n \n self._addressMap[reqAddr] = tranAddr\n self._allocatedQ.append(updateObj)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def update_op(self, loss, learning_rate,var):\n #train_op = None\n ####### Implementation Here ######\n #pass\n train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss = loss,var_list = var )\n return train_op", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]\n\n #if self.epsilon > self.epsilon_min:\n # self.epsilon *= self.epsilon_decay", "def update(self, *args, **kwargs):", "def update_if_necessary(self, timesteps_executed):\n if self.updating:\n # Are we allowed to update?\n if timesteps_executed > self.steps_before_update and \\\n (self.agent.observe_spec[\"buffer_enabled\"] is False or # no update 
before some data in buffer\n timesteps_executed >= self.agent.observe_spec[\"buffer_size\"]) and \\\n timesteps_executed % self.update_interval == 0: # update frequency check\n loss = 0\n for _ in range_(self.update_steps):\n #l, s_, a_, r_, t_ = self.agent.update()\n loss += self.agent.update()\n #self.logger.info(\"FROM MEM: s={} a={} r={} t={}\".format(s_, a_, r_, t_))\n #loss += l\n return loss\n\n return None", "def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)", "def update(self, arm, reward, context):", "def _request_and_measure(self, count):\n for i in range(count):\n self.rate_measurer.update_rate()\n\n def handle_request_error(result):\n self.rate_measurer.request_failed(result)\n write_failure(result)\n\n for i in range(self.request_rate):\n d = self.control_service.move_dataset(self.dataset_node.uuid,\n self.dataset_id)\n self.rate_measurer.request_sent()\n d.addCallbacks(self.rate_measurer.response_received,\n errback=handle_request_error)", "def update(self):\n result = [], 0, False\n\n if self.t % self.t_train_freq == 0:\n result = self.q_learning_minibatch()\n\n if self.t % self.t_target_q_update_freq == self.t_target_q_update_freq - 1:\n # Copy \n self.update_target_q_network()\n\n return result", "def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update(self, buffer: ReplayBuffer) -> np.ndarray:\n raise NotImplementedError", "def test_optimize(self):\n u = stellr.UpdateCommand(TEST_HTTP)\n\n u.add_optimize()\n self.assertEqual(len(u._commands), 1)\n self.assertTrue('optimize' in u._commands[0])\n self.assertEqual(u.body, '{\"optimize\": {}}')", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad 
= False\n architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def update_user(self, blob, update=True):\n # do we need to trigger rebuilding the cache\n if blob.get(\"remaining\", 0) == 1:\n self.rebuild = True\n if update:\n self.kwargs[\"rebuild\"] = self.rebuild\n self.rebuild = False\n\n # update walks to match target naccept\n accept_prob = max(0.5, blob[\"accept\"]) / self.kwargs[\"walks\"]\n delay = self.nlive // 10 - 1\n n_target = getattr(_SamplingContainer, \"naccept\", 60)\n self.walks = (self.walks * delay + n_target / accept_prob) / (delay + 1)\n self.kwargs[\"walks\"] = min(int(np.ceil(self.walks)), _SamplingContainer.maxmcmc)\n\n self.scale = blob[\"accept\"]", "def update(self) -> None:\n ...", "def update(self, value: Opt[bytes], wal: bool = True):\n self.value = value\n self.wal = wal", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)" ]
[ "0.6433153", "0.5484779", "0.5369425", "0.53328884", "0.52981716", "0.52732706", "0.5266272", "0.521479", "0.5200708", "0.51710343", "0.5088552", "0.50860375", "0.50787526", "0.507192", "0.507192", "0.50715756", "0.505829", "0.5000706", "0.4999721", "0.4979605", "0.496521", "0.49646664", "0.49576077", "0.49535808", "0.49422154", "0.49336103", "0.49218482", "0.49211517", "0.49178496", "0.49150893", "0.4908645", "0.49002224", "0.48647952", "0.48603314", "0.48516917", "0.48203352", "0.4808238", "0.48034343", "0.47970903", "0.47968054", "0.4790311", "0.4789689", "0.47875583", "0.47875583", "0.47842157", "0.47842097", "0.47790098", "0.47743097", "0.47734034", "0.47706425", "0.4766661", "0.4763236", "0.47615302", "0.47615302", "0.4760133", "0.47577354", "0.47542745", "0.47419026", "0.4731907", "0.4731888", "0.47305027", "0.47282827", "0.47239935", "0.47223446", "0.4721382", "0.47181678", "0.4714761", "0.47118253", "0.4701414", "0.46882766", "0.46876034", "0.46830103", "0.4675838", "0.46705914", "0.46665955", "0.46630135", "0.46595654", "0.4657934", "0.46506146", "0.46481836", "0.46424457", "0.46406454", "0.46392497", "0.4638865", "0.46368712", "0.46357712", "0.46336326", "0.46253762", "0.46194142", "0.46117166", "0.46101713", "0.46013418", "0.4600179", "0.45984498", "0.45968425", "0.4595771", "0.4594056", "0.4593322", "0.45868292", "0.45861608" ]
0.62525785
1
Search for lback index: self._in_loop becomes true in the second state of the loop
def _get_lback_index(self, model, last) -> int:
    assert last > 0  # last state cannot be loop-back.
    assert model.get_value(self.totime(self._in_loop, last)).is_true()
    assert model.get_value(self.totime(self._in_loop, 0)).is_false()
    idx = last - 1
    while model.get_value(self.totime(self._in_loop, idx)).is_true():
        idx -= 1
    assert idx >= 0
    assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true()
    assert model.get_value(self.totime(self._in_loop, idx)).is_false()
    assert model.get_value(self.totime(self.start_loop, idx)).is_true()
    return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False", "def bookkeep(self) :\n\t\tself.loopiter += 1", "def KeepAdvancingSolutionLoop(self):\n return self.step < self.nsteps", "def step_back_while(cur_index, condition):\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index", "def loop():\n global loop_idx\n sys.stdout.write('loop index %d/%d\\r\\n' % (loop_idx, _LOOPS))\n time.sleep(0.5)\n loop_idx += 1\n return loop_idx > _LOOPS", "def end_loop(self):\n # if (not self.tape.current_cell()):\n # Jump to the start of the loop\n self.instruction_pointer = (self.jump_map[self.instruction_pointer]-1)\n #else:\n # pass", "def backward_step():\n #print 'a step backward'\n maze.turn_left()\n maze.turn_left()\n if maze.found():\n return maze.found()\n maze.go()\n maze.turn_left()\n maze.turn_left()", "def has_loop(self) -> bool:\n try:\n list(self)\n return False\n except ContainsLoopError:\n return True", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False", "def endloop(self):\n try:\n n, start = self._loop_stack[-1]\n except IndexError:\n print(\"No loops remaining.\")\n return\n if n == 1:\n self._loop_stack.pop()\n else:\n self._loop_stack[-1][0] -= 1\n self._pc = start", "async def checkNewLoop(self):\n pass", "def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n #print(self.data_handler.get_latest_bar_datetime(self.symbol_list[0]))\n else:\n break\n while self.backtest:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n try:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n except EquityError:\n print('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n try:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n except EquityError:\n print ('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)", "def _is_at_end(self, binvect):\n last = max(k for (k, v) in enumerate(binvect) if v == 1)\n n_step = len(self.pas)\n steps_between = np.arange(last + 1, n_step)\n if 0 <= len(steps_between) <= self._n_to_end:\n self._set_label(binvect, still)\n for k in steps_between:\n self.labels[k] = still\n return True\n else:\n return False", "def _is_at_end(self):\n return self.current >= len(self.source)", "def one_step_back(self):\n if (self.row -1<0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row - 1][self.column] == False):\n return False\n else:\n self.row -= 1\n self.battery -= 1\n return True", "def can_go_back(self):\n return self._pointer >= 1", "def recurrent(self):\n return False", "def train_loop_pre(self, current_step):\r\n pass", "def 
_do_iteration(self):\n return True", "def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)", "def has_previous(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False", "def detectIfListHasLoop (self):\n\t\tslow = self.head\n\t\tif slow is None:\n\t\t\treturn False\n\t\tfast = self.head\n\t\tlength = 0\n\t\twhile (fast is not None) and ((slow != fast) or (length == 0)):\n\t\t\tslow = slow.getNext()\n\t\t\tfast = fast.getNext()\n\t\t\tif fast is None:\n\t\t\t\tbreak\n\t\t\tfast = fast.getNext()\n\t\t\tlength += 1\n\t\tif (slow == fast) and (length > 0):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_up(self):\n self.loop = file_to_loop(self.loopFile)\n if len(self.loop) == 0:\n return False\n return True", "def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)", "def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret", "def algorithm_loop(self):", "def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n while True:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n elif event.type == EventType.ORDER_SEND:\n 
self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)", "def loop(self):\n while not rospy.is_shutdown():\n\n rospy.logdebug(\"Loop\")\n state = self.move_base.get_state()\n\n self.counter +=1\n if(self.counter>6 or state==3):\n rospy.logdebug(\"-------------------------\")\n rospy.logdebug(\"Recalculate Frontriers ! \")\n rospy.logdebug(\"-------------------------\")\n\n self.counter = 0\n frontiers_num = self.update()\n\n #break condition\n if frontiers_num==0 :\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"NO FRONTIERS FOUND EXPLORATION COMPLETE\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n break\n\n\n\n rate.sleep()", "def sliding(self):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i] # current pos\n X, Y = self.tilePOS[self.tiles[i]] # target pos\n if x != X or y != Y:\n return True", "def loop_stop(self):\n super(HasLoops, self).loop_stop()\n loops = reversed(getattr(self, \"_loops\", []))\n for loop in loops:\n loop.loop_stop()", "def loops_back_to_screen(self):\r\n for segment in self.all_turtles:\r\n if segment.xcor() < -300 or segment.xcor() > 300:\r\n segment.goto(-segment.xcor(), segment.ycor())\r\n\r\n elif segment.ycor() < -300 or segment.ycor() > 300:\r\n segment.goto(segment.xcor(), -segment.ycor())", "async def queueloop(self, ctx: commands.Context) -> Optional[bool]:\n\n queue = self.queue[ctx.guild.id]\n\n queue.loop = (\n Loops.QUEUE_LOOP\n if self.queue[ctx.guild.id].loop != Loops.QUEUE_LOOP\n else Loops.NO_LOOP\n )\n\n if queue.loop == Loops.QUEUE_LOOP:\n queue.queue_loop_start = queue.pos\n\n return queue.loop == Loops.QUEUE_LOOP", "def _is_at_beginning(self, binvect):\n\n first = min(k for (k, v) in enumerate(binvect) if v == 1)\n steps_between = np.arange(first)\n if 0 <= len(steps_between) <= self._n_to_end:\n self._set_label(binvect, still)\n for k in steps_between:\n self.labels[k] = still\n return True\n else:\n return False", "def __win(self, a):\n for i in range(len(a)-self.k+1):\n flag = True\n for j in range(self.k):\n if not a[i+j]:\n flag = False\n break\n if flag: return True", "def can_go_forward(self):\n return self._pointer + 1 < len(self._items)", "def enter_loop():\n\n # Save whether we are currently in a loop or not.\n global in_loop\n in_loop_stack.append(in_loop)\n\n # We are now processing a loop body.\n in_loop = True", "def config_loop(self):\n loop_label = ['Loop OFF', 'Loop ON']\n group = self.make_ui_group(False, loop_label[self.loop])\n orig_loop = self.loop\n\n while True:\n action_left, action_right = (self.button_left.action(),\n self.button_right.action())\n if action_left is RichButton.HOLD:\n return self.loop is not orig_loop, False # Resume config\n if action_right is RichButton.HOLD:\n return self.loop is not orig_loop, True # Resume paint\n if RichButton.TAP in {action_left, action_right}:\n self.loop = not self.loop\n group.pop()\n group.append(centered_label(loop_label[self.loop], 40, 3))", "def breakout_loop(self):\n while self.playing:\n self.handle_events()\n self.update()\n if self.game_over:\n self.current_menu = self.fail_menu\n self.playing = False\n self.reset()\n self.draw()", "def __exit(self, loopRound):\r\n 
useSecondSecurityCode = False\r\n\r\n for i in range(10):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if currentApplication == 'evo-home':\r\n # both, history and menu views are 'evo-home', check also dialer grid icon so we get to menu\r\n dialer = self.phone.uiState.currentState.find('//image-widget[contains(@image,\"%s\")]' % self.phone.uiState._getImage('dialer/grid-menu'))\r\n if dialer:\r\n x,y,w,h = [int(p) for p in dialer.getAttribute('coords').split(\",\")]\r\n\r\n # if dialer not on screen width area, dialer is not visible\r\n if x + w/2 < 0 or x + w/2 > self.phone.uiState.getScreenWidth():\r\n dialer = None\r\n\r\n if dialer:\r\n # check if we are in editing mode in evo-home\r\n scrollContainerForMainMenu = self.phone.uiState.currentStateFull.find('//scroll-container[@name=\"main-menu-scroller\"]')\r\n if scrollContainerForMainMenu:\r\n if scrollContainerForMainMenu.find('*/fast-menu') and scrollContainerForMainMenu.find('*/container[@visible=\"true\"]'):\r\n self.phone.comment('KBD_KEY_BACK pressed in order to return from editing state in evo-home')\r\n if self.phone.isLeader():\r\n self.__backToIdleWithBackPress(True)\r\n else:\r\n self.__backToIdleWithBackPress()\r\n\r\n # we are in evo-home if dialer was found --> return from the __exit method with True\r\n return True\r\n else:\r\n if self.phone.isLeader():\r\n self.__followerExit()\r\n else:\r\n # perform swipe to get back to menu\r\n self.__backToIdleWithSwipe()\r\n # wait for a while if messaging-daemon is application is active\r\n elif currentApplication=='messaging-daemon':\r\n self.phone.delay(1000, False)\r\n elif currentApplication=='voice-call-application':\r\n # end possible reference call\r\n if self.phone.referencePhone != None and \\\r\n core.FW_conf['settings'].TestRun.AutomaticRejectCallForReferencePhone:\r\n try:\r\n debug.brf('Rejecting reference phone call in exit...')\r\n self.phone.referencePhone.rejectCall(doNotReport = True)\r\n except Exception, ex:\r\n debug.err('Unable to reject reference phone call in exit: %s' % str(ex))\r\n\r\n # end call with reject button\r\n try:\r\n if self.phone.check('voice-call-application/reject-call', doNotReport = True):\r\n self.phone.select('voice-call-application/reject-call', doNotReport = True)\r\n self.phone.delay(1000, False)\r\n except TestException, err:\r\n debug.out('Error in rejecting call in Exit: %s' % str(err))\r\n # skip possible failure, Exit must be as robust as possible\r\n\r\n if self.phone.isLeader():\r\n self.__followerExit()\r\n else:\r\n try:\r\n # make sure that all phone calls are ended\r\n self.phone.srv.call.end()\r\n except TestException, err:\r\n debug.out('Error in ending calls with SX in Exit: %s' % str(err))\r\n # skip possible failure, Exit must be as robust as possible\r\n\r\n # first we try to exit with swipe\r\n self.__backToIdleWithSwipe()\r\n # handle welcome application by pressing values to it\r\n elif currentApplication=='welcome':\r\n # n0000000000000000000001 == \"English\"\r\n # nbcaaMNAsek1HQ8pr0u2obo == \"Sign in later\" Both added to welcomeAppItems because logical text is missing from Lanai SS 1330.20 SW\r\n # nzvP58DaYjU6BBq68WACfig == \"Continue\"\r\n # n6Vpdj39k8kO7BzxnrXOErg == \"Done\"\r\n # njaWEWAr3pkOaUtZxG70TpQ == \"OK\"\r\n # nv3QRA238SUKYbG-FDnpGTw == \"Yes\"\r\n # nFmoGfXMvT0OJ-PjvuK12NA == \"Give your PIN\"\r\n welcomeAppItems = [['n0000000000000000000001'],['nbcaaMNAsek1HQ8pr0u2obo'],['Sign in 
later'],['nzvP58DaYjU6BBq68WACfig'],['n6Vpdj39k8kO7BzxnrXOErg'],['njaWEWAr3pkOaUtZxG70TpQ'],\r\n ['GMT -11 Midway Island'],['nv3QRA238SUKYbG-FDnpGTw'],['YES'],[\"password-service/simwatermark\"],\r\n [\"password-service/sim1watermark\"],['password-service/sim2watermark'],[\"Give your security code\"]]\r\n retVal = self.phone.tryExpect(welcomeAppItems, doNotReport = True)\r\n\r\n if len(retVal):\r\n # check if we need to enter security code (it's the last list item)\r\n if welcomeAppItems[retVal[0]] == welcomeAppItems[-1]:\r\n self.__unlockPhone(useSecondSecurityCode)\r\n useSecondSecurityCode = True\r\n # check if PIN code is required\r\n elif welcomeAppItems[retVal[0]]==welcomeAppItems[-4] or welcomeAppItems[retVal[0]]==welcomeAppItems[-3]:\r\n self.__enterPIN() #SIM1\r\n elif welcomeAppItems[retVal[0]]==welcomeAppItems[-2]:\r\n self.__enterPIN(sim2=True) #SIM2\r\n else:\r\n # try to select text which is currently visible\r\n try:\r\n if welcomeAppItems[retVal[0]][0]=='Sign in later' or welcomeAppItems[retVal[0]][0]=='nbcaaMNAsek1HQ8pr0u2obo':\r\n itemCoords = self.phone.uiState.isItemVisible(welcomeAppItems[retVal[0]][0], refresh=False)[1]\r\n self.phone.select((itemCoords[0]/2,itemCoords[1]), doNotReport = True) # Currently text width in xml dump is too wide\r\n else:\r\n self.phone.select(welcomeAppItems[retVal[0]][0], doNotReport = True)\r\n\r\n # select \"Continue\" if \"English\" was selected before\r\n if welcomeAppItems[retVal[0]][0] == 'n0000000000000000000001':\r\n self.phone.select('nzvP58DaYjU6BBq68WACfig', doNotReport = True)\r\n except TestException, err:\r\n debug.out('Error in Exit: %s' % str(err))\r\n # skip possible failure, Exit must be as robust as possible\r\n pass\r\n else:\r\n retVal = self.phone.tryExpect('dialogs/spinner-large-light', doNotReport = True)\r\n if not retVal:\r\n # try to exit with long back key press\r\n self.__backToIdleWithBackPress(True)\r\n\r\n # wait for a while just be sure that application changes\r\n self.phone.delay(1000, False)\r\n\r\n # in the last loop round we must exit from welcome application by force\r\n if currentApplication == self.phone.uiState.getCurrentApplication() and loopRound == 2 and i == 9:\r\n self.__handleJavaApplication(currentApplication)\r\n # check if ntf-drawer application has been activated\r\n elif currentApplication == \"ntf-drawer\":\r\n # check that are there item on the screen\r\n # if not, we must kill the application\r\n if not self.phone.isLeader() and \\\r\n len(self.phone.read(doNotReport = True)) == 0:\r\n self.phone.warn('%s application was killed using SX' % currentApplication)\r\n self.phone.sx(self.__killCommand)\r\n else:\r\n # Press back key to deactivate ntf-drawer\r\n self.__backToIdleWithBackPress()\r\n self.phone.delay(1000, False)\r\n # check if video-player application has been activated\r\n elif currentApplication == \"video-player\":\r\n # Press back key to deactivate video-player\r\n if self.phone.isLeader():\r\n self.__backToIdleWithBackPress(True)\r\n else:\r\n self.__backToIdleWithBackPress()\r\n self.phone.delay(300, False)\r\n # keylock must be deactivated with swipe\r\n elif currentApplication == 'keylock':\r\n # press keylock key to activate touch\r\n if self.phone.isLeader():\r\n self.__followerExit()\r\n else:\r\n self.phone._pressKey('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone._run('Press keylock key to activate touch')\r\n if self.phone.isLeader(): # if in leader mode, follower does not always swipe after keylock toggle\r\n self.phone.delay(200, testStepReporting = False)\r\n # go 
back to idle with touch swipe\r\n self.__backToIdleWithSwipe()\r\n # manual deactivation of phonelock\r\n elif currentApplication == 'phonelock':\r\n self.__unlockPhone(useSecondSecurityCode)\r\n useSecondSecurityCode = True\r\n continue # Jump at the beginning of the loop to keep securityCode flag-value\r\n # java application (for example Twitter) can have 'No' button to press\r\n elif currentApplication == 'java':\r\n javaAppItems = [['No'],['NO'],['OK']]\r\n retVal = self.phone.tryExpect(javaAppItems, doNotReport = True)\r\n\r\n if len(retVal):\r\n try:\r\n # select text which is currently visible\r\n self.phone.select(javaAppItems[retVal[0]][0], doNotReport = True)\r\n except TestException, err:\r\n debug.out('Error in Exit: %s' % str(err))\r\n # skip possible failure, Exit must be as robust as possible\r\n else:\r\n self.__handleJavaApplication(currentApplication)\r\n\r\n # wait for a while just be sure that application changes\r\n self.phone.delay(1000, False)\r\n # usb-manager (choose \"Modem\" if it's visible, otherwise kill the application)\r\n elif currentApplication == 'usb-manager':\r\n usbAppItems = [['n-bNuEfehnkav7TuxaBHeiA']] # Modem\r\n retVal = self.phone.tryExpect(usbAppItems, doNotReport = True)\r\n\r\n if len(retVal):\r\n # select \"Modem\"\r\n try:\r\n self.phone.select(usbAppItems[retVal[0]][0], doNotReport = True)\r\n except TestException, err:\r\n debug.out('Error in Exit: %s' % str(err))\r\n # skip possible failure, Exit must be as robust as possible\r\n\r\n self.phone.delay(1000, False)\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n # kill the application if selecting didn't work or was not possible\r\n if currentApplication == 'usb-manager':\r\n if self.phone.isLeader():\r\n core.FW_conf['mail_sender'].holdExecutionAndSendMail(message=\"\"\"\r\n MTBF leader requests usb-manager kill via SX.\r\n Execution is on hold\"\"\", subject='MTBF EXECUTION ON HOLD')\r\n else:\r\n self.phone.warn('%s application was killed using SX' % currentApplication)\r\n self.phone.sx(self.__killCommand)\r\n\r\n # wait for a while just be sure that application changes\r\n self.phone.delay(1000, False)\r\n\r\n # check app-stack-container state if application was not found\r\n elif currentApplication is None:\r\n # try to get the app-stack-container. 
Execute the call in a try-catch clause\r\n # due to in a situaton like this, the xml-dump may be totally messed up\r\n appStackNode = None\r\n try:\r\n appStackNode = self.phone.uiState.currentStateFull.find('//app-stack-container')\r\n except Exception:\r\n pass\r\n\r\n # in the last loop round we must exit from this erroneous situation by force\r\n if not appStackNode or not appStackNode.getChildNodes() and loopRound == 2 and i == 9:\r\n # try to take error capture before reset\r\n self.phone.takeCaptureFromFailure()\r\n\r\n self.__invalidPhoneStateCounter += 1\r\n\r\n # if this would be the third time, let's just give up (result == NO RESULT)\r\n if self.__invalidPhoneStateCounter == 3:\r\n raise Exception('Invalid phone state detected: No running applications found!')\r\n else:\r\n self.phone.resetPhone('Invalid phone state detected: No running applications found!')\r\n\r\n # wait for a while to give time to the phone to recover\r\n self.phone.delay(2000, False)\r\n # some other application active\r\n else:\r\n # reset invalid phone state counter when we have valid application\r\n self.__invalidPhoneStateCounter = 0\r\n\r\n if self.phone.isLeader():\r\n self.__followerExit()\r\n else:\r\n # first we try to exit with swipe\r\n self.__backToIdleWithSwipe()\r\n\r\n # wait for a while just be sure that application changes\r\n self.phone.delay(500, False)\r\n\r\n # get current application\r\n prevApplication = currentApplication\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n longPress = True\r\n for i in range(2):\r\n if currentApplication == prevApplication:\r\n # try to exit with long and then with normal back key press\r\n self.__backToIdleWithBackPress(longPress)\r\n if longPress:\r\n longPress = False\r\n\r\n # wait for a while just be sure that application changes\r\n self.phone.delay(1000, False)\r\n\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n else:\r\n break\r\n\r\n # if swipe or long back key press don't work, we must kill the application with SX\r\n # we can't kill certain applications\r\n if currentApplication == prevApplication and \\\r\n not currentApplication in self.__appsThatCantBeKilled:\r\n if self.phone.isLeader():\r\n core.FW_conf['mail_sender'].holdExecutionAndSendMail(message=\"\"\"\r\n MTBF leader requests kill via SX: Swipe or long back did not work -> sx kill cannot be done in Leader mode. 
\\\r\n Execution is on hold\"\"\", subject='MTBF EXECUTION ON HOLD')\r\n\r\n else:\r\n self.phone.warn('%s application was killed using SX' % currentApplication)\r\n self.phone.sx(self.__killCommand)\r\n\r\n # wait for a while after killing application with SX\r\n self.phone.delay(1000, False)\r\n\r\n # we could not reach evo-home\r\n return False", "def storage_final(index):\n i, t = index[0], NN - 1\n return storage_state[i, t] >= storage_start_state[i]", "def prev(self):\n return bool(self._ll_tree.prev())", "def loop(self):\r\n\r\n self._next_state = self._current_state", "def atTail(self):\n return self.cursor == self.tail", "def _run_backtest(self):\n i = 0\n\n while True:\n i += 1\n print(i)\n\n # Update the market bars\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n\n # Handle the Events\n while True:\n try:\n event = self.events.get(False)\n except queue.Empty:\n break\n else:\n # The inner-loop acts on the events by calling the appropriate method of the appropriate object\n if event is not None:\n if event.type == 'MARKET':\n self.strategy.calculate_signals(event)\n self.portfolio.update_timeindex(event)\n\n elif event.type == 'SIGNAL':\n self.signals += 1\n self.portfolio.update_signal(event)\n\n elif event.type == 'ORDER':\n self.orders += 1\n self.execution_handler.execute_order(event)\n\n elif event.type == 'FILL':\n self.fills += 1\n self.portfolio.update_fill(event)\n\n # Pauses for a duration of self.heartbeat seconds\n time.sleep(self.heartbeat)", "def see_behind(self):\n return True", "def stop(self):\n try:\n return self.index[-1]\n except:\n pass", "def go_again(self):\n return False", "def is_started(self):\n return self.currIndex >= 0", "def train_loop_post(self, current_step):\r\n pass", "def has_next():", "def go_again(self):\n return True", "def _removeTripleLoop(self):\n loop = self._detectTripleLoop()\n if not loop:\n return False\n\n self._swap1(*loop)\n return True", "def hasVisitedNext(self):\n if len(self.remaining_dest) == 1:\n self.remaining_dest = []\n else:\n self.remaining_dest = self.remaining_dest[1:]", "def _run_backtest(self):\n i = 0\n \n while True:\n i += 1\n print(i)\n \n # Update the market bars\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n \n # Handle the Events\n while True:\n try:\n event = self.events.get(False)\n except queue.Empty:\n break\n else:\n # The inner-loop acts on the events by calling the appropriate method of the appropriate object\n if event is not None:\n if event.type == 'MARKET':\n self.strategy.calculate_signals(event)\n self.portfolio.update_timeindex(event)\n \n elif event.type == 'SIGNAL':\n self.signals += 1\n self.portfolio.update_signal(event)\n \n elif event.type == 'ORDER':\n self.orders += 1\n self.execution_handler.execute_order(event)\n \n elif event.type == 'FILL':\n self.fills += 1\n self.portfolio.update_fill(event)\n \n # Pauses for a duration of self.heartbeat seconds\n time.sleep(self.heartbeat)", "def left(self, obs, object):\n for i in range(int((self.no_rays-1)/2)):\n if(obs[self.listOfObjects.index(object)][i] > 0):\n # print(\"found \" + str(object) + \" left\")\n return True\n return False", "def running_loop(self, run_check_ms=None):\r\n if self.board.area.down_click_call is None:\r\n raise SelectError(\"board.area.down_click_call is not set\")\r\n if self.numgame is not None and self.ngame >= self.numgame:\r\n SlTrace.lg(f\"running_loop: ngame={self.ngame} > numgame {self.numgame}\")\r\n 
self.running = False\r\n self.run = False\r\n return\r\n \r\n self.running = True # Still in game\r\n self.run = True # progressing (not paused)\r\n self.first_time = True \r\n self.game_start_ts = SlTrace.getTs(6)\r\n self.game_control_updates()\r\n if run_check_ms is not None:\r\n self.run_check_ms = run_check_ms\r\n BlinkerMultiState.enable()\r\n \r\n while self.running:\r\n SlTrace.lg(\"running_loop\", \"running_loop\")\r\n self.mw.update()\r\n if ActiveCheck.not_active():\r\n break\r\n SlTrace.lg(\"running_loop active\", \"running_loop\")\r\n self.mw.update_idletasks()\r\n if self.event_check():\r\n continue # Gobble up pending events\r\n \r\n if (self.cmd_stream is not None\r\n and not self.cmd_stream.is_eof()):\r\n self.run_file()\r\n self.first_time = False # Assume file did that\r\n continue # Check if more\r\n else:\r\n if self.first_time:\r\n if not self.start_game():\r\n break\r\n self.first_time = False\r\n if not self.make_move():\r\n break \r\n \r\n SlTrace.lg(\"running_loop after loop\", \"running_loop\")\r\n BlinkerMultiState.disable()\r\n \r\n if self.on_end is not None:\r\n SlTrace.lg(\"running_loop doing on_end\", \"running_loop\")\r\n self.mw.after(0, self.on_end) # After run processing\r", "def next_iter(self):\n self.is_first_iter = False\n self.handles_to_post_forward_order_index.clear()\n self.handles_post_forward_order.clear()\n if self._checking_order:\n self.current_order_index = 0\n if self.warn_status == _ExecOrderWarnStatus.WARNING:\n self.warn_status = _ExecOrderWarnStatus.WARNED", "def is_on(self):\n return self._cur != -1", "def _is_in_outmost_while_loop(self, op):\n ctxt = self._get_op_control_flow_context(op)\n outer_while_context = control_flow_util.GetContainingWhileContext(ctxt)\n return outer_while_context == control_flow_util.GetContainingWhileContext(\n self._outmost_context)", "def update2_trace(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.grid[y][x] == '#' and sum(self.is_occupied(p) for p in self.neighbours[(x, y)]) >= 5:\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and self.is_available2_trace(x, y):\r\n tmp[y][x] = '#'\r\n changed = True\r\n self.grid = tmp\r\n return changed", "def goBack(self):\r\n if self.currLoc > 0:\r\n self.currLoc -= 1\r\n return self.history[self.currLoc]", "def step_back(self):\n if len(self.history) > 0:\n (\n self.round,\n r_raised,\n self.game_pointer,\n self.round_counter,\n d_deck,\n self.public_card,\n self.players,\n ps_hand,\n ) = self.history.pop()\n self.round.raised = r_raised\n self.dealer.deck = d_deck\n for i, hand in enumerate(ps_hand):\n self.players[i].hand = hand\n return True\n return False", "def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIteration', 'NextIteration'))", "def next(self):\n while not self.is_stable():\n self.step()", "def GAMEOVER_LOOP():\n pass", "def check_sequence(self, start_index):\n st = start_index\n fin = start_index\n ball_type = self.balls[start_index].type\n i = start_index - 1\n while i >= 0:\n if self.balls[i].type == ball_type or self.balls[i].status == 3:\n st -= 1\n else:\n break\n i -= 1\n i = start_index + 1\n while i < len(self.balls):\n if self.balls[i].type == ball_type or self.balls[i].status == 3:\n fin += 1\n else:\n break\n i += 1\n return 
st, fin", "def iteration(self) -> int:\n return len(self._history) - 1", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "def stopCond(self):\n\t\treturn False", "def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return", "def stop(self):\n return not self.iteration < self.options['max_iters']", "def is_halted(self):\n\t\treturn self.pos == -1", "def _check_alternative1_stop_conditions(self, changed):\n searching = self._iteration < self._max_iterations\n if not searching:\n self._notify(message=LocalSearchMessage.Stopped)\n elif self._target_fitness:\n if self._solution.fitness >= self._target_fitness:\n self._notify(message=LocalSearchMessage.StoppedTargetAchieved)\n return False\n return searching", "def onGoal(self):\n return self.index == len(self.path)", "def running(self):\r\n return self.__maxlen__ > 0", "def if_end(self, **kwargs):\n\n index = self.get('_index')\n\n if index and index >= len(self.steps)-1:\n return True # all steps have been used\n\n return False", "def exit_loop():\n global in_loop\n if (len(in_loop_stack) > 0):\n in_loop = in_loop_stack.pop()\n else:\n log.warning(\"exit_loop() called with no matching enter_loop() call.\")\n in_loop = False", "def goBackInTime(self):\n if (len(self.history) == 0):\n return\n notBusy, notVisible = self.history.pop()\n for cell in notVisible:\n for item in cell[0] + cell[1]:\n self.canvas.delete(item)\n for x, y in notBusy:\n self.gridBusy[x][y] = 0\n self.onBoard -= 1\n self.refreshScore()", "def toggle_loop(self):\n if not self.can_alter_line():\n return\n if self.loop:\n self.loop = None\n else:\n self.caller.location.ndb.event_line_loop = True\n self.msg(\"Line looping set to: %s\" % str(bool(self.loop)))", "def postLoopFunctions(self):\n\t\treturn", "def recurrent(self):\n pass", "def _find_front(self):\n self.front = (laplace(self.working_mask) > 0).astype('uint8')\n # TODO: check if scipy's laplace filter is faster than scikit's", "def _behind(self):\n if(self._prev is self._head):\n return self._head._prev\n return self._prev", "def i_am_at_the_front(self):\n return self is self.hist._queue[0]", "def _has_left(self, j):\n return (2 * j + 1) < len(self)", "def guard_liberate_transition(self):\n if self.get_free_positions:\n return True", "def first_loop_end(self) -> int:\n return self.__first_loop_end", "def method1(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n good_twin = generate_good_twin(new_bad_twin)\r\n synchronized, ambiguous_transitions = synchronize_1(new_bad_twin, good_twin)\r\n for src_name, dst_name in 
ambiguous_transitions:\r\n states = synchronized.get_states()\r\n if find_loops(states[dst_name], {src_name}):\r\n return i - 1\r\n old_bad_twin = new_bad_twin\r\n i += 1\r\n return True", "def __loop_detection(self, route: Route) -> bool:\n if self.node_id in route.path:\n return True\n return False", "def __loop_detection(self, route: Route) -> bool:\n if self.node_id in route.path:\n return True\n return False", "def findLoopStart (self):\n\t\tslow = self.head\n\t\tif slow is None:\n\t\t\treturn None\n\t\tfast = self.head\n\t\tlength = 0\n\t\twhile (fast is not None) and ((slow != fast) or (length == 0)):\n\t\t\tslow = slow.getNext()\n\t\t\tfast = fast.getNext()\n\t\t\tif fast is None:\n\t\t\t\tbreak\n\t\t\tfast = fast.getNext()\n\t\t\tlength += 1\n\t\tif (slow == fast) and (length > 0):\n\t\t\t# has loop\n\t\t\tslow = self.head\n\t\t\twhile slow != fast:\n\t\t\t\tslow = slow.getNext()\n\t\t\t\tfast = fast.getNext()\n\t\t\treturn fast\n\t\telse:\n\t\t\treturn None", "def lulzloop(self):\n self.sentinel = False\n while self.sentinel == False:\n for i in range(len(self.strip)):\n self.strip[i] = (random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255))\n self.strip.show()\n time.sleep(0.025)\n self.killedevent.set()", "def calc_position_change(self, game_state: dict):\n current_position = game_state['self'][3]\n # print(f'Current Position: {current_position}')\n while len(self.positions) > 3:\n self.positions.pop(0)\n\n if current_position in self.positions:\n return True\n else:\n return False", "def _get_next_state(self):\n self.string_level_blocks.popleft()\n self.sprite_level_blocks.popleft()\n self._generate_next_blocks()\n self.is_start = False", "def watch_loop(self):\n # Double threaded function that allows to stop the loop mid execution\n def repeatIt():\n # reset UI and flag before starting loop\n self.resetLabels()\n self.reset_scrollbar()\n # enable stop button\n self.btnStop.config(state=\"normal\")\n # disable button while loop is running\n self.btnStart.config(state=\"disabled\")\n self.txtLoop.config(state=\"disabled\", textvariable=self.loopCounterUI)\n self.labelLoop.config(text=\"Loop Count: \")\n\n while self.loopCount.get() > 0:\n # move scrollbar to bottom\n self.testCanvas.yview_moveto(0)\n # count the loop\n self.loopCounterUI.set(self.loopCounterUI.get() + 1)\n\n # Run the test cases\n self.runThis()\n\n # Below are just to reset the UI\n if not self.stopLoop:\n print(\"loop not stopped so proceed\")\n # let user know script is stopping\n x = Label(\n self.testFrame, text=f'End of Loop',\n background=self.bgChooser(),\n foreground=\"#630984\",\n font=self.boldFont)\n x.pack(fill=X)\n # flag gor BG and labels\n self.bgCounter += 1\n self.LabelLists.append(x)\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n else:\n print(\"loop has been stopped so not gonna print End of Loop\")\n\n # pause before restarting loop\n self.loopCount.set(self.loopCount.get()-1)\n time.sleep(1)\n\n # disable stop button\n self.btnStop.config(state=\"disabled\")\n # re-enable button after loop is done\n self.btnStart.config(state=\"normal\")\n self.txtLoop.config(state=\"normal\", textvariable=self.loopCount)\n self.labelLoop.config(text=\"Enter Loop Count: \")\n # self.testCanvas.yview_moveto(0)\n # Let user know the script is done\n if not self.stopLoop:\n # loop did not stopped\n x = Label(\n self.testFrame, text=f'Test is done!',\n background=self.bgChooser(),\n foreground=\"#057224\",\n font=self.boldFont)\n x.pack(fill=X)\n 
self.bgCounter += 1\n else:\n x = Label(\n self.testFrame, text=f'Test stopped!',\n background=self.bgChooser(),\n foreground=\"#057224\",\n font=self.boldFont)\n x.pack(fill=X)\n self.bgCounter += 1\n self.btnStart.config(state=\"normal\")\n self.txtLoop.config(state=\"normal\", textvariable=self.loopCount)\n self.labelLoop.config(text=\"Enter Loop count: \")\n self.loopCount.set(50000)\n self.LabelLists.append(x)\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n thread = threading.Thread(target=repeatIt)\n thread.start()", "def hasNext(self) -> bool:\n return self.stack", "def is_almost_finished(self, index_delta):\n return self.currIndex + index_delta >= self.data.shape[0]", "async def _run(self):\n while True:\n for count in range(self._nSamples):\n if self._currentValue == self._pin.value():\n break\n else:\n await asyncio.sleep_ms(self._interval)\n if count == self._nSamples - 1:\n # Edge detected\n self._currentValue = not self._currentValue\n if self._currentValue:\n # Raising edge\n self._count += 1\n\n await asyncio.sleep_ms(self._interval)", "def previous(self):\n if self.current and self.current.prev:\n self.current = self.current.prev\n return True\n return False" ]
[ "0.6223293", "0.6218441", "0.6189063", "0.6179153", "0.608254", "0.59855604", "0.5839952", "0.58170766", "0.57790136", "0.57635117", "0.5739355", "0.57139474", "0.57138425", "0.5695078", "0.56928277", "0.56292295", "0.5568077", "0.55579966", "0.55414546", "0.55264825", "0.5519991", "0.5516658", "0.5503106", "0.5499639", "0.5484257", "0.54798913", "0.54762626", "0.54580015", "0.5457094", "0.54530954", "0.54446024", "0.54391646", "0.54376346", "0.5436965", "0.5433751", "0.5424511", "0.5401921", "0.53451943", "0.5339047", "0.5335369", "0.53248775", "0.5319717", "0.5301705", "0.53004205", "0.5291294", "0.5282336", "0.528209", "0.52812225", "0.5280812", "0.52686733", "0.52654994", "0.52647287", "0.5261701", "0.5230972", "0.52190685", "0.5216214", "0.5209446", "0.52035344", "0.52015424", "0.52010596", "0.5201039", "0.51989", "0.5187563", "0.5184843", "0.51838607", "0.5183619", "0.5182613", "0.517963", "0.5176763", "0.51738733", "0.5170756", "0.51704496", "0.5165458", "0.5160339", "0.51544064", "0.51507896", "0.5134886", "0.5134713", "0.5134307", "0.5113004", "0.5111596", "0.51115227", "0.5110385", "0.5096334", "0.5093025", "0.50905997", "0.50859374", "0.5082916", "0.50804496", "0.50801504", "0.50801504", "0.5079662", "0.5074402", "0.5074091", "0.50739384", "0.5073167", "0.50646716", "0.5059906", "0.505982", "0.5059745" ]
0.684357
0
returns list of active Hints and sequence of `states`. For each state, reports the location of each active hint and the type of the transition to reach the following state
def _model2hint_comp(self, model, first: int, last: int) \
        -> Tuple[List[Hint], List[List[Tuple[int, bool, TransType]]],
                 List[Tuple[RankFun, int, int]]]:
    assert isinstance(first, int)
    assert isinstance(last, int)
    assert hasattr(model, "get_value")
    assert 0 <= first < last
    assert all(h.ts_lvals is not None for h in self.hints)
    assert all(h.ts_loc_symbs is not None for h in self.hints)
    # set of active hints should be constant in the loop.
    assert all(all(model.get_value(self.totime(is_active, step)).is_true()
                   for step in range(first, last+1)) or
               all(model.get_value(self.totime(is_active, step)).is_false()
                   for step in range(first, last+1))
               for idx, is_active in enumerate(self.hint_active))
    # hint_active predicates should be frozen.
    assert all(self.totime(act, first) == act for act in self.hint_active)

    # Filter active hints
    active_hints = [self.hints[idx]
                    for idx, is_active in enumerate(self.hint_active)
                    if model.get_value(is_active).is_true()]
    # No hints used in the current trace.
    if len(active_hints) == 0:
        return [], [], []

    locval2idx_lst = [{val: idx for idx, val in enumerate(h.ts_lvals)}
                      for h in active_hints]
    x_loc_idxs: List[int] = []
    for h, locval2idx in zip(active_hints, locval2idx_lst):
        val = self.i_mgr.And(
            s if model.get_value(self.totime(s, first)).is_true()
            else self.i_mgr.Not(s)
            for s in h.ts_loc_symbs)
        assert val in locval2idx
        x_loc_idxs.append(locval2idx[val])

    hints_steps = [[] for _ in range(first, last)]
    hints_rfs = []
    last_rf = None
    last_rf_start_idx = None
    for curr, step in zip(hints_steps, range(first, last)):
        # fill curr with info of active_hints
        loc_idxs = x_loc_idxs
        x_loc_idxs = []
        assert len(active_hints) == len(locval2idx_lst)
        assert len(active_hints) == len(loc_idxs)
        for h, locval2idx, loc_idx in zip(active_hints, locval2idx_lst,
                                          loc_idxs):
            # find location of h at next step
            val = self.i_mgr.And(
                s if model.get_value(self.totime(s, step + 1)).is_true()
                else self.i_mgr.Not(s)
                for s in h.ts_loc_symbs)
            assert val in locval2idx
            x_loc_idx = locval2idx[val]
            assert isinstance(x_loc_idx, int)
            assert 0 <= x_loc_idx < len(h)
            x_loc_idxs.append(x_loc_idx)

            trans_type = None
            is_ranked = False
            if model.get_value(self.totime(h.t_is_stutter, step)).is_true():
                trans_type = TransType.STUTTER
                if h[loc_idx].rf is not None:
                    rf_pred = self.totime(h[loc_idx].rf.is_ranked, step)
                    is_ranked = model.get_value(rf_pred).is_true()
            elif model.get_value(self.totime(h.t_is_ranked, step)).is_true():
                trans_type = TransType.RANKED
                is_ranked = True
                rf = h[loc_idx].rf
                assert rf is not None
                if model.get_value(self.totime(self.i_mgr.Not(rf.is_ranked),
                                               step + 1)).is_true():
                    if not last_rf:
                        assert last_rf_start_idx is None
                        last_rf = rf
                        last_rf_start_idx = step - first
                    assert last_rf is not None
                    assert last_rf_start_idx is not None
                    assert 0 <= last_rf_start_idx <= step - first
                    hints_rfs.append((last_rf, last_rf_start_idx,
                                      step - first))
                    last_rf = None
                    last_rf_start_idx = None
                else:
                    assert last_rf is None or last_rf == rf
                    last_rf = rf
                    last_rf_start_idx = step - first + 1
            else:
                assert model.get_value(self.totime(h.t_is_progress,
                                                   step)).is_true()
                trans_type = TransType.PROGRESS
            curr.append((loc_idx, is_ranked, trans_type))

            if __debug__:
                assert step < last
                # check model is in the identified restricted region.
                formula = self.totime(h[loc_idx].region, step)
                assert model.get_value(formula).is_true()
                formula = self.totime(h[loc_idx].assume, step)
                assert model.get_value(formula).is_true()
                formula = self.totime(h[x_loc_idx].region, step + 1)
                assert model.get_value(formula).is_true()
                formula = self.totime(h[x_loc_idx].assume, step + 1)
                assert model.get_value(formula).is_true()
                # check that the identified transition holds in model.
                if trans_type == TransType.STUTTER:
                    assert x_loc_idx == loc_idx
                    trans = h[loc_idx].stutterT
                    formula = self.totime(trans, step)
                    assert model.get_value(formula).is_true()
                    if h[loc_idx].rf is not None:
                        rf = h[loc_idx].rf.expr
                        formula = self.i_mgr.Equals(self.totime(rf, step),
                                                    self.totime(rf, step + 1))
                        assert model.get_value(formula).is_true()
                elif trans_type == TransType.RANKED:
                    assert h[loc_idx].rf is not None
                    assert x_loc_idx == loc_idx
                    trans = h[loc_idx].rankT
                    formula = self.totime(trans, step)
                    assert model.get_value(formula).is_true()
                    formula = self.totime(h[loc_idx].rf.progress_pred(), step)
                    assert model.get_value(formula).is_true()
                else:
                    assert trans_type == TransType.PROGRESS
                    assert x_loc_idx in h[loc_idx].dsts
                    trans = self.totime(h[loc_idx].progress(x_loc_idx), step)
                    assert model.get_value(trans).is_true()
                    if h[x_loc_idx].rf is not None:
                        ranked = self.totime(
                            self.i_mgr.Not(h[loc_idx].rf.is_ranked), step)
                        assert model.get_value(ranked).is_true()
            # end debug
    return active_hints, hints_steps, hints_rfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_states(self):\n return self.get_next_states()", "def get_all_states(self):\n return tuple(self._transition_probs.keys())", "def next_states(self):\n return self._states[1:]", "def get_reward_states(self):\n state1 = State(7, 7)\n return [state1]", "def get_states(self):\n raise NotImplementedError()", "def get_list_of_states(self):\n return self.states", "def all_states(self) -> Tuple[State, ...]:\n return self.influence_graph.all_states()", "def get_states(self):\n return product(*[phi.automaton().states for phi in self])", "def get_active_states(self):\n raise NotImplementedError()", "def states(self):\n knownstates = set(self.keys())\n for possiblestates in self.values():\n for i in possiblestates:\n knownstates.add(i)\n return list(knownstates)", "def get_next_states(self):\n return self.__next_state", "def state_list(self) -> Sequence[TState]:\n pass", "def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states", "def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states", "def get_active_transitions(self):\n return [t for st in self.get_active_states() for t in st.transitions]", "def get_transitions(self):\n transitions = []\n for row in self.states:\n t_row = []\n for column in self.states:\n t_row.append([row, column])\n transitions.append(t_row)\n return sorted(transitions)", "def next_states(self, state):\n import copy\n\n ans = []\n current_array = state.board.array\n space_pos = state.board.space\n\n up_pos = [space_pos[0] - 1, space_pos[1]]\n down_pos = [space_pos[0] + 1, space_pos[1]]\n left_pos = [space_pos[0], space_pos[1] - 1]\n right_pos = [space_pos[0], space_pos[1] + 1]\n\n # down position\n if self.__is_valid(down_pos):\n down_array = [copy.copy(row) for row in current_array]\n down_board = Board(array=down_array, space=space_pos.copy())\n down_board.swap(down_pos)\n ans.append(State(board=down_board, came_from=state, move='U'))\n\n # up position\n 
if self.__is_valid(up_pos):\n up_array = [copy.copy(row) for row in current_array]\n up_board = Board(array=up_array, space=space_pos.copy())\n up_board.swap(up_pos)\n ans.append(State(board=up_board, came_from=state, move='D'))\n\n # right position\n if self.__is_valid(right_pos):\n right_array = [copy.copy(row) for row in current_array]\n right_board = Board(array=right_array, space=space_pos.copy())\n right_board.swap(right_pos)\n ans.append(State(board=right_board, came_from=state, move='L'))\n\n # left position\n if self.__is_valid(left_pos):\n left_array = [copy.copy(row) for row in current_array]\n left_board = Board(array=left_array, space=space_pos.copy())\n left_board.swap(left_pos)\n ans.append(State(board=left_board, came_from=state, move='R'))\n\n return ans", "def get_states(self):\n states = []\n for chords in self.training_data:\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if chunk_string not in states:\n states.append(chunk_string)\n return sorted(states)", "def get_next_transitions(\n self, state: State\n ) -> Collection[Tuple[Character, float, State]]:\n _check_is_legal_state(state, self.nb_states)\n return {\n (character, probability, successor)\n for character, (successor, probability) in self.transition_dict[\n state\n ].items()\n }", "def _cells_state_info(cells):\n\n return list(itertools.chain(*[c.state_info for c in cells]))", "def S(self):\n return self._states", "def next_possible_states(path, check_dict, check):\r\n \r\n current_state_tuple = path[-1]\r\n state_container = []\r\n x = current_state_tuple[1][0]\r\n y = current_state_tuple[1][1]\r\n current_state = current_state_tuple[0]\r\n\r\n # Down\r\n if y < 3:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y + 1][x]\r\n new_state[y + 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y + 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Up\r\n if y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n if y == 1 and x == 0:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if is_goal(new_state):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n elif y > 1:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Left\r\n if x > 0 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x - 1]\r\n new_state[y][x - 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x - 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Right\r\n if x < 2 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x + 1]\r\n new_state[y][x + 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x + 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n return state_container", "def complete_list_of_states():\n 
# funny way of getting all the states that are defined in ConcertClientState.msg\n return concert_msgs.ConductorGraph.__slots__", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def get_states():\n # Getting all hidden state through time\n all_hidden_states = tf.scan(GRU, processed_input, \n initializer=initial_hidden, name='states')\n return all_hidden_states", "def states_list(self, states):\n self.log('List of states: [{}]'.format(\n ' | '.join([(lambda x: x[1:])(s) for s in\n states.keys()])))\n return", "def active_states(self):\n return self.states.get_active_states()", "def get_possible_states(self) -> List[State]:\n next_states = []\n for action in self._legal_moves():\n next_states.append(self.move(action))\n return next_states", "def get_all_states(self):\n return self._states", "def states(self):\n return self._x_list", "def state_addresses(self):\n return self.switch.state_addresses()", "def successorStates(self, state):\r\n\r\n successors = []\r\n\r\n for action in Directions.CARDINAL:\r\n x, y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n\r\n if (not self.walls[nextx][nexty]):\r\n nextState = (nextx, nexty)\r\n cost = self.costFn(nextState)\r\n\r\n successors.append((nextState, action, cost))\r\n\r\n # Bookkeeping for display purposes (the highlight in the GUI).\r\n self._numExpanded += 1\r\n if (state not in self._visitedLocations):\r\n self._visitedLocations.add(state)\r\n self._visitHistory.append(state)\r\n\r\n return successors", "def all_states(self):\n return self._states", "def build_state_info(paths, outputs, inputs):\n states = []\n state_vars = {\"yield_state\"}\n for path in paths:\n if isinstance(path[0], HeadBlock):\n start_yield_id = -1\n else:\n start_yield_id = path[0].yield_id\n end_yield_id = path[-1].yield_id\n state = State(start_yield_id, end_yield_id, path)\n for i in range(0, len(path)):\n block = path[i]\n if isinstance(block, Branch):\n global __unique_cond_id\n __unique_cond_id += 1\n cond = block.cond\n if path[i + 1] is block.false_edge:\n # cond = ast.Call(ast.Name(\"not_\", ast.Load()), [cond], [])\n cond = ast.UnaryOp(ast.Invert(), cond)\n else:\n assert path[i + 1] is block.true_edge\n names = collect_names(cond)\n for name in names:\n if outputs and name not in outputs and \\\n inputs and name not in inputs:\n state_vars.update(names)\n # state.statements.append(ast.parse(f\"__silica_cond_{__unique_cond_id} = {astor.to_source(cond).rstrip()}\").body[0])\n # state.conds.append(ast.parse(f\"__silica_cond_{__unique_cond_id}\").body[0].value)\n state.conds.append(cond)\n # join_block = find_branch_join(block)\n # skip_cond = True\n # for path_ in paths:\n # if block in path_ and (join_block not in path_ or path_.index(join_block) < path_.index(block)):\n # skip_cond = False\n # if not skip_cond:\n # state.conds.append(cond)\n elif isinstance(block, BasicBlock):\n state.statements.extend(block.statements)\n elif isinstance(block, HeadBlock):\n pass\n # state.statements.extend(block.initial_statements)\n states.append(state)\n return states, state_vars", "def state_transitions(self, state):\n return self.states(\"ANY PreviousStates.identifier = '%s'\" % _obj_id(state))", "def solution_path(self) -> list[State]:", "def states(self):\n num_states = 2**self.num_vars\n return (int2tuple(s, self.num_vars) for s in range(num_states))", "def iter_states(self):\n return iter(self._states_)", "def states(self) -> Type[Any]:\n return []", "def get_possible_actions(self, state):\n 
return tuple(self._transition_probs.get(state, {}).keys())", "def _iter_transitions_all_(self):\n for state in self.iter_states():\n for t in state.transitions:\n yield t", "def get_switch_states(self):\n switches_states = []\n for connection in self.connections:\n if connection.start.is_switch_output():\n switches_states.append((connection.start.switch,\n connection.start.output_nr))\n if connection.end.is_switch_output():\n switches_states.append((connection.end.switch,\n connection.end.output_nr))\n return switches_states", "def Get_States(self):\n\n # Getting all hidden state throuh time\n all_hidden_states = tf.scan(self.LSTM,\n self.processed_input,\n initializer=self.initial_hidden,\n name='states')\n all_hidden_states=all_hidden_states[:,0,:,:]\n \n return all_hidden_states", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def get_step_actions(self):\n return self.actor(tf.numpy_function(self.get_states, [], self.states[0].dtype))", "def get_states_from_graph(graph):\n return [\n (y, x, td)\n for y in range(graph.size)\n for x in range(graph.size)\n for td in range(2)\n if graph.E[(0, y, x, td)].state\n ]", "def drive(self, goalstates, inputs):\n # extract start and goal states from environment\n path_list = []\n destination_reached = []\n action_order = []\n path_lengths = []\n start = self.state\n\n # for all goal states do\n # goalReached,path= A Star(start, goal)\n for goal in goalstates:\n goalReached, path = self.AStar(start[\"location\"], goal, inputs)\n destination_reached.append(goalReached)\n path_list.append(path)\n path_lengths.append(len(path))\n\n # Find best path from all paths received [1 path received for 1 goal]\n if True in destination_reached:\n # Best path, would the shortest path in case of goal is reachable \n best_path = [path_list[i] for i in range(len(path_list)) if (destination_reached[i] == True) and (len(path_list[i]) == min(path_lengths))]\n action_order.extend(best_path)\n else:\n # otherwise it would be the longest path, how far traveled before being blocked \n longest_path = [path_list[i] for i in range(len(path_list)) if (len(path_list[i]) == max(path_lengths))]\n action_order.extend(longest_path)\n\n # Compute action sequence for best path\n movements = {\n (0, 3): \"forward-3x\", \n (0, 2): \"forward-2x\", \n (0, 1): \"forward\", \n (-1, 1): \"left\", \n (1, 1): \"right\", \n (0, 0): None}\n try:\n action_sequence = [movements[(action_order[0][i+1][0] - action_order[0][i][0], action_order[0][i+1][1] - action_order[0][i][1])] for i in range(len(action_order[0])-1)]\n except:\n action_sequence = [None]\n # return action sequence\n return action_sequence", "def getExploredStates(node):\r\n path = []\r\n while node.parent:\r\n path.insert(0, node.state)\r\n node = node.parent\r\n\r\n return path", "def transitions(self, state):\n if len(set(state)) < len(state):\n yield 
self.STOP_STATE\n return\n for hidx in xrange(self.num_players):\n for lidx in xrange(hidx):\n (lower, higher) = (state[lidx], state[hidx])\n yield self.makestate(((2*lower) if (i == lidx) else ((higher - lower) if (i == hidx) else s)) for (i, s) in enumerate(state))", "def state_to_locations(state: list) -> list:\n\n locations = []\n for i in range(0, 16):\n locations.append((0, 0))\n # Each tuple represents a location on the board as (row, column)\n\n \"\"\" \"locations\" keeps track of all fifteen numbers in the given state and the goal \n state. The location of the blank in the state is stored as the tuple at locations[0], \n the location of the number 1 is stored as locations[1], so on and so forth.\"\"\"\n\n \"\"\" Due to the nature of indices on a list, when a location is stored as a tuple \n (row, column), the four rows and four columns are represented as indices from 0 \n to 3, even though the numbers 1 through 15 are represented as indices from 1 to \n 15 on the list.\"\"\"\n\n for i in range(0, 4):\n for j in range(0, 4):\n \"\"\" The loop scans the given state and reads the integer at [i][j]. The number \n is stored at its corresponding index in the list \"locations\". By the time the \n loop finishes, the locations of all fifteen numbers as well as the blank in \n the given state will have been stored in the list.\"\"\"\n num = state[i][j]\n locations[num] = (i, j)\n\n return locations", "def get_sink_states(self):\n state1 = State(4, 2)\n return [state1]", "def final_states(self):\n return list(self.iter_final_states())", "def next_state(self) -> Set[Position]:\n return self._next_state", "def neighboring_states(self):\n index = self.state.index(0)\n\n if index == 0:\n return [self.move(movement) for movement in ['down', 'right']]\n elif index == 1:\n return [self.move(movement) for movement in ['down', 'left', 'right']]\n elif index == 2:\n return [self.move(movement) for movement in ['down', 'left']]\n elif index == 3:\n return [self.move(movement) for movement in ['up', 'down', 'right']]\n elif index == 4:\n return [self.move(movement) for movement in ['up', 'down', 'left', 'right']]\n elif index == 5:\n return [self.move(movement) for movement in ['up', 'down', 'left']]\n elif index == 6:\n return [self.move(movement) for movement in ['up', 'right']]\n elif index == 7:\n return [self.move(movement) for movement in ['up', 'left', 'right']]\n else:\n # index == 8\n return [self.move(movement) for movement in ['up', 'left']]", "def states(self) -> Optional[Sequence['outputs.FeatureStateState']]:\n return pulumi.get(self, \"states\")", "def getAllStates(self):\n return list(itertools.product(self.getAllWorldStates(), self.getAllTheta()))", "def predict_next_states(self, states, actions, time_steps, train=True):\n # encoding and decoding\n hs, next_states = self.encode_latent_traj(states, actions, time_steps, train=train) # [N, T+1, D_latent]\n return next_states, hs[:, :-1, :]", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n print('search2Agents state',state)\n state1,goals = state\n x,y = state1\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state1 not in self._visited:\n self._visited[state1] = True\n 
self._visitedlist.append(state1)\n\n return successors", "def getstates(s) -> \"[str]\":\n pass", "def get_output_states(self):\n return self.states[-self.num_output_states:]", "def transitions(self) -> List[Dict]:\n return []", "def get_terminal_states(self) -> Set[S]:\n sink = self.get_sink_states()\n return {s for s in sink if\n all(is_approx_eq(r, 0.0) for _, r in self.rewards[s].items())}", "def getstate(self):\n return [elem.getstate() for elem in self]", "def transitions(self, from_state=None):\n return list(self.iter_transitions(from_state))", "def state_addresses(self):\n state_addresses = []\n # state_addresses.extend(self.speed.state_addresses())\n return state_addresses", "def states_at_times(self):\n if not hasattr(self, \"_states_at_times\"):\n self._states_at_times = interpolate_states(self.states, self.times)\n return self._states_at_times", "def getSuccessors(self, state): \n ##print \"state: \\n\" , state\n successors = []\n if state[\"HOLDING\"] is None:\n # pick up a block that is clear\n for obj in state:\n if obj == \"HOLDING\":\n continue\n successor = pickUp(state,obj)\n if not successor == None:\n successors.append((successor,\"PICK UP \" + obj,1)) \n else:\n # put down the block that is currently being held and put it on top of a clear block or the table\n for obj in state:\n if obj == \"HOLDING\":\n continue\n successor = putDown(state,obj)\n if not successor is None:\n successors.append((successor,\"PUT DOWN \" + state[\"HOLDING\"] + \" ON \" + obj,1))\n if tableIsFree(state, self.tableSpace): \n\t\t\t\tsuccessor = putDown(state,\"TABLE\")\n\t\t\t\tif not successor is None:\n\t\t\t\t\tsuccessors.append((successor,\"PUT DOWN \" + state[\"HOLDING\"] + \" ON TABLE\",1))\n ##print \"successors: \\n\" , successors \n self.nodesExpanded +=1\n return successors", "def setup_transition_list():\n xn_list = []\n\n xn_list.append( Transition(3, 4, 2., 'left ejection') )\n xn_list.append( Transition(12, 2, 2., 'right ejection') )\n xn_list.append( Transition(19, 20, 2.e8, 'downward ejection, left') )\n xn_list.append( Transition(19, 24, 2.e8, 'downward ejection, right') )\n xn_list.append( Transition(28, 17, 1., 'upward ejection, left') )\n xn_list.append( Transition(28, 18, 1., 'upward ejection, right') )\n xn_list.append( Transition(11, 15, 3.0e7, 'demobilization (right wall)') )\n xn_list.append( Transition(13, 15, 3.0e7, 'demobilization (left wall)') )\n xn_list.append( Transition(29, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(30, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(1, 4, 3.0e8, 'leftward motion') )\n xn_list.append( Transition(8, 2, 3.0e8, 'rightward motion') )\n xn_list.append( Transition(20, 17, 2.0e6, 'upward motion') )\n xn_list.append( Transition(24, 18, 2.0e6, 'upward motion') )\n xn_list.append( Transition(18, 24, 2.0e8, 'downward motion') )\n xn_list.append( Transition(17, 20, 2.0e8, 'downward motion') )\n\n if _DEBUG:\n print()\n print('setup_transition_list(): list has',len(xn_list),'transitions:')\n for t in xn_list:\n print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)\n\n return xn_list", "def actions(self, state):\n words = get_words(state)\n derived = []\n for i in range (len(words)):\n wi = words[i]\n fills = self.possibleFills(wi)\n for f in fills:\n derived.append((f, i))\n return derived", "def determinisation(self):\n if any(len(t.word_in) > 1 for t in self.iter_transitions()):\n return self.split_transitions().determinisation()\n\n epsilon_successors 
= {}\n direct_epsilon_successors = {}\n for state in self.iter_states():\n direct_epsilon_successors[state] = set(\n t.to_state\n for t in self.iter_transitions(state)\n if not t.word_in)\n epsilon_successors[state] = set([state])\n\n old_count_epsilon_successors = 0\n count_epsilon_successors = len(epsilon_successors)\n\n while old_count_epsilon_successors < count_epsilon_successors:\n old_count_epsilon_successors = count_epsilon_successors\n count_epsilon_successors = 0\n for state in self.iter_states():\n for direct_successor in direct_epsilon_successors[state]:\n epsilon_successors[state] = epsilon_successors[state].union(epsilon_successors[direct_successor])\n count_epsilon_successors += len(epsilon_successors[state])\n\n def set_transition(states, letter):\n result = set()\n for state in states:\n for transition in self.iter_transitions(state):\n if transition.word_in == [letter]:\n result.add(transition.to_state)\n result = result.union(*(epsilon_successors[s] for s in result))\n return (frozenset(result), [])\n\n result = self.empty_copy()\n new_initial_states = [frozenset(set().union(\n *(epsilon_successors[s]\n for s in self.iter_initial_states()\n )))]\n result.add_from_transition_function(set_transition,\n initial_states=new_initial_states)\n\n for state in result.iter_states():\n state.is_final = any(s.is_final for s in state.label())\n if all(s.color is None for s in state.label()):\n state.color = None\n else:\n state.color = frozenset(s.color for s in state.label())\n\n return result", "def getCurrentState (events_counters, states):\n gamma_raw = 0\n if events_counters[0] + events_counters[2] == 0:\n gamma_raw = -1000\n else:\n gamma_raw = float (events_counters[0]) / (float (events_counters[0]) +\n float (events_counters[2])) \n\n theta_raw = 0\n if events_counters[1] + events_counters[3] == 0:\n theta_raw = -1000\n else: \n theta_raw = float (events_counters[1]) / (float (events_counters[1]) +\n float (events_counters[3]))\n\n #print (\"gamma_raw = {}; theta_raw = {}\".format (gamma_raw, theta_raw))\n min_dist1 = 1\n target_ind1 = 0\n min_dist2 = 1\n target_ind2 = 0 \n for ind1 in range (len (states[0])):\n if math.fabs (states[0][ind1] - gamma_raw) <= min_dist1:\n min_dist1 = math.fabs (states[0][ind1] - gamma_raw)\n target_ind1 = ind1\n\n for ind2 in range (len (states[1])):\n if math.fabs (states[1][ind2] - theta_raw) <= min_dist2:\n min_dist2 = math.fabs (states[1][ind2] - theta_raw)\n target_ind2 = ind2\n #print (\"gamma = {}; theta = {}\".format (states[0][target_ind1], states[1][target_ind2]))\n return (target_ind1, target_ind2)", "def get_action_outcomes(self, state, action):\r\n temp_state = tuple([max(0, min(self.pond_size[i]-1, state[i] + self.action_directions[action][i]))\r\n for i in range(2)])\r\n return self.transition_lists[temp_state]", "def successorStates(self, state):\n currentState = state[1]\n successors = []\n for action in Directions.CARDINAL:\n x, y = state[0] # currentPosition\n print(\"State: {}\".format(state[0]))\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n # Implement a successor discovery, check if any corners are satisfied\n # and update values as they are satisfied\n if (not hitsWall):\n successorsState = []\n nextxy = (nextx, nexty)\n if nextxy == self.corners[0]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[0])\n if nextxy == self.corners[1]:\n successorsState.append(True)\n else:\n 
successorsState.append(currentState[1])\n if nextxy == self.corners[2]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[2])\n if nextxy == self.corners[3]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[3])\n # Put all updated values of 4 corners to a variable\n successorPost = (successorsState[0], successorsState[1],\n successorsState[2], successorsState[3])\n # Append to go to the next move\n successors.append(((nextxy, successorPost), action, 1))\n\n self._numExpanded += 1 # Count the number of nodes expanded\n return successors", "def final_states(self) -> Tuple[tuple, ...]:\n return self._final_states", "def generate_states(esncell, xs, h0):\n (map_ih, (Whh, shape), bh) = esncell\n def _step(h, x):\n #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x) + bh)\n h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x))\n return (h, h)\n (h, hs) = lax.scan(_step, h0, xs)\n return (h, hs)", "def predict_next_state_gt(self, states, actions):\n # TODO: write your code here\n\n # return [self.env.get_nxt_state(states[i], actions) for i in range(self.num_particles)]\n return np.array([[self.env.get_nxt_state(states[j][i], actions[j]) for i in range(self.num_particles)] for j in range(self.popsize)])", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x, y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append((nextState, action, cost))\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def state_info_specs(self):\n return list()", "def successors(self):\n next_states = []\n if self.stock_pile:\n tableau = self.tableau\n stock = self.stock_pile[1:]\n waste = self.stock_pile[0]\n next_states.append(State(tableau, stock, waste))\n for i, card in enumerate(self.tableau):\n if self.is_face_up(i) and self.can_be_moved(card):\n tableau = tuple(c if c != card else None for c in self.tableau)\n stock = self.stock_pile\n waste = card\n next_states.append(State(tableau, stock, waste))\n return next_states", "def get_next_states(self, short=False):\n if short or self.tail_batch is None:\n return self.memory.get('next_states', self.s, self.e)\n else:\n return np.concatenate(\n (\n self.memory.get('next_states', self.s, self.e),\n self.tail_batch.next_states\n ), axis=0\n )", "def get_states():\n try:\n ''' Returns a list of states in list named result '''\n data = State.select()\n return ListStyle.list(data, request), 200\n except Exception as e:\n abort(500)", "def get_start_state_data(start_state: int, states: [State]) -> tuple:\n first_node = 0\n for state in states:\n if state.trigs:\n for trig in state.trigs:\n if trig.source == start_state:\n first_node = trig.target\n return (get_state_by_id(states, first_node, \"new\").new_id, get_state_by_id(states, first_node, \"old\").y,\n (get_state_by_id(states, first_node, \"new\").x - 2))", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = 
self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def states(self) -> Set[State]:\n return set(range(self.nb_states))", "def get_initial_states(self):\n raise NotImplementedError()", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def get_successors(state): \n \n child_states = []\n \n size = len(state)\n i = 0\n j = 0\n for i in range (size):\n if 0 in state[i]:\n for j in range (size):\n if state[i][j] == 0:\n break \n break\n\n if j != size-1:\n child_states.append ((\"Left\", swap_cells(state, i, j, i, j+1)))\n if j != 0:\n child_states.append ((\"Right\", swap_cells(state, i, j, i, j-1)))\n if i != size-1:\n child_states.append ((\"Up\", swap_cells(state, i, j, i+1, j)))\n if i != 0:\n child_states.append ((\"Down\", swap_cells(state, i, j, i-1, j)))\n \n return child_states", "def _statck_state(self, list_states):\n batch_last_docs_prob = []\n batch_last_turn = []\n\n for ele in list_states:\n batch_last_docs_prob.append(ele.last_docs_prob)\n batch_last_turn.append(ele.last_turn)\n\n # (batch, num_docs)\n batch_last_docs_prob = torch.cat(batch_last_docs_prob, dim=0)\n # (batch, turn_len)\n batch_last_turn = torch.cat(batch_last_turn, dim=0)\n\n return DialogState(batch_last_docs_prob, batch_last_turn)", "def getState(self):\n return tuple([robot.getState() for robot in self.states.robots])", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is `m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n return c_prev, m_prev, cell_output_size", "def _get_state(self):\n # COMPUTE CLASSIFIER_STATE\n predictions = 
self.model.predict_proba(self.dataset.state_data)[:,0]\n predictions = np.array(predictions)\n idx = np.argsort(predictions)\n # the state representation is the *sorted* list of scores \n classifier_state = predictions[idx]\n \n # COMPUTE ACTION_STATE\n unknown_data = self.dataset.train_data[self.indeces_unknown,:]\n # prediction (score) of classifier on each unlabelled sample\n a1 = self.model.predict_proba(unknown_data)[:,0]\n # average distance to every unlabelled datapoint\n a2 = np.mean(self.dataset.distances[self.indeces_unknown,:][:,self.indeces_unknown],axis=0)\n # average distance to every labelled datapoint\n a3 = np.mean(self.dataset.distances[self.indeces_known,:][:,self.indeces_unknown],axis=0)\n next_action_state = np.concatenate(([a1], [a2], [a3]), axis=0)\n return classifier_state, next_action_state", "def print_prior_states(self):\n rospy.logdebug(\"Prior States:\")\n for state in self.previous_states:\n enum = self._state_enums(state)\n rospy.logdebug(enum)", "def initial_states(self):\n return list(self.iter_initial_states())", "def get_parent_list(state: State, states: [State])->[State]:\n curr_state = state.parent\n parents = []\n while curr_state:\n parents.append(curr_state)\n curr_state = curr_state.parent\n return parents", "def get_states(self):\n\n try:\n response = requests.get(self.ROOT_URL + self.ALL_STATES_ENDPOINT)\n response = response.json()\n except ValueError:\n raise OpenSkyApiException(self.PARSE_ERROR)\n except RequestException:\n raise OpenSkyApiException(self.REQUEST_ERROR)\n return self.parse_response(response)", "def get_states(request):\n if 'cursor' in request.GET:\n cursor = request.GET['cursor']\n else:\n cursor = str(uuid4())\n if cursor in CURSORS:\n it = CURSORS[cursor]\n else:\n board = GAMES[request.matchdict['game']]\n it = CURSORS[cursor] = board.lookahead_boards(\n request.GET.get('lookahead', 1))\n slen = request.GET.get('count', 20)\n states = list(islice(it, slen))\n if len(states) < 20:\n cursor = None\n return {\n 'cursor': cursor,\n 'boards': [[b.board for b in btup] for btup in states]}", "def test_input_stream_state_statewp():\n state_t1 = StateTask1(Direction.EAST, 0, 0)\n state_t2 = StateTask2([1, 10], [0, 0])\n\n instructions = tuple(read_instructions(input_stream()))\n assert state_t1.manhatam_distance == 0\n\n assert instructions[0] == Instruction(Direction.FWD, 10)\n state_t1.apply(instructions[0])\n state_t2.apply(instructions[0])\n assert state_t1.north == 0 and state_t1.east == 10\n assert state_t2.waypoint == [1, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[1] == Instruction(Direction.NORTH, 3)\n state_t1.apply(instructions[1])\n state_t2.apply(instructions[1])\n assert state_t1.north == 3 and state_t1.east == 10\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[2] == Instruction(Direction.FWD, 7)\n state_t1.apply(instructions[2])\n state_t2.apply(instructions[2])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [38, 170]\n\n assert instructions[3] == Instruction(Turn.RIGHT, 90)\n state_t1.apply(instructions[3])\n state_t2.apply(instructions[3])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [38, 170]\n\n assert instructions[4] == Instruction(Direction.FWD, 11)\n state_t1.apply(instructions[4])\n state_t2.apply(instructions[4])\n assert state_t1.north == -8 and state_t1.east == 17\n assert 
state_t2.waypoint == [-10, 4]\n assert state_t2.position == [-72, 214]", "def scan(text,transition_table,accept_states):\n\t\n\t# initial state\n\tpos = 0\n\tstate = 'q0'\n\t\n\twhile True:\n\t\t\n\t\tc = getchar(text,pos)\t# get next char\n\t\t\n\t\tif state in transition_table and c in transition_table[state]:\n\t\t\n\t\t\tstate = transition_table[state][c]\t# set new state\n\t\t\tpos += 1\t# advance to next char\n\t\t\t\n\t\telse:\t# no transition found\n\n\t\t\t# check if current state is accepting\n\t\t\tif state in accept_states:\n\t\t\t\treturn accept_states[state],pos \t#if current state is accepting, scan() returns it.\n\n\t\t\t# current state is not accepting\n\t\t\treturn 'ERROR_TOKEN',pos \t#if current state is not accepting, scan() returns 'ERROR_TOKEN'.\t", "def predict_next_states(self, states, actions, time_steps, lengths, train=True):\n # encoding and decoding\n zs, means_z0, stds_z0, next_states = self.encode_latent_traj(states, actions, time_steps, lengths,\n train=train) # [N, T+1, D_latent]\n return next_states, zs[:, :-1, :], means_z0, stds_z0", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n features = []\n for j in range(36):\n long_id = self._make_id(state.scanId, state.location.viewpointId,str(j+1))\n feature = self.features[long_id]\n pad_num = 64-len(feature)\n \n if pad_num > 0: # padding the feature to [64, 2051]\n padding = np.zeros([pad_num, 2051]) \n feature = np.concatenate((feature,padding))\n\n features.append(feature)\n \n feature_states.append((features, state))\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n\n return feature_states # [([64,2051]*36), sim_state] * batch_size" ]
[ "0.6335304", "0.6253233", "0.6187453", "0.60887307", "0.6078322", "0.606271", "0.6043362", "0.60380375", "0.59895456", "0.59725285", "0.5967622", "0.5963157", "0.59513724", "0.59513724", "0.58921933", "0.58813024", "0.5870557", "0.58609515", "0.58528286", "0.58261985", "0.57832074", "0.5747154", "0.5746819", "0.57409805", "0.57303506", "0.57241535", "0.57139075", "0.5702404", "0.56842875", "0.56133366", "0.5589894", "0.5586676", "0.557714", "0.557582", "0.55750203", "0.5568374", "0.5564977", "0.5560277", "0.5549213", "0.5546951", "0.55445224", "0.55424845", "0.55268353", "0.5516172", "0.5516172", "0.54729956", "0.54673135", "0.54326093", "0.5432315", "0.5412575", "0.53964376", "0.53941965", "0.5384709", "0.5356601", "0.5350328", "0.5333311", "0.53277636", "0.53237486", "0.53163826", "0.5306734", "0.5298234", "0.5294504", "0.5292501", "0.5288741", "0.52875876", "0.5282594", "0.5278142", "0.5276397", "0.52759975", "0.52755684", "0.5267632", "0.5257208", "0.5252769", "0.5242085", "0.523812", "0.52257895", "0.52236557", "0.52180463", "0.5210746", "0.5210388", "0.5205172", "0.52008885", "0.51974183", "0.51862913", "0.5184428", "0.51824844", "0.51704687", "0.5170001", "0.51644015", "0.5158509", "0.5157154", "0.51455986", "0.5135871", "0.51332027", "0.5128929", "0.51279753", "0.51195574", "0.51184213", "0.51183563", "0.51149493", "0.5110403" ]
0.0
-1
Build dictionary from predicates to the corresponding truth assignment as prescribed by the selected hints.
def _hint_comp2assume(self, hints: List[Hint], steps: List[List[Tuple[int, bool, TransType]]], first: int) -> Tuple[FrozenSet[FNode], FrozenSet[FNode]]: assert all(isinstance(h, Hint) for h in hints) assert all(isinstance(s, list) for s in steps) assert all(len(s) == len(hints) for s in steps) assert all(isinstance(s, tuple) for step in steps for s in step) assert all(len(s) == 3 for step in steps for s in step) assert all(isinstance(s[0], int) for step in steps for s in step) assert all(isinstance(s[1], bool) for step in steps for s in step) assert all(isinstance(s[2], TransType) for step in steps for s in step) assert isinstance(first, int) assert first >= 0 if len(hints) == 0: return frozenset(), frozenset() def assign_true(pred: FNode, res: Set[FNode]): assert isinstance(pred, FNode) assert isinstance(res, set) preds = [pred] while preds: pred = preds.pop() if pred.is_and(): preds.extend(pred.args()) elif pred.is_not(): assign_false(pred.arg(0), res) elif not pred.is_true(): assert not pred.is_false() res.add(self.cn(pred)) def assign_false(pred: FNode, res: Set[FNode]): assert isinstance(pred, FNode) assert isinstance(res, set) preds = [pred] while preds: pred = preds.pop() if pred.is_or(): preds.extend(pred.args()) elif pred.is_not(): assign_true(pred.arg(0), res) elif not pred.is_false(): assert not pred.is_true() if pred.is_lt() or pred.is_le(): res.add(self.cn(not_rel(self.i_env, pred))) else: res.add(self.cn(self.i_mgr.Not(pred))) res_regions_trans: Set[FNode] = set() res_assumes: Set[FNode] = set() for step_idx, step in enumerate(steps): c_time = step_idx + first x_step_idx = (step_idx + 1) % len(steps) for hint_idx, (hint, (loc_idx, is_ranked, trans_t)) in enumerate( zip(hints, step)): assert isinstance(hint, Hint) assert isinstance(loc_idx, int) assert isinstance(trans_t, TransType) loc = hint[loc_idx] assign_true(self.totime(loc.region, c_time), res_regions_trans) assign_true(self.totime(loc.assume, c_time), res_assumes) if loc.rf is not None: if is_ranked: assign_true(self.totime(loc.rf.is_ranked, c_time), res_regions_trans) else: assign_false(self.totime(loc.rf.is_ranked, c_time), res_regions_trans) x_loc_idx = steps[x_step_idx][hint_idx][0] assert isinstance(x_loc_idx, int) if trans_t == TransType.PROGRESS: trans = loc.progress(x_loc_idx) elif trans_t == TransType.STUTTER: trans = loc.stutterT else: assert trans_t == TransType.RANKED trans = loc.rankT assert trans is not None assert isinstance(trans, FNode) assert not trans.is_false() assert trans in self.i_mgr.formulae.values() assign_true(self.totime(trans, c_time), res_regions_trans) assert all(self.cn(p) == p for p in res_regions_trans) assert all(self.cn(p) == p for p in res_assumes) return frozenset(res_regions_trans), frozenset(res_assumes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildPredicateHash(self, subject):\n properties = {}\n for s,p,o in self.store.triples((subject, None, None)):\n oList = properties.get(p, [])\n oList.append(o)\n properties[p] = oList\n return properties", "def predicate_nodes(self) -> Dict[str, Dict[str, Any]]:\n\n return {nid: attrs for nid, attrs\n in self.graph.nodes.items()\n if attrs['domain'] == 'semantics'\n if attrs['type'] == 'predicate'}", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n op0 = self.args[0].operator_dict(index, vars, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in set().union(op0, op1):\n if (var in op0) and (var in op1):\n out[var] = add_sparse(op0[var], op1[var])\n elif (var in op0):\n out[var] = op0[var]\n else:\n out[var] = op1[var]\n return out", "def applypredicates(predicate,\n objects_dic,\n predicates_rules,\n gstate):\n\n pname = predicate[\"name\"]\n predicate_rule = predicates_rules[pname]\n objects_list_ref = predicate_rule[\"objects\"]\n # objects in the real pddl file\n objects = copy.deepcopy(predicate[\"objectNames\"])\n if \"custom_obj\" in predicate_rule:\n # addtional custom object not in the real pddl file\n custom_obj = predicate_rule[\"custom_obj\"]\n # complete object list\n object_list = objects + custom_obj\n objects_list_ref = objects_list_ref + custom_obj\n else:\n object_list = objects\n\n obj_ref_dic = dict(zip(objects_list_ref, object_list))\n for rulename in predicate_rule[\"rules\"]:\n if \"value\" in predicate_rule[rulename]:\n rule = predicate_rule[rulename]\n left, propertyname = get_objname_property(rule[\"left\"], obj_ref_dic)\n value = predicate_rule[rulename][\"value\"]\n if \"function\" in value:\n fproperty = value[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n result = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n update_object(objects_dic[left], propertyname, gstate, fname, result)\n elif \"equal\" in value:\n right_value = value[\"equal\"]\n if type(right_value) is not dict:\n objects_dic[left][propertyname[0]] = right_value\n else:\n if \"r\" in right_value: # for color\n objects_dic[left][propertyname[0]] = right_value\n else:\n right_object, right_property = get_objname_property(right_value, obj_ref_dic)\n objects_dic[left][propertyname[0]] = objects_dic[right_object][right_property]\n\n elif \"add\" in value:\n rightvalue = 0\n for additem in value[\"add\"]:\n if type(additem) is dict:\n\n right_object, right_property = get_objname_property(additem, obj_ref_dic)\n addvalue = objects_dic[right_object][right_property]\n rightvalue += addvalue\n else:\n rightvalue += additem\n objects_dic[left][propertyname[0]] = rightvalue\n else:\n # if the rule is action rule\n action = predicate_rule[rulename][\"action\"]\n if \"function\" in action:\n fproperty = action[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n\n key, value = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n 
objects_dic[key] = value", "def get_eval_flag_dict(eval_mode):\n\n # Base dictionary with all flags set to True.\n dict = {}\n for key in EVAL_FLAGS:\n dict[key] = True\n\n # Auto-annotations.\n if eval_mode == \"draft\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_trna\"] = False\n dict[\"import_locus_tag\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_coords\"] = False\n\n # Manual annotations.\n elif eval_mode == \"final\":\n dict[\"import_locus_tag\"] = False\n\n # SEA-PHAGES GenBank records.\n elif eval_mode == \"auto\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_description_field\"] = False\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n dict[\"check_coords\"] = False\n\n # Non-SEA-PHAGES GenBank records.\n elif eval_mode == \"misc\":\n dict[\"check_locus_tag\"] = False\n # TODO below should probably be True, but it causes problems\n # when checking the current genome, GNM2_001, since these are not 'draft'\n # genomes.\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n\n # Custom QC settings. User can select the settings, so it is initialized as\n # a copy of the base eval_mode. The user can provide the\n # customized combination of options.\n elif eval_mode == \"custom\":\n for key in dict.keys():\n prompt = f\"Eval_flag: {key}. 
{EVAL_FLAGS[key]}\"\n response = basic.ask_yes_no(prompt=prompt, response_attempt=3)\n if response is None:\n print(\"The default setting for this eval_flag will be used.\")\n else:\n dict[key] = response\n\n elif eval_mode == \"base\":\n pass\n else:\n print(\"A valid eval_mode has not been selected.\")\n return dict", "def data(self):\n data = {}\n if self.base_rule:\n data.update(self.base_rule.data)\n for condition in list(chain.from_iterable(self._conditions.itervalues())):\n data.setdefault(condition.key, []).append(condition)\n for action in list(chain.from_iterable(self._actions.itervalues())):\n data[action.key] = [action] # you can only take a given action _once_\n return data", "def get_dict_of_bool2(self):\n pass", "def make_dict(\n nn,\n q_id,\n polarity,\n context_cond,\n cat,\n subcat,\n answer_info,\n bias_targets,\n version,\n notes,\n context,\n question,\n ans_list,\n ans_place,\n):\n this_dict = {\n \"example_id\": nn,\n \"question_index\": q_id,\n \"question_polarity\": polarity,\n \"context_condition\": context_cond,\n \"category\": cat,\n \"answer_info\": answer_info,\n \"additional_metadata\": {\n \"subcategory\": subcat,\n \"stereotyped_groups\": bias_targets,\n \"version\": version,\n \"source\": notes,\n },\n \"context\": context.strip(),\n \"question\": question.strip(),\n \"ans0\": ans_list[0],\n \"ans1\": ans_list[1],\n \"ans2\": ans_list[2],\n \"label\": ans_place,\n }\n return this_dict", "def _index_symbols(predicates, functions):\n symbols, symbol_types = OrderedDict(), {}\n\n for s in predicates:\n argtypes = [t.type for t in s.arguments]\n symbols[s.name] = base.Predicate(s.name, argtypes)\n symbol_types[s.name] = 'bool'\n\n for s in functions:\n if s.name != 'total-cost': # Ignore the \"fake\" total-cost function\n argtypes = [t.type for t in s.arguments]\n symbols[s.name] = base.Function(s.name, argtypes, s.type)\n symbol_types[s.name] = s.type\n\n return symbols, symbol_types", "def exp_learnedPredicates(self, **kwargs):\n\n entries={}\n for condition in self.exp_predicates.keys():\n text = '%s: %s' % (condition, self.exp_predicates[condition])\n entries[text] = self.readCommandLine\n\n title = \"Ce qu'a appris Baxter pour effectuer le deplacement\" \n self.mm.addGenericMenu(\"expMenu\", self.mm.cur_page, title, entries)\n self.mm.loadMenu(\"expMenu\")", "def create_experiment_dict(algorithm_name, thresholds):\n experiment_dict = {}\n for threshold in thresholds:\n experiment_dict[str(threshold)] = {}\n experiment_dict[str(threshold)][algorithm_name] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n list_dict = {'TP': 0, 'FP': 1, 'TN': 2, 'FN': 3, 'TPR': 4, 'TNR': 5, 'PRECISION': 6, 'RECALL': 7,\n 'F1 MEASURE': 8, 'ACCURACY': 9}\n return experiment_dict, list_dict", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def _populate_rules(self,rules):\n rule2_index = {}\n for rule in rules:\n relation = rule.get_attribute('relation')[0] # vals are now lists\n ##: Time signals of IS_INCLUDED should not be used in relative time evaluation. They may cause confusion.\n ##: E.g., ... after 3 days in hospital.... 
\"3 days in\" is picked instead of \"after 3 days\" \n if relation=='IS_INCLUDED':\n continue\n \n signal = rule.get_attribute('signal')[0]\n confidence = float(rule.get_attribute('confidence')[0])\n rule2_index[signal] = (relation, confidence)\n return rule2_index", "def get_assignment_map_from_checkpoint(tvars, init_checkpoint, name_to_variable=None):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable2 = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable2[name] = var\n\n if name_to_variable is not None:\n print(\"DOESNT WORK\")\n print(name_to_variable)\n print(\"DOES WORK\")\n print(name_to_variable2)\n else:\n name_to_variable = name_to_variable2\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n print(name)\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)\n\n\n # def _make_polyak_averaging(embeddings, features, label_logits, mode, polyak, make_label_logits, params):", "def _get_answer_map(self):\r\n answer_map = {}\r\n for inputfield in self.inputfields:\r\n correct_option = self._find_option_with_choice(\r\n inputfield, 'correct')\r\n if correct_option is not None:\r\n input_id = inputfield.get('id')\r\n answer_map[input_id] = correct_option.get('description')\r\n return answer_map", "def Qs(self, observation, actions):\n return {a: self[observation, a] for a in actions}", "def get_visualisation_dic(predicates, animation_profile, actionlist, problem_dic):\n\n object_list = copy.deepcopy(predicates[\"objects\"])\n stages = copy.deepcopy(predicates[\"stages\"])\n predicates_rules = animation_profile[\"predicates_rules\"]\n objects_dic = Initialise.initialise_objects(object_list, animation_profile)\n gstate = Initialise.initialise_custom_functions()\n add_custome_objects(objects_dic, animation_profile)\n result = solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic)\n\n return result", "def sample_search(self):\n result = dict()\n for mutable in self.mutables:\n if isinstance(mutable, LayerChoice):\n gen_index = torch.randint(high=len(mutable), size=(1, ))\n result[mutable.key] = F.one_hot(gen_index, num_classes=len(mutable)).view(-1).bool()\n elif isinstance(mutable, InputChoice):\n if mutable.n_chosen is None:\n result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool()\n else:\n perm = torch.randperm(mutable.n_candidates)\n mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)]\n result[mutable.key] = torch.tensor(mask, dtype=torch.bool) # pylint: disable=not-callable\n return result", "def setup_dict(self, keys=None):\n keys = keys or []\n return {key: True for key in keys}", "def reset_hints(self):\n self._global_hints={}\n self._hints={}\n for sq in self.sqs:\n outs=[dv for dv in range(4) if add(sq, dv) in self.allsqs]\n pos=self.local2global(sq)\n self._hints[sq]=set()\n self._global_hints[pos]=set()\n for o in outs:\n for i in outs:\n if o==i:\n continue\n i=(i+2)%4\n self._global_hints[pos].add((i,o))\n self._hints[sq].add((i,o))\n for gsq, ns in self.gates:\n outs=[dv for dv in range(4)]\n outs.append('OUT')\n pos=self.local2global(gsq)\n self._hints[gsq]=set()\n 
self._global_hints[pos]=set()\n for o in outs:\n for i in outs:\n if o==i:continue\n if i=='OUT':\n continue\n else:\n i=(i+2)%4\n self._global_hints[pos].add((i,o))\n self._hints[gsq].add((i,o))", "def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match('^(.*):\\\\d+$', name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n\n assignment_map[name] = name\n assignment_map[name] = name_to_variable[name]\n initialized_variable_names[name] = 1\n initialized_variable_names[name + ':0'] = 1\n\n tf.logging.info('**** Trainable Variables ****')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n tf.logging.info(\n ' name = %s, shape = %s%s', var.name, var.shape, init_string\n )\n\n return (assignment_map, initialized_variable_names)", "def generate(dictalg):\n\n # dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)\n res = {}\n for f, i in pproc.dictAlgByFun(dictalg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n tmp = BestAlgSet(j)\n res[(d, f)] = tmp\n return res", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n ops = self.operator_form(index)\n op0 = self.args[0].operator_dict(index, vars, **kw)\n for var in op0:\n out[var] = ops * op0[var]\n return out", "def solvepredicates(predicates, objects_dic, predicates_rules, gstate):\n \"\"\"This function will pop an predicate from a list of predicates, and try to solve\n it, the predicate will be put back to the predicates list if it can not be solved at\n one turn. 
The funtion will return true if all the predicates has been solved.\n Args:\n predicates(list of String): a list of predicates that need to be solved.\n objects_dic(dictionary): a dictionary of objects that its attribtes has to be solved\n predicates_rules(dictonaru): animation rules of predictates.\n space(array):an array that will be used for distributex funtion, it remeber the current obj\n that in the space.\n\n \"\"\"\n i = 0\n while (predicates and i < 2000):\n predicate = predicates.pop(0)\n if predicate[\"name\"] not in predicates_rules:\n continue\n if check_rule_complete(predicate, objects_dic, predicates_rules):\n\n applypredicates(predicate, objects_dic, predicates_rules, gstate)\n else:\n if not predicates: # if the last predicate can not be solved\n return False\n predicates.append(predicate)\n i += 1\n return True", "def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\r\n assignment_map = {}\r\n initialized_variable_names = {}\r\n\r\n name_to_variable = collections.OrderedDict()\r\n for var in tvars:\r\n name = var.name\r\n m = re.match(\"^(.*):\\\\d+$\", name)\r\n if m is not None:\r\n name = m.group(1)\r\n name_to_variable[name] = var\r\n\r\n init_vars = tf.train.list_variables(init_checkpoint)\r\n\r\n assignment_map = collections.OrderedDict()\r\n for x in init_vars:\r\n (name, shape) = (x[0], x[1])\r\n if name not in name_to_variable:\r\n print ('{}: not load'.format(name))\r\n continue\r\n if name_to_variable[name].shape != shape:\r\n print ('{}: not load, shape not match {} -> {}'.format(name, shape, name_to_variable[name].shape))\r\n continue\r\n assignment_map[name] = name\r\n initialized_variable_names[name] = 1\r\n initialized_variable_names[name + \":0\"] = 1\r\n return (assignment_map, initialized_variable_names)", "def get_searchable_rules(rules):\n searchable_rules = {rule.variable: {} for rule in rules}\n for rule in rules:\n searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n return searchable_rules", "def generate_true_dict(all_triples):\n heads, tails = {(p, o) : [] for _, p, o in all_triples}, {(s, p) : [] for s, p, _ in all_triples}\n\n for s, p, o in all_triples:\n heads[p, o].append(s)\n tails[s, p].append(o)\n\n return heads, tails", "def _create_output_alternatives(self, predictions):\n return {self.head_name: (self._problem_type, predictions)}", "def _make_answer_dict(self, choice_list):\r\n\r\n answer_dict = {}\r\n for index, choice_answers_pair in enumerate(choice_list):\r\n # Choice is whether this choice is correct\r\n # Answers contains a list of answers to textinpts for the choice\r\n choice, answers = choice_answers_pair\r\n\r\n if choice:\r\n # Radio/Checkbox inputs in choicetext problems follow\r\n # a naming convention that gives them names ending with \"bc\"\r\n choice_id = \"1_2_1_choiceinput_{index}bc\".format(index=index)\r\n choice_value = \"choiceinput_{index}\".format(index=index)\r\n answer_dict[choice_id] = choice_value\r\n # Build the names for the numtolerance_inputs and add their answers\r\n # to `answer_dict`.\r\n for ind, answer in enumerate(answers):\r\n # In `answer_id` `index` represents the ordinality of the\r\n # choice and `ind` represents the ordinality of the\r\n # numtolerance_input inside the parent choice.\r\n answer_id = \"1_2_1_choiceinput_{index}_numtolerance_input_{ind}\".format(\r\n index=index,\r\n ind=ind\r\n )\r\n answer_dict[answer_id] = answer\r\n\r\n return answer_dict", "def parse_program(program, bools_dict):\n bools = {'A':True,'B':True,'C':True,'D':True,'T':False,'J':False}\n 
bools.update(bools_dict)\n print(bools)\n for instruction in program:\n parts = instruction.split(' ')\n if parts[0] == 'WALK':\n return\n instr, arg0, arg1 = parts[0], parts[1], parts[2]\n if instr == 'NOT':\n new_bool = not bools[arg0]\n print(f'{instruction}: {arg1}={new_bool}')\n elif instr == 'OR':\n new_bool = bools[arg0] or bools[arg1]\n print(f'{instruction} : {arg1}={new_bool}')\n elif instr == 'AND':\n new_bool = bools[arg0] and bools[arg1]\n print(f'{instruction}: {arg1}={new_bool}')\n bools[arg1] = new_bool", "def generate_dict(values):\n if values[\n 0\n ]: # Checks if the checkbox is true or false, so if the measurement should be condcuted or not\n return {\n \"measure_every\": values[1],\n \"start_strip\": values[2],\n \"end_strip\": values[3],\n }\n else:\n return {}", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n # Freeze arg1 metadata for caching ncc matrices\n frozen_arg1_basis_meta = freeze_meta(self.args[1].meta)[-1]\n op0 = self.args[0].as_ncc_operator(frozen_arg1_basis_meta, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in op1:\n out[var] = op0 * op1[var]\n return out", "def make_train_dict(input_ids, attention_masks, labels):\n return {'input_ids': input_ids, 'attention_mask': attention_masks, 'labels': labels}", "def convert_flags_to_boolean_dict(flags):\n return {f: True for f in flags}", "def _get_input_dict(input_ids: List[Tensor], attention_mask: List[Tensor]) ->Dict[str, Tensor]:\n output_dict = {'input_ids': torch.cat(input_ids), 'attention_mask': torch.cat(attention_mask)}\n return output_dict", "def preference_predicate(interactions_df, obs_interactions, target_interactions, truth_interactions, fold, setting):\n print(\"predicate_construction: preference_predicate:\")\n\n def write(s, p):\n print(\"predicate_construction: preference_predicate: writing: \" + \n './goodreads/' + str(fold) + '/' + setting + '/preference_' + p + '.txt') \n s.to_csv('./goodreads/' + str(fold) + '/' + setting + '/preference_' + p + '.txt',\n sep='\\t', header=False, index=True)\n\n # target predicates\n partition = 'targets'\n preference_df = pd.DataFrame(index=obs_interactions.union(target_interactions))\n write(preference_df, partition)", "def _get_criterions(self):\n # Fast-path already loaded\n if self.__criterions is not None:\n return self.__criterions\n # Initialize the dictionary\n self.__criterions = {\n \"top-k\": self._TopkCriterion,\n \"sigmoid\": self._SigmoidCriterion }\n # Return the dictionary\n return self.__criterions", "def get_association_dict_split_by_category(protein_ans_list):\n etype_2_association_dict = {}\n for etype in variables.entity_types:\n etype_2_association_dict[etype] = {}\n result = get_results_of_statement(\"SELECT protein_2_function.an, protein_2_function.function, protein_2_function.etype FROM protein_2_function WHERE protein_2_function.an IN({});\".format(str(protein_ans_list)[1:-1]))\n for res in result:\n an, associations_list, etype = res\n etype_2_association_dict[etype][an] = set(associations_list)\n return etype_2_association_dict", "def evaluate_mapped_inputs(self,**kwargs):\n result = {}\n for v,t,o,p,n in zip(self.values,self.thresholds,self.operations,self.proportions,self.output_names):\n value = kwargs.get(v)\n if isinstance(t,basestring):\n threshold = kwargs.get(t)\n else:\n threshold = t\n if o == \"lt\":\n result[n] = (value < threshold * p)\n elif o == \"gt\":\n result[n] = (value > threshold * p)\n elif o == \"lte\":\n result[n] = (value <= threshold * p)\n elif o == 
\"gte\":\n result[n] = (value >= threshold * p)\n return result", "def __init_probability_functions(self):\n probability_functions = {}\n for state in self.non_terminal_spaces:\n for action in self.action_space:\n resulting_state = state + self.action_space[action]\n if self.__off_grid_move(resulting_state, state):\n key = (state, -1, state, action)\n else:\n key = (resulting_state, -1, state, action)\n probability_functions[key] = 1\n return probability_functions", "def get_hints(self, student_answers, new_cmap, old_cmap):\r\n hintgroup = self.xml.find('hintgroup')\r\n if hintgroup is None:\r\n return\r\n\r\n # hint specified by function?\r\n hintfn = hintgroup.get('hintfn')\r\n if hintfn:\r\n # Hint is determined by a function defined in the <script> context; evaluate\r\n # that function to obtain list of hint, hintmode for each answer_id.\r\n\r\n # The function should take arguments (answer_ids, student_answers, new_cmap, old_cmap)\r\n # and it should modify new_cmap as appropriate.\r\n\r\n # We may extend this in the future to add another argument which provides a\r\n # callback procedure to a social hint generation system.\r\n\r\n global CORRECTMAP_PY\r\n if CORRECTMAP_PY is None:\r\n # We need the CorrectMap code for hint functions. No, this is not great.\r\n CORRECTMAP_PY = inspect.getsource(correctmap)\r\n\r\n code = (\r\n CORRECTMAP_PY + \"\\n\" +\r\n self.context['script_code'] + \"\\n\" +\r\n textwrap.dedent(\"\"\"\r\n new_cmap = CorrectMap()\r\n new_cmap.set_dict(new_cmap_dict)\r\n old_cmap = CorrectMap()\r\n old_cmap.set_dict(old_cmap_dict)\r\n {hintfn}(answer_ids, student_answers, new_cmap, old_cmap)\r\n new_cmap_dict.update(new_cmap.get_dict())\r\n old_cmap_dict.update(old_cmap.get_dict())\r\n \"\"\").format(hintfn=hintfn)\r\n )\r\n globals_dict = {\r\n 'answer_ids': self.answer_ids,\r\n 'student_answers': student_answers,\r\n 'new_cmap_dict': new_cmap.get_dict(),\r\n 'old_cmap_dict': old_cmap.get_dict(),\r\n }\r\n\r\n try:\r\n safe_exec.safe_exec(\r\n code,\r\n globals_dict,\r\n python_path=self.context['python_path'],\r\n slug=self.id,\r\n random_seed=self.context['seed'],\r\n unsafely=self.capa_system.can_execute_unsafe_code(),\r\n )\r\n except Exception as err:\r\n _ = self.capa_system.i18n.ugettext\r\n msg = _('Error {err} in evaluating hint function {hintfn}.').format(err=err, hintfn=hintfn)\r\n sourcenum = getattr(self.xml, 'sourceline', _('(Source code line unavailable)'))\r\n msg += \"\\n\" + _(\"See XML source line {sourcenum}.\").format(sourcenum=sourcenum)\r\n raise ResponseError(msg)\r\n\r\n new_cmap.set_dict(globals_dict['new_cmap_dict'])\r\n return\r\n\r\n # hint specified by conditions and text dependent on conditions (a-la Loncapa design)\r\n # see http://help.loncapa.org/cgi-bin/fom?file=291\r\n #\r\n # Example:\r\n #\r\n # <formularesponse samples=\"x@-5:5#11\" id=\"11\" answer=\"$answer\">\r\n # <textline size=\"25\" />\r\n # <hintgroup>\r\n # <formulahint samples=\"x@-5:5#11\" answer=\"$wrongans\" name=\"inversegrad\"></formulahint>\r\n # <hintpart on=\"inversegrad\">\r\n # <text>You have inverted the slope in the question. 
The slope is\r\n # (y2-y1)/(x2 - x1) you have the slope as (x2-x1)/(y2-y1).</text>\r\n # </hintpart>\r\n # </hintgroup>\r\n # </formularesponse>\r\n\r\n if (self.hint_tag is not None\r\n and hintgroup.find(self.hint_tag) is not None\r\n and hasattr(self, 'check_hint_condition')):\r\n\r\n rephints = hintgroup.findall(self.hint_tag)\r\n hints_to_show = self.check_hint_condition(\r\n rephints, student_answers)\r\n # can be 'on_request' or 'always' (default)\r\n\r\n hintmode = hintgroup.get('mode', 'always')\r\n for hintpart in hintgroup.findall('hintpart'):\r\n if hintpart.get('on') in hints_to_show:\r\n hint_text = hintpart.find('text').text\r\n # make the hint appear after the last answer box in this\r\n # response\r\n aid = self.answer_ids[-1]\r\n new_cmap.set_hint_and_mode(aid, hint_text, hintmode)\r\n log.debug('after hint: new_cmap = %s', new_cmap)", "def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates", "def _get_scoring_map(self):\r\n scoring = self.default_scoring\r\n choices = dict([(choice, choice) for choice in scoring])\r\n scoring_map = {}\r\n\r\n for inputfield in 
self.inputfields:\r\n option_scoring = dict([(\r\n option['id'],\r\n {\r\n 'correctness': choices.get(option['choice']),\r\n 'points': scoring.get(option['choice'])\r\n }\r\n ) for option in self._find_options(inputfield)])\r\n\r\n scoring_map[inputfield.get('id')] = option_scoring\r\n\r\n return scoring_map", "def build_intrenal_hap_dict(self, alleles, group2):\n\n hap_dict = self.hap_dict_per_group[group2]\n\n internal = {}\n for i, haplotype in enumerate(alleles):\n if type(haplotype[0])==tuple: #Checks if X is a tuple/list of alleles.\n n = len(haplotype)\n if n==1:\n internal[1 << i] = hap_dict[haplotype[0]]\n elif n==2:\n internal[1 << i] = hap_dict[haplotype[0]] & hap_dict[haplotype[1]]\n else:\n internal[1 << i] = reduce(and_,itemgetter(*haplotype)(hap_dict))\n\n elif type(haplotype[0])==int: #Checks if X is a single allele.\n internal[1 << i] = hap_dict[haplotype]\n else:\n raise Exception('error: joint_frequencies only accepts alleles and tuple/list of alleles.')\n\n return internal", "def __init__(self, predicates_behaviours, default_behaviour=None):\n self._predicates_behaviours = predicates_behaviours\n self._default_behaviour = default_behaviour or _default # makes testing easier", "def getDict():\n\t\n\t# Shuffle the list:\n\trandom.shuffle(condList)\n\t\n\t# Make a dictionary:\n\trandomDict = {}\n\t\n\tfor i in range(len(condList)):\n\t\trandomDict[letterList[i]] = condList[i]\n\t\n\treturn randomDict", "def Qs(self, state, actions):\n return {a: self[state, a] for a in actions}", "def predicates(self):\n return list(self._preds)", "def extract_constants_and_predicates(planning_problem: PlanningProblem) -> Tuple[List[Expr],\n List[Tuple[Expr, int]],\n Dict[str, Expr]]:\n seen_predicates = set()\n seen_constants = set()\n constants_per_predicate = collections.defaultdict(list)\n\n initial_predicates = planning_problem.initial\n # Make all predicates positive so we can extract the name via predicate.op\n goal_predicates = list(map(make_positive, planning_problem.goals))\n precondition_predicates = list(map(make_positive, [p for a in planning_problem.actions for p in a.precond]))\n postcondition_predicates = list(map(make_positive, [e for a in planning_problem.actions for e in a.effect]))\n\n all_predicates = initial_predicates + goal_predicates + precondition_predicates + postcondition_predicates\n\n for predicate in all_predicates:\n if predicate.op not in seen_predicates and not is_variable(predicate.op):\n seen_predicates.add((predicate.op, len(predicate.args)))\n for arg in predicate.args:\n if arg not in seen_constants and not is_variable(arg):\n seen_constants.add(arg)\n constants_per_predicate[predicate.op].append(arg)\n\n return list(seen_constants), list(seen_predicates), constants_per_predicate", "def get_valid_values_map(self, condition=False, remove_special=True):\n pkmap = {}\n for selection in self.selections.normal_values():\n rmap_pkmap = selection.get_valid_values_map(condition)\n for key in rmap_pkmap:\n if key not in pkmap:\n pkmap[key] = set()\n pkmap[key] |= set(rmap_pkmap[key])\n for key in self.get_parkey_map():\n if key not in pkmap:\n pkmap[key] = [] # flag a need for an unconstrained input\n if remove_special:\n specials = {\"ANY\",\"N/A\"}\n for key in pkmap: # remove specials like ANY or N/A\n if pkmap[key]:\n pkmap[key] = pkmap[key] - specials\n for key in pkmap: # convert to sorted lists\n pkmap[key] = sorted(pkmap[key])\n return pkmap", "def _answer(self, members, callables):\n answers = {m: getattr(self, m, None) for m in members}\n 
answers.update({k: c(self) for k, c in callables.iteritems()})\n return answers", "def precomp_target_queries(self, triplet_queries):\n self.eval()\n\n triplet_queries_idx = np.zeros((len(triplet_queries),3), dtype=np.int)\n queries_sro = Variable(torch.zeros(len(triplet_queries),3)).long()\n\n for count,triplet_query in enumerate(triplet_queries):\n\n subjectname, predicate, objectname = triplet_query.split('-')\n sub_cat = self.classes.word2idx[subjectname]\n obj_cat = self.classes.word2idx[objectname]\n rel_cat = self.predicates.word2idx[predicate]\n\n triplet_queries_idx[count,0] = sub_cat\n triplet_queries_idx[count,1] = rel_cat\n triplet_queries_idx[count,2] = obj_cat\n\n queries_sro[count,0] = self.idx_to_vocab['s'][sub_cat]\n queries_sro[count,2] = self.idx_to_vocab['o'][obj_cat]\n queries_sro[count,1] = self.idx_to_vocab['r'][rel_cat]\n\n if torch.cuda.is_available():\n queries_sro = queries_sro.cuda() \n\n\n return queries_sro, triplet_queries_idx", "def query(network, query_var, evidence):\n \n queryTrue = {query_var: True}\n queryTrue.update(evidence)\n probsTrue = [] \n queryFalse = {query_var: False}\n queryFalse.update(evidence)\n probsFalse = []\n \n hidden_vars = network.keys() - evidence.keys() - {query_var}\n for values in itertools.product((True, False), repeat=len(hidden_vars)):\n hidden_assignments = {var:val for var,val in zip(hidden_vars, values)}\n currentTrue = {**queryTrue, **hidden_assignments}\n probsTrue.append(joint_prob(network, currentTrue))\n currentFalse = {**queryFalse, **hidden_assignments}\n probsFalse.append(joint_prob(network, currentFalse)) \n alpha = 1 / (sum(probsTrue) + sum(probsFalse))\n probTrue = alpha * sum(probsTrue)\n probFalse = alpha * sum(probsFalse)\n \n return {True: probTrue, False: probFalse}", "def get_ask_mapping(cls):\n mapping = {}\n for field in cls._meta.fields:\n if isinstance(field, AskForField):\n mapping[field.allows_field] = field.name\n return mapping", "def as_configmap_data(self) -> dict[str, str]:\n data = {}\n for k, v in self.dict(by_alias=True).items():\n if isinstance(v, bool):\n # True -> \"true\", False -> \"false\"\n v = str(v).lower()\n data[k] = str(v)\n return data", "def _buildSpecializeMap(cls, namespaces, interwikimap):\n\n from mwlib.lang import languages\n \n res = {}\n\n def reg(name, num):\n name = name.lower()\n if num == namespace.NS_CATEGORY:\n res[name] = (CategoryLink, num)\n elif num == namespace.NS_FILE:\n res[name] = (ImageLink, num)\n else:\n res[name] = (NamespaceLink, num)\n\n for name, num in namespaces.iteritems():\n if isinstance(name, basestring):\n reg(name, num)\n else:\n for n in name:\n reg(n, num)\n\n for prefix, d in interwikimap.items():\n if 'language' in interwikimap[prefix] or prefix in languages:\n res[prefix] = (LangLink, prefix)\n else:\n res[prefix] = (InterwikiLink, d.get('renamed', prefix))\n \n return res", "def part1():\n memory = {}\n for line in lines:\n if line[:4] == 'mask':\n mask = line.split('=')[1].strip()\n on_mask = int(mask.replace('X', '0'), 2)\n off_mask = int(mask.replace('X', '1'), 2)\n else:\n a = int(line.split('[')[1].split(']')[0])\n to_write = int(line.split('=')[1])\n\n memory[a] = to_write & off_mask | on_mask # apparently the AND needs to come before the OR?\n\n #print(a, to_write, memory[a])\n\n answer = 0\n for v in memory.values():\n answer += v\n print(answer)", "def get_hints(entry):\n hints = {}\n if entry['approach_hints'] and entry['approach_hints'] != \"[]\":\n hints['approach'] = ast.literal_eval(str(entry['approach_hints']))\n if 
entry['syntactic_hints'] and entry['syntactic_hints'] != \"[]\":\n hints['syntactic'] = ast.literal_eval(str(entry['syntactic_hints']))\n if entry['skeleton_hints']:\n hints['skeleton'] = entry['skeleton_hints']\n return hints", "def create_feed_dict(sources, targets,source_ph,target_ph):\n source_graphs = utils_np.networkxs_to_graphs_tuple(sources)\n target_graphs = utils_np.networkxs_to_graphs_tuple(targets)\n feed_dict = {source_ph: source_graphs, target_ph: target_graphs}\n return feed_dict", "def calculate_histograms_from_assignments(self, funcs, bin_size=1):\n result = defaultdict(lambda: defaultdict(\n lambda: Histogram(bin_size)\n ))\n for assignment in self.get_assignment_reader():\n for name, func in funcs.iteritems():\n value = func(assignment)\n if value is None:\n continue\n result[name][assignment.source].add(value)\n return result", "def lookups(self, request, model_admin):\n return (\n ('yes', 'yes'),\n ('no', 'no'),\n )", "def get_dict(self, internal: bool = True) -> Dict[str, Any]:\n d = [\n (\"ID\", self.id),\n (\"Name\", self.name),\n (\"Reference\", self.reference),\n (\"Turbo\", self.is_turbo),\n ]\n\n if internal:\n d += [\n (\"Special\", self.is_special),\n (\"Class\", self.class_def),\n (\"Equality\", self.eq_function),\n (\"Args\", self.args),\n (\"Tune Grid\", self.tune_grid),\n (\"Tune Distributions\", self.tune_distribution),\n (\"Tune Args\", self.tune_args),\n (\"SHAP\", self.shap),\n (\"GPU Enabled\", self.is_gpu_enabled),\n (\"Tunable Class\", self.tunable),\n ]\n\n return dict(d)", "def _parseGoal(self, goal, true_goal, domain):\n goal[domain] = {}\n goal[domain] = {'informable': {}, 'requestable': [], 'booking': []}\n if 'info' in true_goal[domain]:\n if domain == 'train':\n # we consider dialogues only where train had to be booked!\n if 'book' in true_goal[domain]:\n goal[domain]['requestable'].append('reference')\n if 'reqt' in true_goal[domain]:\n if 'id' in true_goal[domain]['reqt']:\n goal[domain]['requestable'].append('id')\n else:\n if 'reqt' in true_goal[domain]:\n for s in true_goal[domain]['reqt']: # addtional requests:\n if s in ['phone', 'address', 'postcode', 'reference', 'id']:\n # ones that can be easily delexicalized\n goal[domain]['requestable'].append(s)\n if 'book' in true_goal[domain]:\n goal[domain]['requestable'].append(\"reference\")\n\n for s, v in true_goal[domain]['info'].items():\n s_, v_ = clean_slot_values(domain, s, v, self.mapping_pair_path)\n if len(v_.split()) > 1:\n v_ = ' '.join([token.text for token in self.reader.nlp(v_)]).strip()\n goal[domain][\"informable\"][s_] = v_\n\n if 'book' in true_goal[domain]:\n goal[domain][\"booking\"] = true_goal[domain]['book']\n return goal", "def boolify_scheduling_problem(student_preferences, session_capacities):\n #first constraint for assignment\n #clause will consist of name_preference = True (or)\n cnf = []\n names = student_preferences.keys()\n for name in names:\n clause = []\n preferences = student_preferences[name]\n for preference in preferences:\n literal = (str(name) + '_' + str(preference)), 'True'\n clause.append(literal)\n cnf.append(clause)\n #concatenate other 2 constraints, as detailed by the helper functions below\n cnf += one_session(student_preferences, session_capacities)\n cnf += oversubscribed(student_preferences, session_capacities)\n return cnf", "def get_dict(self, internal: bool = True) -> Dict[str, Any]:\n d = [\n (\"ID\", self.id),\n (\"Name\", self.name),\n (\"Reference\", self.reference),\n (\"Turbo\", self.is_turbo),\n ]\n\n if internal:\n d += [\n 
(\"Special\", self.is_special),\n (\"Class\", self.class_def),\n (\"Equality\", self.eq_function),\n (\"Args\", self.args),\n (\"Tune Grid\", self.tune_grid),\n (\"Tune Distributions\", self.tune_distribution),\n (\"Tune Args\", self.tune_args),\n (\"GPU Enabled\", self.is_gpu_enabled),\n (\"Tunable Class\", self.tunable),\n ]\n\n return dict(d)", "def get_default_filters_dict(class_of_filters,measure,**filters):\n\tif \"datadrop__in\" in filters:\n\t\tfilters.pop(\"datadrop__in\")\n\tif class_of_filters==\"short_student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'All Lower':{'upn__wide_banding':\"L\"},\n\t\t\t'All Middle':{'upn__wide_banding':\"M\"},\n\t\t\t'All Higher':{'upn__wide_banding':\"H\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"}\n\t\t\t}\n\telif class_of_filters==\"student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':\"Lx\"},\n\t\t\t'Lower':{'upn__narrow_banding':\"L\"},\n\t\t\t'Middle':{'upn__narrow_banding':\"M\"},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':\"Ml\"},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':\"Mh\"},\n\t\t\t'Higher':{'upn__narrow_banding':\"H\"},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':\"Hx\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"},\n\t\t\t'Low Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\"},\n\t\t\t'Middle Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\"},\n\t\t\t'High Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\"},\n\t\t\t'Low Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\"},\n\t\t\t'Middle Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'Low PP Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Middle PP Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'High PP Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Low PP Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'Middle PP Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'High PP Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t}\n\telif class_of_filters==\"att8bucket\":\n\t\treturnDict= {'All':{},\n\t\t\t'Maths':{'subject__attainment8bucket':'ma'},\n\t\t\t'English':{'subject__attainment8bucket':'en'},\n\t\t\t'EBacc':{'subject__attainment8bucket':'eb'},\n\t\t\t'Open':{'subject__attainment8bucket':'op'},\n\t\t\t}\n\telif class_of_filters==\"banding\":\n\t\treturnDict= {'All':{},\n\t\t\t'All Lower':{'upn__wide_banding':'L'},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':'Lx'},\n\t\t\t'Lower':{'upn__narrow_banding':'L'},\n\t\t\t'All Middle':{'upn__wide_banding':'M'},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':'Ml'},\n\t\t\t'Middle 
(Higher)':{'upn__narrow_banding':'Mh'},\n\t\t\t'All Higher':{'upn__wide_banding':'H'},\n\t\t\t'Higher':{'upn__narrow_banding':'H'},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':'Hx'},\n\t\t\t'No Banding':{'upn__wide_banding':'N'},\n\t\t\t}\n\telif class_of_filters==\"subject_blocks\":\n\t\treturnDict= {'All':{},\n\t\t\t'Core':{'subject__option_subject':False},\n\t\t\t'Option':{'subject__option_subject':True},\n\t\t\t'EBacc':{'subject__ebacc_subject':True},\n\t\t\t'Non-EBacc':{'subject__ebacc_subject':False},\n\t\t\t}\n\telif \"staff\" in class_of_filters:\n\t\tfilters.pop('datadrop',None)\n\t\tfilters.pop('datadrop__name',None)\n\t\tif \"classgroup\" in filters:\n\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\tfilters.pop('classgroup',None)\n\t\treturnDict={'All':{}}\n\t\tstaff_set=set(classgroup.objects.filter(**filters).exclude(staff=\"---\")\n\t\t\t.values_list('staff').distinct())\n\t\tstaff_list=[]\n\t\tfor st in staff_set:\n\t\t\tfor s in st:\n\t\t\t\tstaff_list.append(s)\n\t\tstaff_list.sort()\n\t\tfor code in staff_list:\n\t\t\tclasses=classgroup.objects.filter(staff=code,**filters).distinct()\n\t\t\tif \"short\" not in class_of_filters:\n\t\t\t\tfor cl in classes:\n\t\t\t\t\treturnDict[code+\" \"+cl.class_code]={\"classgroup\":cl}\n\t\t\treturnDict['All ' +code]={\"classgroup__in\":classes}\n\telse:\n\t\t\"\"\"if not a fixed set of filters, populate from objects in db based on\n\t\tclass, code specific to each class removes invalid filters and replaces\n\t\tthem with valid ones where possible\"\"\"\n\t\tif class_of_filters==\"classgroup\" :\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"subject\" or class_of_filters==\"faculty\":\n\t\t\tif \"subject\" in filters:\n\t\t\t\tfilters['name']=filters['subject'].name\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['name']=filters['subject__name']\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tif \t\"datadrop__name\" in filters:\n\t\t\t\tfilters['name']=filters['datadrop__name']\n\t\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"datadrop\" in filters:\n\t\t\t\tfilters['id']=filters['datadrop'].id\n\t\t\t\tfilters.pop('datadrop',None)\n\t\t\tif \"subject\" in filters or \"faculty\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject=filters['subject'])\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject__name__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['cohort']=filters['classgroup'].cohort\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"yeargroup\" :\n\t\t\tif \"subject__name\" in filters and measure==\"progress\":\n\t\t\t\tfilters['subject__in']=subject.objects.filter(\n\t\t\t\t\tname__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"cohort\" in filters and measure==\"progress\":\n\t\t\t\tfilters['cohort']=filters['cohort'].cohort\n\t\t\tfilters.pop('subject',None)\n\n\t\t#get queryset or set of objects from db based on filters\n\t\tif class_of_filters 
in ['yeargroup','datadrop','subject',\n\t\t'classgroup']:\n\t\t\tqset=apps.get_model('analysis',class_of_filters).\\\n\t\t\t\tobjects.filter(**filters)\n\t\telif class_of_filters==\"faculty\":\n\t\t\tqset=['Maths','English','Science','Humanities','MFL',\n\t\t\t\t'Arts','Technology','IT',None]\n\t\t\tfor sub in subject.objects.filter(**filters):\n\t\t\t\tif sub.faculty not in qset:\n\t\t\t\t\tqset.add(sub.faculty)\n\n\t\t#sorting set for each class\n\t\tif class_of_filters==\"yeargroup\":\n\t\t\tclass_of_filters=\"subject__cohort\"\n\t\t\tqset=qset.order_by('cohort')\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tqset=qset.order_by('cohort','-date')\n\t\telif class_of_filters==\"subject\":\n\t\t\tqset=qset.order_by('name','faculty')\n\t\telif class_of_filters==\"classgroup\":\n\t\t\tqset=qset.order_by('class_code')\n\t\telif class_of_filters==\"faculty\":\n\t\t\tclass_of_filters=\"subject__faculty\"\n\t\t#populate returning dictionary with set/queryset\n\t\treturnDict={}\n\t\treturnDict['All']={}\n\t\tif class_of_filters==\"subject\":\n\t\t\tfor q in qset:\n\t\t\t\treturnDict[q.name]={'subject__name':q.name}\n\t\telse:\n\t\t\tfor q in qset:\n\t\t\t\tif q is None and \"faculty\" in class_of_filters:\n\t\t\t\t\treturnDict[\"Other\"]={class_of_filters:q}\n\t\t\t\telse:\n\t\t\t\t\treturnDict[q.__str__()]={class_of_filters:q}\n\tif measure in avg_headline_measures or measure in pct_headline_measures:\n\t\tfor outerkey,dict in returnDict.items():\n\t\t\tdict=clean_filters(dict)\n\treturn returnDict", "def predicates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"predicates\")", "def predicates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"predicates\")", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def _create_original_queries(self):\n for key in self._trips_dict.keys():\n if key in self.get_acceptable_modes():\n if key == 'transit':\n self._trips_dict[key] = self._transit_trip_factory()\n else:\n self._trips_dict[key] = self._leg_factory(key)", "def graph_by_clauses(clauses: List[Tuple[int, int]]) -> DefaultDict[int, List]:\n graph = defaultdict(list)\n for clause in clauses:\n x, y = clause # encoded literals\n graph[negate(x)].append(y)\n graph[negate(y)].append(x)\n graph[x].extend([])\n graph[y].extend([])\n return graph", "def rules(cls):\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes", "def get_behavior_dict(d, names, subject_ids=None):\n subject_list = []\n if subject_ids is None:\n subject_list = list(d.values())\n else:\n for sub_id in subject_ids:\n subject_list.append(d[sub_id])\n\n behavior_list = list(zip(*subject_list))\n behavior_dict = {}\n for idx, name in enumerate(names):\n behavior_dict[name] = behavior_list[idx]\n\n return behavior_dict", "def make_keydict(self, analyte=None):\n if analyte is None:\n analyte = self.analytes\n elif isinstance(analyte, str):\n analyte = [analyte]\n\n out = {}\n for a in analyte:\n key = []\n for f in self.components.keys():\n if self.switches[a][f]:\n key.append(f)\n out[a] = ' & '.join(sorted(key))\n self.keydict = out\n return out", "def set_list_predicates(self):\n results = self.rml.query(\"\"\"\n SELECT 
DISTINCT ?subj_class ?list_field\n {\n ?bn rr:datatype rdf:List .\n ?bn rr:predicate ?list_field .\n ?s ?p ?bn .\n ?s rr:subjectMap ?sm_bn .\n ?sm_bn rr:class ?subj_class .\n }\"\"\")\n list_preds = [(Uri(row[0]).sparql, Uri(row[1]).sparql)\n for row in results]\n array_fields = {}\n for tup in list_preds:\n try:\n array_fields[tup[0]].append(tup[1])\n except KeyError:\n array_fields[tup[0]] = [tup[1]]\n self.array_fields = array_fields", "def _create_p13n_fn_dict(learning_rate):\n p13n_fn_dict = collections.OrderedDict()\n\n adam_opt_fn = lambda: tf.keras.optimizers.Adam(learning_rate=learning_rate)\n p13n_fn_dict['adam_opt'] = lambda: _build_personalize_fn(adam_opt_fn)\n\n sgd_opt_fn = lambda: tf.keras.optimizers.SGD(learning_rate=learning_rate)\n p13n_fn_dict['sgd_opt'] = lambda: _build_personalize_fn(sgd_opt_fn)\n\n return p13n_fn_dict", "def to_obj(self) -> Dict[str, Union[List[str],\n List[Dict[str, str]],\n List[HintRowObjType]]]:\n\n return {\n \"sources\": self.sources,\n \"source_parameters\": [\n sp.to_obj()\n for sp\n in self.source_parameters\n ],\n \"hints\": [h.to_obj() for h in self.hints],\n }", "def tests(self):\n\n return {\n 'variable_boolean': self.variable_boolean\n }", "def learn(primer, dependencies):\n knowledge_map = defaultdict(dict)\n for row in primer:\n for dvcol, ivcol in dependencies.items():\n # knowledge of the dependent value is mapped to the value\n # of the independent value col\n #\n # notice:\n # - if the knowledge_map has no entry for the dv col,\n # a dict is constructed automatically\n # - the value of the iv col is used\n # - overwrites the previous known relationship\n knowledge_map[dvcol][row[ivcol]] = row[dvcol]\n return knowledge_map", "def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}", "def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations", "def get_instructions(prog):\n insts = {}\n for i in range(prog.InstructionCount()):\n insts[i] = prog.setParam(i)\n return insts", "def get_num_of_states_and_facts(self):\n\n public_predicates = {}\n private_predicates = {}\n sum_of_facts = 0\n\n for domain in self._domains:\n\n problem = list(filter(lambda prob: prob.domain == domain.name, self._problems))[0]\n last_index = problem.name.rfind('-')\n agent_name = problem.name[last_index + 1:]\n\n for predicate in domain.predicates:\n\n predicate_has_private_arg = False\n arg_types = list(map(lambda arg: Agent.get_all_subtypes(arg.type, domain.type_hierarchy),\n predicate.args))\n ordered_objs = []\n\n # go over each arg type (type of first param, second param ...)\n for possible_types in arg_types:\n objs_of_subtype = []\n\n # for each arg add all the types it can be as a list of types\n for type in possible_types:\n if problem.objects.__contains__(type):\n if 
problem.objects[type][0].private:\n predicate_has_private_arg = True\n objs_of_subtype += problem.objects[type]\n\n ordered_objs.append(objs_of_subtype)\n\n param_combinations = list(itertools.product(*ordered_objs))\n\n if (predicate.is_private or predicate_has_private_arg) and predicate.name not in private_predicates:\n private_predicates[predicate.name + '-' + agent_name] = len(param_combinations)\n sum_of_facts += len(param_combinations)\n elif predicate.name not in public_predicates:\n public_predicates[predicate.name] = len(param_combinations)\n sum_of_facts += len(param_combinations)\n\n total_assignments = list(public_predicates.values()) + list(private_predicates.values())\n num_of_states = reduce(lambda a, b: a * b, total_assignments, 1)\n return num_of_states, sum_of_facts", "def initialize_assignment(self):\n # Initialize empty frozensets for each agent\n init_assignment = frozendict({a:frozenset() for a in self.agents})\n \n # Add hard assignments\n if self.hard_assignment:\n init_dict = dict(init_assignment)\n for a, t in self.hard_assignment.items():\n init_dict[a] = init_dict[a] | t\n init_assignment = frozendict(init_dict)\n \n return init_assignment", "def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def mk_id_lookups(self):\n id_lookups = {}\n for ns in self.ddef.keys():\n id_lookups[ns] = self.mk_id_lookup(ns)\n return id_lookups", "def get_gold_pred_idx_dict(self, y_true, y_pred):\n gold_pred_idx_dict = defaultdict(lambda: defaultdict(list))\n gold_pred_ct_dict = defaultdict(lambda: defaultdict(int)) \n\n for gold_idx in range(3,self.nerTags.size):\n gold_filter = (y_true == gold_idx).astype(\"int\") # 1/0 all rows with that gold_idx\n for pred_idx in range(3,self.nerTags.size):\n pred_filter = (y_pred == pred_idx).astype(\"int\") # 1/0 all rows with that ner_idx\n match_ner_idx = np.nonzero(np.all([gold_filter, pred_filter],axis=0).astype(\"int\"))[0]\n gold_pred_idx_dict[gold_idx][pred_idx] = match_ner_idx \n gold_pred_ct_dict[gold_idx][pred_idx] = match_ner_idx.shape[0] \n\n return gold_pred_idx_dict, gold_pred_ct_dict", "def facts(self): # pylint: disable=invalid-overridden-method\n return {}", "def __init__(self, num_vars=3, bin_ops=(sympy.And, sympy.Or), target_tt=None, mutate_prob=0.05, weight_num_agree=20, weight_num_gates=-0.1, nsymbols=5):\n self.num_vars = num_vars\n\n # symbols we use in the problem\n self.syms = sympy.symbols('s:' + str(num_vars)) # creates a tuple of (s0, s1, ...s{NUM_VARS-1})\n\n # operations we use in the problem\n self.bin_ops = bin_ops\n self.ops = bin_ops + (sympy.Not,)\n\n # FIXME: \"private\" some of these\n # some precalced stuff for the functions\n self.str_syms = set(map(str, 
self.syms))\n self.tstr_syms = tuple(self.str_syms)\n self.bin_ops_chars = [self.BIN_OPS_MAP[op] for op in self.bin_ops]\n self.str_bin_ops = set(map(str, self.bin_ops))\n self.str_ops = set(map(str, self.ops))\n self.tstr_bin_ops = tuple(self.str_bin_ops)\n self.tstr_ops = tuple(self.bin_ops)\n self.or_op_regex = re.compile('|'.join(self.str_ops))\n\n # some truth table, given as a np.array with shape (2**n, )\n self.target_tt = target_tt if target_tt is not None else np.random.randint(2, size=2 ** 3, dtype=np.bool)\n\n self.tt_vars = list(itertools.product([0, 1], repeat=self.num_vars)) # [(0, 0, 0), (0, 0, 1), (0, 1, 0), ...]\n self.tt_vars_lists = list(\n zip(*self.tt_vars)) # [(0, 0, 0, 0, 1, 1, 1, 1), (0, 0, 1, 1, 0, 0, 1, 1), (0, 1, 0, 1, 0, 1, 0, 1)]\n\n self.mutate_prob = mutate_prob\n self.weight_num_agree = weight_num_agree\n self.weight_num_gates = weight_num_gates\n self.nsymbols = nsymbols\n\n # create process pool\n self.pool = multiprocessing.Pool(multiprocessing.cpu_count())", "def extract_rules(rules: Dict) -> Dict:\n\n \"\"\"Dictionary to return\"\"\"\n rules_ltl = {}\n\n if \"gridworld\" in rules:\n rules_ltl[\"gridworld\"] = []\n for elem, adjacent in rules[\"gridworld\"].items():\n ltl = \"G(\"\n ltl += elem.formula + \" -> X (\"\n ltl += \" | \".join([a.formula for a in adjacent])\n ltl += \"))\"\n variables = Variables()\n variables |= elem.variables\n for a in adjacent:\n variables |= a.variables\n rules_ltl[\"gridworld\"].append(LTL(formula=ltl, variables=variables, kind=\"gridworld\"))\n\n if \"context\" in rules:\n rules_ltl[\"context\"] = []\n if \"mutex\" in rules[\"context\"]:\n for mtx_elements in rules[\"context\"][\"mutex\"]:\n if len(mtx_elements) > 0:\n variables: Variables = Variables()\n ltl = \"G(\"\n for vs in mtx_elements:\n variables |= vs.variables\n mtx_elements_str = [n.formula for n in mtx_elements]\n clauses = []\n for vs_a in mtx_elements_str:\n clause = [deepcopy(vs_a)]\n for vs_b in mtx_elements_str:\n if vs_a is not vs_b:\n clause.append(Not(deepcopy(vs_b)))\n clauses.append(And(clause))\n ltl += Or(clauses)\n ltl += \")\"\n rules_ltl[\"context\"].append(LTL(formula=ltl, variables=variables, kind=\"context\"))\n\n if \"inclusion\" in rules[\"context\"]:\n for pre, post in rules[\"context\"][\"inclusion\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"context\"].append(LTL(formula=ltl, variables=variables, kind=\"context\"))\n\n if \"context_gridworld\" in rules:\n rules_ltl[\"context_gridworld\"] = []\n for pre, post in rules[\"context_gridworld\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"context_gridworld\"].append(LTL(formula=ltl, variables=variables, kind=\"context_gridworld\"))\n\n if \"constraints\" in rules:\n rules_ltl[\"constraints\"] = []\n if \"mutex\" in rules[\"constraints\"]:\n for mtx_elements in rules[\"constraints\"][\"mutex\"]:\n if len(mtx_elements) > 0:\n variables: Variables = Variables()\n ltl = \"G(\"\n for vs in mtx_elements:\n variables |= vs.variables\n mtx_elements_str = [n.formula for n in mtx_elements]\n clauses = []\n for vs_a in mtx_elements_str:\n clause = [deepcopy(vs_a)]\n for vs_b in mtx_elements_str:\n if vs_a is not vs_b:\n clause.append(Not(deepcopy(vs_b)))\n clauses.append(And(clause))\n ltl += Or(clauses)\n ltl += \")\"\n rules_ltl[\"constraints\"].append(\n 
LTL(formula=ltl, variables=variables, kind=\"constraints\"))\n\n if \"inclusion\" in rules[\"constraints\"]:\n for pre, post in rules[\"constraints\"][\"inclusion\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"constraints\"].append(\n LTL(formula=ltl, variables=variables, kind=\"constraints\"))\n\n return rules_ltl", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def make_guard_usability_dict(client_as, fp_to_as, pfi):\r\n\r\n if client_as in client_to_guard_usability:\r\n guard_to_usability = client_to_guard_usability[client_as]\r\n else:\r\n print(\"Create guard_to_usability from scratch\")\r\n guard_to_usability = {}\r\n\r\n for guard_fp, guard_asn in fp_to_as.items():\r\n if guard_fp in guard_to_usability and len(guard_fp) == 40:\r\n # if non-malicious guard\r\n continue\r\n\r\n suspects = bidirectional_lookup(client_as, guard_asn, pfi)\r\n\r\n if len(suspects) == 0:\r\n # No path inference could be performed\r\n guard_to_usability[guard_fp] = False\r\n elif (len(suspects & SUSPECTS) != 0):\r\n # Suspect on path\r\n guard_to_usability[guard_fp] = False\r\n else:\r\n guard_to_usability[guard_fp] = True\r\n\r\n return guard_to_usability", "def _initialize_dicts(action_list):\n\n act_to_int = {}\n int_to_act = {}\n\n # Loop through the list and store it in the dictionaries\n for i, action in enumerate(action_list):\n act_to_int[action] = i\n int_to_act[i] = action\n\n return act_to_int, int_to_act", "def _reconstruct_hints(hint_records):\n recs = [hint_record[\"h\"] for hint_record in hint_records]\n return [Hint(rec[\"class\"], {k: v for k, v in rec.items() if k != \"class\"}) for rec in recs]", "def _facts(facts):\n return {'swift_facts': facts}", "def adjPopulate(adjDict,adjFlag):\n# adjectives with enhancements starting with 'a' will go into dictionaries for weapons\n# similarly, adjective enhancements leading with 'd' go to shields and 'h' for helmets\n# Returns a dictionary corresponding to the key/values that match the flag\n passDict = {}\n\n for key in adjDict:\n if adjDict[key][0] == adjFlag:\n passDict[key] = adjDict[key]\n\n return passDict", "def prepare_inputs(self, **inputs):\n true_inputs = {'imain_loc' : self.get_input_tensor(0, 'loc'),\n 'imain_prec' : self.get_input_tensor(0, 'prec'),\n 'ids_prec' : self.get_input_tensor(1, 'prec'),\n 'ids_A' : self.get_input_tensor(1, 'A'),\n 'iprior_scale' : self.get_input_tensor(2, 'scale')}\n \n if inputs:\n print(\"\\t\\tUpdating defaults,\", self.name, \"with\", list(inputs.keys()))\n true_inputs.update(inputs)\n return true_inputs", "def writePredicates(agent, predicates, output, errOutput):\r\n #Testing Script. Begin:\r\n# global chronometers\r\n #Testing Script. 
End.\r\n predimpls = []\r\n for pred in predicates:\r\n impl = agent.getImp(pred)\r\n if impl is None:\r\n continue\r\n try:\r\n if isinstance(impl, PersistablePredImpInt) \\\r\n and impl.symbol not in SPU_LOAD_INFO_PREDICATES:\r\n predimpls.append(impl)\r\n except AnyException, f:\r\n NEWPM.displayError()\r\n errOutput.write('[%s.%s]: %s\\n'%(pred.name, impl.__class__.__name__, str(f)))\r\n \r\n #Print predicate definitions to file\r\n for impl in predimpls:\r\n try:\r\n #Testing Script. Begin:\r\n# #esm = str(type(impl))\r\n# #if not chronometers.has_key(esm):\r\n# # chronometers[esm] = chronometer(esm)\r\n# #chronometers[esm].start() \r\n #Testing Script. End.\r\n arityOrFacts = impl.persist_arity_or_facts(agent)\r\n if isinstance(arityOrFacts, int):\r\n b, z = optBZ([None]*arityOrFacts)\r\n facts = [[b.bTermEval(agent, x) for x in z] \\\r\n for s in impl.solutions(agent, b, z) if s]\r\n elif arityOrFacts == None:\r\n raise LowError(\"persist_arity_or_facts method for %s returned None\"%impl.symbol)\r\n else:\r\n facts = arityOrFacts\r\n for fact in facts:\r\n writeFact(agent, impl.symbol, fact, output)\r\n except AnyException, f:\r\n errid = NEWPM.displayError()\r\n NEWPM.pm(errid)\r\n if isinstance(impl, DefaultImp):\r\n #use the predsym\r\n errOutput.write(\"%s(DefaultImp): %s\\n\"%(impl.symbol,str(f)))\r\n else:\r\n errOutput.write(\"%s: %s\\n\"%(impl.__class__.__name__,str(f)))\r\n #Testing Script. Begin:\r" ]
[ "0.62115085", "0.5905862", "0.54716694", "0.5459201", "0.54483634", "0.5425386", "0.53802824", "0.5355178", "0.5344006", "0.53094596", "0.5296737", "0.52827144", "0.52518475", "0.5238643", "0.5196496", "0.5182272", "0.51804197", "0.51510084", "0.5123071", "0.51040995", "0.50854367", "0.50609535", "0.5060923", "0.5058775", "0.5055246", "0.5048132", "0.5045625", "0.50160706", "0.5004318", "0.49689808", "0.49682495", "0.49672177", "0.49636066", "0.49633464", "0.4945771", "0.49293974", "0.49277538", "0.49179307", "0.4916199", "0.4913535", "0.48993403", "0.4890677", "0.488998", "0.4889126", "0.48725948", "0.48681274", "0.48621565", "0.48446956", "0.48428708", "0.48352176", "0.48170778", "0.48073024", "0.48038137", "0.48028484", "0.4793185", "0.47840956", "0.47788987", "0.47752312", "0.47740185", "0.47619045", "0.47604436", "0.47530428", "0.4748504", "0.4745828", "0.47413647", "0.47328392", "0.47261173", "0.47261173", "0.4717638", "0.47174245", "0.47107947", "0.470637", "0.47003984", "0.4694599", "0.46925837", "0.46905473", "0.46871653", "0.4685622", "0.46845233", "0.46796507", "0.46795872", "0.4679569", "0.46723777", "0.46721035", "0.46714157", "0.46710265", "0.46656168", "0.4663965", "0.46629575", "0.46627158", "0.46618432", "0.46594232", "0.46594232", "0.46587965", "0.4656927", "0.46446785", "0.4639267", "0.46378806", "0.46376985", "0.46364474" ]
0.4805963
52
Stores in a random location in the Linked list
def add(self, item): if self.count == 0: random_location = 0 else: random_location = random.randint(0, self.count - 1) self.insert(Node(item), random_location)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_location(self):\n return random.choice(self.locations_list)", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]", "def getRandom(self):\n index = random.randrange(0, self.length)\n node = self.head\n while index:\n node = node.next\n index -= 1\n return node.val", "def get_random_link(self):\n return tuple([random.randint(0, d-1) for d in self.link_idxs])", "def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]", "def random_insert_seq(lst, seq):\n insert_locations = random.sample(range(len(lst) + len(seq)), len(seq))\n inserts = dict(zip(insert_locations, seq))\n iter_lst = iter(lst)\n lst[:] = [\n inserts[pos]\n if pos in inserts else next(iter_lst)\n for pos in range(len(lst) + len(seq))]", "def totem_random():\n random_head()\n random_head()\n random_head()", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def randLoc(this):\n from temp_aber import randperc, trapch\n\n if randperc() > 50:\n this.locId = -5\n else:\n this.locId = -183\n\n trapch(this.locId)", "def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))", "def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def random_link(self, state):\n raise NotImplementedError('missing data mixin')", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def selectRandomFromList(ldata):\n\treturn ldata[randint(0, len(ldata)-1)]", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed", "def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = 
self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA", "def auto_play_random(self, player=None):\r\n if player is None:\r\n player = self.get_player()\r\n legal_list = self.get_legal_list()\r\n next_move = legal_list.rand_obj()\r\n self.new_edge(next_move)", "def getRandom(self) -> int:\n index = random.randint(0, len(self.lst) - 1)\n # self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n # val = self.lst.pop()\n # self.dic.pop(val)\n return self.lst[index]", "def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt", "def add_unique_node(node_list, current_company):\n selected_company = int(random.random() * COMPANY_COUNT)\n while selected_company in node_list or current_company == selected_company:\n selected_company = int(random.random() * COMPANY_COUNT)\n node_list.append(selected_company)", "def copy_list(node):\n curr = node\n map = OrderedDict()\n while curr is not None:\n if not map.get(curr, None):\n map[curr] = Node(curr.val)\n if curr.next and not map.get(curr.next, None):\n map[curr.next] = Node(curr.next.val)\n map[curr].next = map[curr.next]\n if curr.random and not map.get(curr.random, None):\n map[curr.random] = Node(curr.next.random)\n map[curr].random = map[curr.random]\n curr = curr.next\n display(node, next(iter(map)))", "def getRandom(self):\n ans = self.head\n index = 1\n node = ans.next\n while node:\n value = random.randrange(0, index + 1)\n if value == 0:\n ans = node\n index += 1\n node = node.next\n return ans.val", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def getRandom(self) -> int:\n count = 0\n temp = self.head\n while temp:\n if random.randint(0,count)==0:\n res = temp.val\n temp = temp.next\n count+=1\n return res", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node", "def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, lng]", "def random_intraroute_insertion(customer, route, customers):\n fpositions = factible_route_positions(customer, route, customers)\n #print(\"customer, factibles = \",customer, fpositions)\n if fpositions:\n npos = random.choice(fpositions)\n route.insert(npos, [customer], customers)\n #print(\"route = \", route)\n return True\n else: return False", "def getRandom(self):\n if not self.l:\n return -1\n return random.choice(self.l)", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 
0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def getRandom(self) -> int:\n R = self.head; k = 1\n node = self.head.next\n i = 1\n\n while(node):\n j = random.randint(1, i+1)\n if j <= k:\n R = node\n\n node = node.next\n i += 1\n\n return R.val", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def insert_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])))\n mutated_genome[index][2].insert(point_index, point)", "def choice(L):\r\n LEN = len(L) # Get the length\r\n randomindex = int(LEN*random()) # Get a random index\r\n return L[randomindex] # Return that element\r", "def getRandom(self):\n import random\n res = -1\n len = 0\n head = self.head\n while head:\n if random.randint(0,len) == 0:\n res = head.val\n head = head.next\n len += 1\n return res", "def move1(self):\n\n options = self.location.exits.keys()\n self.location.objects.remove(a)\n print('fred is moving..')\n self.location = self.location.exits[random.choice(list(options))]\n self.location.objects.append(a)", "def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new", "def get_random_location(self):\n max_x, max_y, max_z, min_x, min_y, min_z = self.get_max_and_min()\n if max_x == float('-inf') and min_x == float('inf') and max_y == float('-inf') and min_y == float('inf') and \\\n max_z == float('-inf') and min_z == float('inf'):\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n return ans\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n counter += 1\n x = random.uniform(max_x, min_x)\n y = random.uniform(max_y, min_y)\n z = random.uniform(max_z, min_z)\n if counter == 0: # means all nodes doesn't have any location\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n else:\n ans = x, y, z\n return ans", "def createtown_random(self):\n town = m.Town()\n town.name = town.name + str(len(self.alltowns))\n self.print_mainlog(\n \"A new town, %s, appeared at %d,%d!\" %\n (town.name, town.pos.x, town.pos.y)\n )\n self.alltowns.append(town)", "def move(self):\n a = random.randint(0, len(self.state) - 1)\n b = random.randint(0, len(self.state) - 1)\n self.state[a], self.state[b] = self.state[b], self.state[a]\n\n # change type of restoration for one state\n c = random.choice(self.restoration_types)\n self.state[a] = (self.state[a][0], c)", "def random_adjacent_tile(self):\n adj = self.adjacent()\n pos_list = [pos for pos in adj if self.in_grid(pos) and pos != self.prev]\n return random.choice(pos_list)", "def clone_rand(self):", "def randomposition(self, identifier):\n # Initialize a validation to make sure we only generate one of each item\n onmap = False\n\n # Loop checking if a random spot is free\n # on the 15*15 squares map with the ' ' character\n while not onmap:\n self.square_x = random.randint(0, 14)\n self.square_y = random.randint(0, 14)\n # if the sprite is not a wall or another item\n if self.maze.structure[self.square_y][self.square_x] == ' ':\n # then we add the item identifier to the map like 'b' for bottle\n self.maze.structure[self.square_y][self.square_x] = identifier\n # then we exit the loop by moving the validation to True and making 
sure\n # we do not exceed the maximum amount of 1 item\n onmap = True\n return self.square_x, self.square_y", "def rand(self): # Method doctring\n\n self._last_rand = xorshift32(self._last_rand, self.triple)\n return self._last_rand", "def new_tile(self):\n \n # get random corordinates for new tile\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n # keeps generating random tile corordinates for non-empty tile\n while self.get_tile(row,col) != 0:\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n \n # get random index of new tile value\n freq = random.randint(0,9)\n if freq == 9:\n self.set_tile(row, col, 4)\n else:\n self.set_tile(row, col, 2)", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def setListRandomFromList(ldata, ldataRepl):\n\tl = len(ldata)\n\tselSet = set()\n\tfor d in ldataRepl:\n\t\ti = randint(0, l-1)\n\t\twhile i in selSet:\n\t\t\ti = randint(0, l-1)\n\t\tldata[i] = d\n\t\tselSet.add(i)", "def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def getRandom(self) -> int:\n some_item = self.container.pop()\n self.container.add(some_item)\n return some_item", "def getRandom(self):\n return random.choice(self.ls)", "def rand(self):\n raise NotImplementedError", "def random_place(board, player):\n available = possibilities(board)\n place(board, player, random.choice(available))", "def getRandom(self):\n if not self.head:\n return\n candidate, pv = self.head.val, 1\n tmp = self.head\n while tmp:\n if random.randint(1, pv) == 1:\n candidate = tmp.val\n tmp = tmp.next\n pv += 1\n return candidate", "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def randomnode(state,H,V):\n hit = randomhit(state,H,V)\n node = KFNode(hit,H)\n node.setstate('true',state)\n debug('randomnode x,node ',node)\n return node", "def put_item_random(self, x, y):\n r = int(random() * 10)\n if 3 < r and r <= 6:\n self.put_fireitem(x, y)\n elif 6 < r and r <= 9:\n self.put_bombitem(x, y)", "def randomNeighbor(s):\n s=random.randint( int(pow(10,-5)), int(pow(10,5)))\n return s", "def default_location(self, thing):\n return random.choice([loc_A, loc_B])", "def default_location(self, thing):\n return random.choice([loc_A, loc_B])", "def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll", "def random_intraroute_movement(position, route, customers):\n customer = route.customers[position+1]\n route.remove(position,1,customers)\n fpositions = factible_route_positions(customer, route, customers)\n #print(\"customer, position, factibles = \",customer, position, fpositions)\n fpositions.remove(position)\n if fpositions:\n npos = random.choice(fpositions)\n route.insert(npos, [customer], customers)\n print(\"succed intraroute movement\")\n #print(\"true, route = \", route)\n return True\n else:\n route.insert(position, [customer], customers)\n #print(\"false, route = \", route)\n return False", "def rand_pop(l: list):\n i = 
randrange(len(l)) \n l[i], l[-1] = l[-1], l[i] \n return l.pop()", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def insert(self, data):\n new_node = Item(data)\n new_node.next = self.head\n self.head = new_node", "def getRandom(self) -> int:\n import random \n \n count = 0\n node = self.head\n while node:\n if random.randint(0, count) == 0:\n ans = node.val\n node = node.next\n count += 1\n return ans", "def _random_pick(lst):\n\n choice = random.randint(0, len(lst) - 1)\n return lst[choice]", "def getRandom(self) -> int:\n return random.choice(self.l)", "def getRandom(self):\n random_index = randint(0, len(self.list_val)-1)\n return self.list_val[random_index]", "def getRandomPosition(self):\n x = random.randint(0, self.width - 1)\n y = random.randint(0, self.height - 1)\n return Position(x, y)", "def getRandom(self) -> int:\n steps = random.randint(0, self.len-1) # 随机抽取一个\n temp = self.head\n for i in range(steps):\n temp=temp.next\n return temp.val", "def random_interroute_insertion(customer, solution, customers):\n posbyroute = factible_positions_in_routes(customer, solution, customers)\n #print(\"posbyroute = \",posbyroute)\n if posbyroute:\n nroute,positions = random.choice(posbyroute)\n ripos = random.choice(positions)\n solution.routes[nroute].insert(ripos, [customer], customers)\n else:\n #print(\"New route!\")\n route = Aroute()\n route.insert(0, [customer], customers)\n solution.append(route)\n #print(\"solution = \", solution)", "def linked_node(self, value):\n self._linked_node = value", "def update_position(self):\n new_position = []\n for i in range(self.num_literals):\n r = random()\n position_i = 1 if r < self.sigmoid(self.velocity[i]) else 0\n new_position.append(position_i)\n self.position = new_position", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def remember_visited_node(self, node_pos):\n self.visited_nodes.append(node_pos)", "def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1", "def getRandom(self):\n res = self.head.val\n cur = self.head.next\n count = 2\n\n while cur != None:\n if random() <= 1.0 / count:\n res = cur.val\n\n count += 1\n cur = 
cur.next\n return res", "def __setitem__(self, item, val):\r\n if type(val) is not numpy.random.RandomState:\r\n raise TypeError('only values of type RandomState are permitted',\r\n val)\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n if item is old_r:\r\n container = self.memo[item].value\r\n container.value = val\r\n return\r\n raise KeyError(item)", "def mutate_random(self, point, population):\n other = Point(self.model.generate())\n other.evaluate(self.model)\n while other in population or other == point:\n other = Point(self.model.generate())\n other.evaluate(self.model)\n return other", "def insert(self,value):\n try:\n new_node=Node(value)\n if self.head == None:\n self.head=new_node\n else:\n current=self.head\n while current.next:\n current=current.next\n current.next=new_node\n print( new_node.value)\n return( new_node.value)\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def add(self, nodeLoc):\n self.table[self.getHashIndex(nodeLoc)] = True", "def getRandom(self):\n result, node, index = self.node, self.node.next, 1\n\n while node:\n if random.random() < (1.0 / (index+1)):\n result = node\n node = node.next\n index += 1\n return result.val\n\n\n\n # Your Solution object will be instantiated and called as such:\n # obj = Solution(head)\n # param_1 = obj.getRandom()", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def generate_new_node(self, parent, rand_node):\n dist = np.linalg.norm(parent.state - rand_node.state)\n if dist < self.Delta: # In case rand_node is very close to parent\n new_state = rand_node.state\n else:\n new_state = parent.state + (rand_node.state 
- parent.state) / dist * self.Delta\n new_node = Node(new_state)\n return new_node", "def NewTile(field):\n var = False\n while not var:\n temp = random.randrange(0, len(field), 1)\n if field[temp] == 0:\n r = random.randrange(0, 100, 1)\n if r > 80:\n field[temp] = -4\n else:\n field[temp] = -2\n \n var = True\n return field", "def new_tile(self):\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n while self.get_tile(rand_y, rand_x) != 0:\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\r\n del self.board[rand_y][rand_x]\r\n self.board[rand_y].insert(rand_x,value)\r\n return self.board", "def new_tile(self):\n\n # creating a random float variable that will roll a random value\n # if randomvalue > .90\n #\n\n tile_added = False\n while not tile_added:\n row = random.randint(0,self.grid_height - 1)\n col = random.randint(0,self.grid_width - 1)\n if self.board[row][col] == 0:\n tile_added = True\n random_tile = random.random()\n if random_tile < .90:\n self.board[row][col] = 2\n else:\n self.board[row][col] = 4" ]
[ "0.6553104", "0.6297904", "0.6075966", "0.5918239", "0.58364403", "0.5792438", "0.57132536", "0.57105196", "0.56871873", "0.56744456", "0.56678665", "0.56591904", "0.5640225", "0.5628301", "0.5613032", "0.5598745", "0.5593619", "0.5581534", "0.5567704", "0.55652964", "0.55643505", "0.5558386", "0.5538791", "0.5530747", "0.5529713", "0.5520587", "0.54846376", "0.5480163", "0.54785717", "0.54776603", "0.54776484", "0.5476026", "0.5472378", "0.5466245", "0.54542094", "0.5452095", "0.54477537", "0.54460216", "0.54419196", "0.5438004", "0.5421798", "0.5420015", "0.539014", "0.53856033", "0.5378527", "0.53777885", "0.5377474", "0.5374219", "0.53716564", "0.5364547", "0.536209", "0.53602195", "0.5359464", "0.5352752", "0.5346631", "0.53430444", "0.5343016", "0.5340155", "0.5331365", "0.5327628", "0.5325997", "0.5316365", "0.5314419", "0.5314419", "0.53120935", "0.53052866", "0.52966726", "0.5289195", "0.5282304", "0.52758074", "0.5273889", "0.5267086", "0.5266429", "0.5259924", "0.525018", "0.5247783", "0.52472216", "0.52454144", "0.5244392", "0.5239476", "0.5235821", "0.5234386", "0.52292734", "0.52291095", "0.5226098", "0.5226068", "0.52188486", "0.5213965", "0.52139324", "0.52125454", "0.5210735", "0.5210324", "0.52050257", "0.51972234", "0.5195629", "0.5195629", "0.51898247", "0.5188985", "0.5188928", "0.5175538" ]
0.7000383
0
saw online, a linear time growth algorithm
def findRelativeRanks(nums):
    compare_lst = copy.deepcopy(nums)
    compare_lst.sort(reverse=True)
    for i in nums:
        compare_index = compare_lst.index(i)
        nums_index = nums.index(i)
        if compare_index > 2:
            nums[nums_index] = str(compare_index + 1)
        elif compare_index == 0:
            nums[nums_index] = 'Gold Medal'
        elif compare_index == 1:
            nums[nums_index] = 'Silver Medal'
        else:
            nums[nums_index] = 'Bronze Medal'
    return nums
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_trajectory():\n pass", "def M_D_1(arrival_time,max_time,service_time=1/90):\n #conversion in seconds\n max_seconds = max_time*60*60\n sim_time = 0.0 # simulation time\n t_1 = 0.0 # time for next event (arrival)\n t_2 = max_seconds # time for next event (departure)\n t_n = 0.0 #last event time--> tempo dell'ultimo avvenimento generico\n t_b = 0.0 # last start of busy time--> tempo in cui la queue inizia ad essere non vuota per l'ultima volta\n c = 0 # numero di servizi completati\n queue_aircraft = [] # number of aircraft in the queue\n aircraft = 0\n arrival = [] # time of arrival\n attesa = [] # attesa per gli aerei-->NON SICURO CHE SI CALCOLI COSI'\n # simulation loop\n while(sim_time < max_seconds):\n if(t_1<t_2): #event1:arrival\n sim_time = t_1\n arrival.append(t_1)\n aircraft += 1\n queue_aircraft.append(aircraft)\n t_n = sim_time\n t_1 = sim_time + rm.expovariate(arrival_time)\n if(aircraft==1):\n t_b = sim_time\n t_2 = sim_time + 1/service_time\n else:\n sim_time = t_2\n aircraft = aircraft -1\n queue_aircraft.append(aircraft)\n t_n = sim_time\n attesa.append( t_2 - arrival[c])\n c+=1\n if(aircraft>0):\n t_2=sim_time + 1/service_time\n else:\n t_2 = max_seconds\n\n\n\n\n return queue_aircraft,arrival,attesa", "def gaver_stehfest(time, lap_func):\n def nCr(n, r):\n return math.factorial(n)/(math.factorial(r)*\n math.factorial(n-r))\n def a(k, n):\n summation = 0.\n for j in range((k+1)/2, min(k, n)+1):\n current_summation = float(pow(j, n+1))/float(math.factorial(n))\n current_summation *= nCr(n, j)\n current_summation *= nCr(2*j, j)\n current_summation *= nCr(j, k-j)\n summation += current_summation\n return summation*pow(-1, n+k)\n n = 7\n total_sum = a(1, n)*lap_func(1.*np.log(2.)/time)\n for k in range(2, 2*n+1):\n total_sum += a(k, n)*lap_func(k*np.log(2.)/time)\n return total_sum*np.log(2.)/time", "def old():\n therm = [[300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.]]\n ts = np.linspace(0, 50, 1000)\n\n #odvod temperature bo vsota gradientov (diferencialov) z desne in z leve glede x\n #dT/dt[i] = K/x^2 * (temperature[i-1]- 2*temperature[i] + temperature[i+1])\n #razen ce je robna tocka\n #potem je treba nekaj scarat - robna bo funkcija\n def odvod(indeks, arr, K, time):\n odvodt = K * (arr[indeks-1][time] - 2*arr[indeks][time] + arr[indeks+1][time])\n return odvodt\n\n def robna(time):\n return 5*m.cos(0.05*time)\n\n\n K = 0.02\n x = 0.003\n\n def main_old():\n t = 0\n dt = 50. 
/ 1000.\n for time in ts:\n for i in range(0,9):\n therm[i].append(therm[i][t] + (robna(time) if i==0 else odvod(i, therm, K, t)*dt/(x**2)))\n therm[9].append(300.)\n t+=1\n\n import matplotlib.pyplot as plt\n\n plt.plot(ts[:], therm[4][:-1], label = 'T(t)')\n plt.show()\n \n main_old()", "def hr_game(t0, tf, n, A, B, R, x0):\n # t0 - Initial time\n # tf - Final time\n # n - Number of steps\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Step in each iteration\n h = (tf - t0) / n\n # Result of each step, np.ndarray (N, S, n+1)\n y = np.zeros([N, S, n+1], dtype='double')\n y[:, :, 0] = x0\n k = np.zeros([N, S])\n # I still don't know why, but theres a problem with negative payoffs\n B = matrixTranslate(B)\n\n # Fourth order Runge-Kutta\n for t in range(n):\n k1 = np.multiply(h, hr_egn(A, B, R, y[:, :, t]))\n k2 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k1, 2))))\n k3 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k2, 2))))\n k4 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], k3)))\n # k = (k1 + 2*k2 + 2*k3 + k4)/6\n k = np.divide(np.add(np.add(k1, np.multiply(2, k2)), np.add(np.multiply(2, k3), k4)), 6)\n\n y[:, :, t+1] = np.add(y[:, :, t], k)\n\n # Filter results with machine epsilon\n for v in range(N):\n for s in range(S):\n if y[v, s, t+1] < np.sqrt(np.finfo('double').eps):\n y[v, s, t+1] = 0\n elif y[v, s, t+1] > np.subtract(1, np.sqrt(np.finfo('double').eps)):\n y[v, s, t + 1] = 1\n\n return y", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def test_lineage(self):\n M, W = simulation.generate_poisson_lineage(3, 100, 50)\n sim_data = simulation.generate_state_data(M, W)\n sim_data = sim_data + 1e-8\n m2 = M + np.random.random(M.shape) - 0.5\n curves, fitted_vals, edges, assignments = lineage(m2, W)\n # TODO: assert something about the distances???\n print(len(edges))\n adjacent_count = 0\n for e in edges:\n if np.abs(e[0]-e[1]) <= 1:\n adjacent_count += 1\n self.assertTrue(adjacent_count>150)", "def _get_lt_problem(self,x,n_seg=[10,10], high_fidelity=True):\n\tfrom PyKEP import epoch, lambert_problem, DAY2SEC, fb_prop, propagate_lagrangian\n\tfrom PyGMO import population\n\tfrom math import pi, acos,cos,sin,sqrt, exp\n\tfrom scipy.linalg import norm\n\t\n\tretval = []\n\t#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience\n\tT = x[3::4]\n\tn_legs = len(x)/4\n\tseq = self.get_sequence()\n\tcommon_mu = seq[0].mu_central_body\n\t#2 
- We compute the epochs and ephemerides of the planetary encounters\n\tt_P = list([None] * (n_legs))\n\tr_P = list([None] * (n_legs))\n\tv_P = list([None] * (n_legs))\n\tDV = list([None] * (n_legs))\n\t\n\tfor i,planet in enumerate(seq):\n\t\tt_P[i] = epoch(x[0]+sum(T[:i+1]))\n\t\tr_P[i],v_P[i] = seq[i].eph(t_P[i])\n\n\t#3 - We start with the first leg: a lambert arc\n\ttheta = 2*pi*x[1]\n\tphi = acos(2*x[2]-1)-pi/2\n\tr = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection\n\tr = [JR*1000*d for d in r]\n\t\n\tl = lambert_problem(r,r_P[0],T[0]*DAY2SEC,common_mu, False, False)\n\n\t#Lambert arc to reach seq[1]\n\tv_end_l = l.get_v2()[0]\n\tv_beg_l = l.get_v1()[0]\n\t\n\t#We start appending in the lt chromosome (see mga_incipit_lt)\n\tretval.append(theta)\n\tretval.append(phi)\n\t\n\t#First DSM occuring at the very beginning (will be cancelled by the optimizer)\n\tDV[0] = abs(norm(v_beg_l) - 3400)\n\t\n\t#Start of the first lt leg encoding \n\tretval.append(T[0])\n\tretval.append(exp(-DV[0]/9.80665/2000)*2000) #Tsiolkowsky\n\tretval.extend(v_beg_l)\n\tretval.extend([a-b for a,b in zip(v_end_l,v_P[0])])\n\n\t#4 - And we proceed with each successive leg\n\tfor i in xrange(1,n_legs):\n\t\t#Fly-by \n\t\tv_out = fb_prop(v_end_l,v_P[i-1],x[1+4*i]*seq[i-1].radius,x[4*i],seq[i-1].mu_self)\n\t\t#s/c propagation before the DSM\n\t\tr,v = propagate_lagrangian(r_P[i-1],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu)\n\t\t#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)\n\t\tdt = (1-x[4*i+2])*T[i]*DAY2SEC\n\t\tl = lambert_problem(r,r_P[i],dt,common_mu, False, False)\n\t\tv_end_l = l.get_v2()[0]\n\t\tv_beg_l = l.get_v1()[0]\n\t\t#DSM occuring at time nu2*T2\n\t\tDV[i] = norm([a-b for a,b in zip(v_beg_l,v)])\n\t\t\n\t\t#lt encoding of all legs\n\t\tretval.append(T[i])\n\t\tretval.append(exp(-sum(DV[:i+1])/9.80665/2000)*2000) #Tsiolkowsky\n\t\tretval.extend([a-b for a,b in zip(v_out,v_P[i-1])])\n\t\tif i != n_legs-1:\n\t\t\tretval.extend([a-b for a,b in zip(v_end_l,v_P[i])])\n\t\n\tretval = retval + [0]*sum(n_seg)*3\n\tprob = mga_incipit_lt(high_fidelity=high_fidelity,seq=seq, n_seg = n_seg,tf = epoch(x[0]+sum(T)), vf = [a-b for a,b in zip(v_end_l,v_P[i])])\n\t# solves the problem of chemical trajectories wanting higher launch dv\n\tub = list(prob.ub)\n\tlb = list(prob.lb)\n\tub[4:7] = [5000,5000,5000]\n\tlb[4:7] = [-5000,-5000,-5000]\n\tprob.set_bounds(lb, ub)\n\tpop = population(prob)\n\tpop.push_back(retval)\n\treturn (prob,pop)", "def part1():\n\tmoons = read_input()\n\tvelocities = [[0, 0, 0] for moon in moons]\n\ttime = 0\n\tend_time = 1000\n\twhile time < end_time:\n\t\tgravities = [[0, 0, 0] for moon in moons]\n\t\tfor i, moon in enumerate(moons):\n\t\t\tfor other in moons:\n\t\t\t\tfor axis in range(len(moon)):\n\t\t\t\t\tif moon[axis] < other[axis]:\n\t\t\t\t\t\tgravities[i][axis] += 1\n\t\t\t\t\telif moon[axis] > other[axis]:\n\t\t\t\t\t\tgravities[i][axis] -= 1\n\t\tfor i, moon in enumerate(moons):\n\t\t\tfor axis in range(3):\n\t\t\t\tvelocities[i][axis] += gravities[i][axis]\n\t\t\t\tmoon[axis] += velocities[i][axis]\n\t\ttime += 1\n\tenergy = 0\n\tfor i, moon in enumerate(moons):\n\t\tpotential = sum([abs(d) for d in moon])\n\t\tkinetic = sum(abs(d) for d in velocities[i])\n\t\tenergy += potential * kinetic\n\tprint(energy)", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # 
eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")", "def _get_penalty_data(self,x):\n\t\n\tfrom PyKEP import epoch, lambert_problem, propagate_lagrangian, fb_prop, DAY2SEC;\n\tfrom math import pi, acos, cos, sin;\n\timport numpy as np;\n\tfrom _mass_penalty import get_rp_ra_Trev\n\t\n\tTrajectory = [];\n\t\n\t#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience\n\tT = x[3::4]\n\t\n\t# reconstruct properties that are known in _mga_incipit:\n\tself.seq = self.get_sequence();\n\tself.__n_legs = len(self.seq);\n\tself.common_mu = self.seq[0].mu_central_body\n\t\n\t#2 - We compute the epochs and ephemerides of the planetary encounters\n\tt_P = list([None] * (self.__n_legs))\n\tr_P = list([None] * (self.__n_legs))\n\tv_P = list([None] * (self.__n_legs))\n\tDV = list([None] * (self.__n_legs))\n\t\n\tfor i,planet in enumerate(self.seq):\n\t\tt_P[i] = epoch(x[0]+sum(T[:i+1]))\n\t\tr_P[i],v_P[i] = self.seq[i].eph(t_P[i])\n\n\t#3 - We start with the first leg: a lambert arc\n\ttheta = 2*pi*x[1]\n\tphi = acos(2*x[2]-1)-pi/2\n\tr = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection\n\tr = [JR*1000*d for d in r]\n\t\n\tl = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)\n\n\t#Lambert arc to reach seq[1]\n\tv_end_l = l.get_v2()[0]\n\tv_beg_l = l.get_v1()[0]\n\tTr = [tuple(r), v_beg_l, r_P[0], v_end_l, T[0]*DAY2SEC];\n\trPvec = np.asarray(r_P[0]);\n\tvPvec = np.asarray(v_end_l);\n\tTr = Tr + get_rp_ra_Trev(rPvec, vPvec);\n\tvinf = vPvec - np.asarray(v_P[0]);\n\tTr = Tr + [vinf];\n\tTrajectory.append(Tr);\n\n\t#First DSM occuring at the very beginning (will be cancelled by the optimizer)\n\tDV[0] = abs(np.linalg.norm(v_beg_l) - 3400)\n\n\t#4 - And we proceed with each successive leg\n\tfor i in xrange(1,self.__n_legs):\n\t\t#Fly-by \n\t\tv_out = fb_prop(v_end_l,v_P[i-1],x[1+4*i]*self.seq[i-1].radius,x[4*i],self.seq[i-1].mu_self)\n\t\t#s/c propagation before the DSM\n\t\tr,v = propagate_lagrangian(r_P[i-1],v_out,x[4*i+2]*T[i]*DAY2SEC,self.common_mu)\n\t\t# append r, v, etc. to the Trajectory:\n\t\tTr = [r_P[i-1], v_out, r, v, x[4*i+2]*T[i]*DAY2SEC];\n\t\trPvec = np.asarray(r);\n\t\tvPvec = np.asarray(v);\n\t\tTr = Tr + get_rp_ra_Trev(rPvec, vPvec);\n\t\tvinf = [];\n\t\tTr = Tr + [vinf];\n\t\tTrajectory.append(Tr);\n\t\t\n\t\t#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)\n\t\tdt = (1-x[4*i+2])*T[i]*DAY2SEC\n\t\tl = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)\n\t\tv_end_l = l.get_v2()[0]\n\t\tv_beg_l = l.get_v1()[0]\n\t\t# append r, v, etc. 
to the Trajectory:\n\t\tTr = [r, v_beg_l, r_P[i], v_end_l, (1-x[4*i+2])*T[i]*DAY2SEC];\n\t\trPvec = np.asarray(r_P[i]);\n\t\tvPvec = np.asarray(v_end_l);\n\t\tTr = Tr + get_rp_ra_Trev(rPvec, vPvec);\n\t\tvinf = vPvec - np.asarray(v_P[i]);\n\t\tTr = Tr + [vinf];\n\t\tTrajectory.append(Tr);\n\t\t\n\t\t\n\t\t#DSM occuring at time nu2*T2\n\t\tDV[i] = np.linalg.norm([a-b for a,b in zip(v_beg_l,v)])\n\treturn Trajectory;", "def trend_reduce(sample_data):\n data = np.asarray(sample_data)*1e9\n width = 400\n slope = 0.070\n dataLength = len(data)\n Time = np.arange(0, dataLength)\n\n r1 = np.arange(dataLength, 0, -1)*slope\n r2 = -1*(slope*width/np.pi)*np.sin(np.arange(0,width+1)*(np.pi/width))\n r3 = np.arange(0, dataLength)*slope\n CompareLong = np.array(r1)\n CompareLong = np.append(CompareLong, r2)\n CompareLong = np.append(CompareLong, r3)\n\n upperBoundary = np.amax(data)+0*data\n Compare = []\n for i in range(0,len(CompareLong)-dataLength-1):\n Compare = CompareLong[i:-1]\n Compare = Compare[0:dataLength]\n Compare = Compare + np.amax(data-Compare)\n upperBoundary = np.minimum(upperBoundary, Compare)\n\n FindUpperPoints = abs(upperBoundary - data)<(slope/4)\n upperLine = interp1d(Time[np.where(FindUpperPoints)], upperBoundary[np.where(FindUpperPoints)], kind='quadratic', fill_value='extrapolate')\n \n \n\n lowerBoundary = np.amin(data)+0*data\n Compare = []\n for i in range(0,len(CompareLong)-dataLength-1):\n Compare = -CompareLong[i:-1]\n Compare = Compare[0:dataLength]\n Compare = Compare - np.amax(Compare-data)\n lowerBoundary = np.maximum(lowerBoundary, Compare)\n\n FindLowerPoints = abs(lowerBoundary - data)<(slope/4)\n\n lowerLine = interp1d(Time[np.where(FindLowerPoints)], lowerBoundary[np.where(FindLowerPoints)], kind='quadratic', fill_value='extrapolate')\n\n middleLine = (upperLine(Time)+lowerLine(Time))/2\n middle = (upperBoundary+lowerBoundary)/2\n\n '''\n fig_t0 = plt.figure()\n plt_t0 = fig_t0.add_subplot(111)\n plt_t0.plot(Time, data)\n plt_t0.scatter(Time[np.where(FindUpperPoints)], upperBoundary[np.where(FindUpperPoints)], c='r')\n plt_t0.plot(Time, upperLine(Time))\n plt_t0.plot(Time, lowerBoundary, c='r')\n plt_t0.plot(Time, upperBoundary, c='r')\n plt_t0.scatter(Time[np.where(FindLowerPoints)], lowerBoundary[np.where(FindLowerPoints)], c='r')\n plt_t0.plot(Time, lowerLine(Time))\n plt_t0.plot(Time, middleLine)\n plt_t0.plot(Time, middle)\n\n plt.close()\n '''\n return (sample_data - middle/1e9)", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. 
Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def evaluate(self, time) -> float:\n ...", "def SLTrace(self,NSL=100,Pts=[]):\n TOF_end=[]\n SL_end=[]\n \n for i in range(4): #4 Subgrids\n \n if(len(Pts)==0):\n nsl=int(NSL*self.theta[i]/2/np.pi)\n Pts_init=PointOnUnitEdge(nsl) #Generating the start point along the well edge(alpha=0)\n else:\n nsl=len(Pts)\n Pts_init=Pts\n \n for j in range(nsl): #nsl streamlines\n GridID=i\n temp_trace=self.SubGrids[GridID].Trace1SL(Pts=Pts_init[j])\n \n SLtemp=RotateSL(temp_trace[3],Single=1,angle=self.SubGrids[GridID].RotateAngle)\n SLtemp=TranslateSL(SLtemp,Single=1,new_origin=self.SubGrids[GridID].NewOrigin)\n TOFtemp=temp_trace[5]\n \n flag=True\n while (flag==True): #the streamline will continue travel in another subgrid\n Pts_end=temp_trace[2][-1]\n temp_neighbor=self.NeighborTest(GridID,Pts_end) #test of crossing trace of a streamline\n flag=temp_neighbor[0]\n if(flag==True):\n temp_trace=[]\n SLtemp2=[]\n TOFtemp2=[]\n \n GridID_next=temp_neighbor[1]\n Pts_init_next=temp_neighbor[2]\n #Pts and TOF base starts from previous node\n temp_trace=self.SubGrids[GridID_next].Trace1SL(Pts=Pts_init_next,TOF_base=TOFtemp[-1])\n\n SLtemp2=RotateSL(temp_trace[3],Single=1,angle=self.SubGrids[GridID_next].RotateAngle)\n SLtemp2=TranslateSL(SLtemp2,Single=1,new_origin=self.SubGrids[GridID_next].NewOrigin)\n TOFtemp2=temp_trace[5]\n \n #SLtemp=np.append(SLtemp,SLtemp2,axis=0)\n #TOFtemp=np.append(TOFtemp,TOFtemp2,axis=0)\n SLtemp=np.append(SLtemp,SLtemp2[1:],axis=0)\n TOFtemp=np.append(TOFtemp,TOFtemp2[1:],axis=0)\n \n SL_end.append(SLtemp[-1])\n TOF_end.append(TOFtemp[-1])\n #Add all nodes and TOF into SL list\n self.SL.append(SLtemp)\n self.TOF.append(TOFtemp)\n \n \n #Plot the stremline\n plt.figure(figsize=(3, 3))\n plt.ylim(bottom=0,top=50)\n plt.xlim(left=0,right=50)\n plt.axes().set_aspect('equal')\n plt.title(r'Streamline in Physical Space ($x,y$)')\n \n #Grid edge\n Bound_vert=[self.Pts[0],self.Pts[1],self.Pts[2],self.Pts[3],self.Pts[0]]\n Internal_edge=[self.Pts[0],self.Pts[2],self.Pts[3],self.Pts[1]]\n \n plt.plot(*np.asarray(Bound_vert).T,lw=3,color='red')\n plt.plot(*np.asarray(Internal_edge).T,lw=2,ls='--',color='red')\n \n #Streamline\n for i in range(len(self.SL)):\n plt.plot(*np.asarray(self.SL[i]).T,lw=1,marker='o',markersize=0,color='blue')\n \n\n\n plt.show()\n return self.SL,self.TOF,SL_end,TOF_end", "def horde_step(self, observation):", "def LSPmetrics(phen, xnew, nGS, num, phentype):\n inds = np.isnan(phen) # check if array has NaN values\n if inds.any(): # check is all values are NaN\n return np.repeat(np.nan, num)\n else:\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # basic variables\n vpos = np.max(phen)\n ipos = np.where(phen == vpos)[0]\n pos = xnew[ipos]\n trough = np.min(phen)\n ampl = vpos - trough\n\n # get position of seasonal peak and trough\n ipos = np.where(phen == vpos)[0]\n\n # scale annual time series to 0-1\n ratio = (phen - trough) / ampl\n\n # separate greening from senesence values\n dev = np.gradient(ratio) # first derivative\n greenup = np.zeros([ratio.shape[0]], dtype=bool)\n greenup[dev > 0] = True\n\n # select time where SOS and EOS are located (arround trs value)\n # KneeLocator looks for the inflection index in the curve\n try:\n with warnings.catch_warnings():\n # estimate SOS and EOS as median of the season\n i = np.median(xnew[:ipos[0]][greenup[:ipos[0]]])\n ii = 
np.median(xnew[ipos[0]:][~greenup[ipos[0]:]])\n sos = xnew[(np.abs(xnew - i)).argmin()]\n eos = xnew[(np.abs(xnew - ii)).argmin()]\n isos = np.where(xnew == int(sos))[0]\n ieos = np.where(xnew == eos)[0]\n if sos is None:\n isos = 0\n sos = xnew[isos]\n if eos is None:\n ieos = len(xnew) - 1\n eos = xnew[ieos]\n except ValueError:\n sos = np.nan\n isos = np.nan\n eos = np.nan\n ieos = np.nan\n except TypeError:\n sos = np.nan\n isos = np.nan\n eos = np.nan\n ieos = np.nan\n\n # los: length of season\n try:\n los = eos - sos\n if los < 0:\n los[los < 0] = len(phen) + \\\n (eos[los < 0] - sos[los < 0])\n except ValueError:\n los = np.nan\n except TypeError:\n los = np.nan\n\n # get MSP, MAU (independent from SOS and EOS)\n # mean spring\n try:\n idx = np.mean(xnew[(xnew > sos) & (xnew < pos[0])])\n idx = (np.abs(xnew - idx)).argmin() # indexing value\n msp = xnew[idx] # DOY of MGS\n vmsp = phen[idx] # mgs value\n\n except ValueError:\n msp = np.nan\n vmsp = np.nan\n except TypeError:\n msp = np.nan\n vmsp = np.nan\n # mean autum\n try:\n idx = np.mean(xnew[(xnew < eos) & (xnew > pos[0])])\n idx = (np.abs(xnew - idx)).argmin() # indexing value\n mau = xnew[idx] # DOY of MGS\n vmau = phen[idx] # mgs value\n\n except ValueError:\n mau = np.nan\n vmau = np.nan\n except TypeError:\n mau = np.nan\n vmau = np.nan\n\n # doy of growing season\n try:\n green = xnew[(xnew > sos) & (xnew < eos)]\n id = []\n for i in range(len(green)):\n id.append((xnew == green[i]).nonzero()[0])\n # index of growing season\n id = np.array([item for sublist in id for item in sublist])\n except ValueError:\n id = np.nan\n except TypeError:\n id = np.nan\n\n # get intergral of green season\n try:\n ios = trapz(phen[id], xnew[id])\n except ValueError:\n ios = np.nan\n except TypeError:\n ios = np.nan\n\n # rate of greening [slope SOS-POS]\n try:\n rog = (vpos - phen[isos]) / (pos - sos)\n except ValueError:\n rog = np.nan\n except TypeError:\n rog = np.nan\n\n # rate of senescence [slope POS-EOS]\n try:\n ros = (phen[ieos] - vpos) / (eos - pos)\n except ValueError:\n ros = np.nan\n except TypeError:\n ros = np.nan\n\n # skewness of growing season\n try:\n sw = skew(phen[id])\n except ValueError:\n sw = np.nan\n except TypeError:\n sw = np.nan\n\n metrics = np.array((sos, pos[0], eos, phen[isos][0], vpos,\n phen[ieos][0], los, msp, mau, vmsp, vmau, ampl, ios, rog[0],\n ros[0], sw))\n\n return metrics\n\n except IndexError:\n return np.repeat(np.nan, num)\n except ValueError:\n return np.repeat(np.nan, num)\n except TypeError:\n return np.repeat(np.nan, num)", "def LSPmetrics(phen, xnew, nGS, num, phentype):\n inds = np.isnan(phen) # check if array has NaN values\n if inds.any(): # check is all values are NaN\n return np.repeat(np.nan, num)\n else:\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # basic variables\n vpos = np.max(phen)\n ipos = np.where(phen == vpos)[0]\n pos = xnew[ipos]\n trough = np.min(phen)\n ampl = vpos - trough\n\n # get position of seasonal peak and trough\n ipos = np.where(phen == vpos)[0]\n\n # scale annual time series to 0-1\n ratio = (phen - trough) / ampl\n\n # separate greening from senesence values\n dev = np.gradient(ratio) # first derivative\n greenup = np.zeros([ratio.shape[0]], dtype=bool)\n greenup[dev > 0] = True\n\n # select time where SOS and EOS are located (arround trs value)\n # KneeLocator looks for the inflection index in the curve\n try:\n with warnings.catch_warnings():\n # estimate SOS and EOS as median of the season\n i = 
np.median(xnew[:ipos[0]][greenup[:ipos[0]]])\n ii = np.median(xnew[ipos[0]:][~greenup[ipos[0]:]])\n sos = xnew[(np.abs(xnew - i)).argmin()]\n eos = xnew[(np.abs(xnew - ii)).argmin()]\n isos = np.where(xnew == int(sos))[0]\n ieos = np.where(xnew == eos)[0]\n if sos is None:\n isos = 0\n sos = xnew[isos]\n if eos is None:\n ieos = len(xnew) - 1\n eos = xnew[ieos]\n except ValueError:\n sos = np.nan\n isos = np.nan\n eos = np.nan\n ieos = np.nan\n except TypeError:\n sos = np.nan\n isos = np.nan\n eos = np.nan\n ieos = np.nan\n\n # los: length of season\n try:\n los = eos - sos\n if los < 0:\n los[los < 0] = len(phen) + \\\n (eos[los < 0] - sos[los < 0])\n except ValueError:\n los = np.nan\n except TypeError:\n los = np.nan\n\n # get MSP, MAU (independent from SOS and EOS)\n # mean spring\n try:\n idx = np.mean(xnew[(xnew > sos) & (xnew < pos[0])])\n idx = (np.abs(xnew - idx)).argmin() # indexing value\n msp = xnew[idx] # DOY of MGS\n vmsp = phen[idx] # mgs value\n\n except ValueError:\n msp = np.nan\n vmsp = np.nan\n except TypeError:\n msp = np.nan\n vmsp = np.nan\n # mean autum\n try:\n idx = np.mean(xnew[(xnew < eos) & (xnew > pos[0])])\n idx = (np.abs(xnew - idx)).argmin() # indexing value\n mau = xnew[idx] # DOY of MGS\n vmau = phen[idx] # mgs value\n\n except ValueError:\n mau = np.nan\n vmau = np.nan\n except TypeError:\n mau = np.nan\n vmau = np.nan\n\n # doy of growing season\n try:\n green = xnew[(xnew > sos) & (xnew < eos)]\n id = []\n for i in range(len(green)):\n id.append((xnew == green[i]).nonzero()[0])\n # index of growing season\n id = np.array([item for sublist in id for item in sublist])\n except ValueError:\n id = np.nan\n except TypeError:\n id = np.nan\n\n # get intergral of green season\n try:\n ios = trapz(phen[id], xnew[id])\n except ValueError:\n ios = np.nan\n except TypeError:\n ios = np.nan\n\n # rate of greening [slope SOS-POS]\n try:\n rog = (vpos - phen[isos]) / (pos - sos)\n except ValueError:\n rog = np.nan\n except TypeError:\n rog = np.nan\n\n # rate of senescence [slope POS-EOS]\n try:\n ros = (phen[ieos] - vpos) / (eos - pos)\n except ValueError:\n ros = np.nan\n except TypeError:\n ros = np.nan\n\n # skewness of growing season\n try:\n sw = skew(phen[id])\n except ValueError:\n sw = np.nan\n except TypeError:\n sw = np.nan\n\n metrics = np.array((sos, pos[0], eos, phen[isos][0], vpos,\n phen[ieos][0], los, msp, mau, vmsp, vmau, ampl, ios, rog[0],\n ros[0], sw))\n\n return metrics\n\n except IndexError:\n return np.repeat(np.nan, num)\n except ValueError:\n return np.repeat(np.nan, num)\n except TypeError:\n return np.repeat(np.nan, num)", "def schrage(data):\n Pi = []\n G = []\n N = data.copy()\n t = min(N)[0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = min(N, key=lambda x: x[0])\n G.append(e)\n N.remove(e)\n\n if len(G) != 0:\n e = max(G, key=lambda x: x[2])\n G.remove(e)\n Pi.append(e)\n t = t + e[1]\n else:\n t = min(N, key=lambda x: x[0])[0]\n end = timer()\n executionTime = end - start\n return Pi, executionTime", "def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if 
self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')", "def f(x,time):\n ret = []\n for i in xrange(n):\n xd = s[i] - gamma[i]*x[i] + \\\n sum([abs(beta[i,j])*hill(x[j],beta[i,j],theta[0]) \\\n for j in xrange(n)])\n ret.append(xd)\n return ret", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, 
route_way_time", "def get_interval_from_minflow(self, wide=False):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"start\"],\n # self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"destin\"],\n # self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.source(),\n # x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.sink(),\n # x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(s_prime, v, int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(v, t_prime, int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - 
in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # s_prime,\n # x,\n # int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # x,\n # t_prime,\n # int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i],\n capacities[i],\n unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n # print('Minimum cost:', min_cost_flow.OptimalCost())\n # print('')\n # print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n # cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n # print('%1s -> %1s %3s / %3s %3s' % (\n # min_cost_flow.Tail(i),\n # min_cost_flow.Head(i),\n # min_cost_flow.Flow(i),\n # min_cost_flow.Capacity(i),\n # cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n # print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n # print(\"Has become ({}, {}) with sup {}\".format(start,\n # destin,\n # sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in\n self.arc_info[arc].keys()):\n # print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n bound_1 = old_flow + sup_flow\n bound_2 = old_flow - sup_flow\n new_lb = max(0, int(min(bound_1, bound_2)))\n new_ub = int(max(bound_1, bound_2))\n if wide:\n if new_lb == new_ub:\n # print(\"We had a zero interval\")\n new_lb = int(new_lb*0.8)\n new_ub = int(new_ub*1.2)\n if new_lb == 0:\n # print(\"We got a zero lower bound\")\n new_ub = 5\n # print(\"But now we're doing {} {}\".\n # format(new_lb, new_ub))\n\n self.arc_info[arc][\"lower_bound\"] = new_lb\n self.arc_info[arc][\"upper_bound\"] = new_ub\n # print(\"Edge ({},{}) bounds are [{},{}]\".format(\n # start,\n # destin,\n # self.arc_info[arc][\"lower_bound\"],\n # self.arc_info[arc][\"upper_bound\"]))\n # print(self.arc_info[arc])\n else:\n print('There was an issue with the min cost flow input.')\n # self.check_conservation_of_flow() # check that solution is valid", "def simulate_hawkes_time_increment(runtime,\n dt=0.01,\n lbd=functions.kernel_zhao,\n p=functions.infectious_rate_tweets,\n start_time=0,\n int_fol_cnt=10000,\n follower_mean=200,\n split=0.02):\n events = [(0, int_fol_cnt)]\n lambda_t = [p(0) * int_fol_cnt * lbd(0)]\n memory_effect_t = [int_fol_cnt * lbd(0)]\n n = round(runtime / dt) # number of intervals\n\n for i in range(1, n):\n cur_interval = i * dt\n x = 
rand.uniform()\n memory_effect = sum([\n fol_cnt * lbd(cur_interval - event_time)\n for event_time, fol_cnt in events\n ])\n llambda = p(cur_interval + start_time\n ) * memory_effect * dt # intensity for current interval\n\n lambda_t.append(llambda)\n memory_effect_t.append(memory_effect)\n\n if x < llambda: # event occurred\n events.append((int(cur_interval),\n rand_followers_extended(int_fol_cnt, follower_mean,\n split)))\n return events, lambda_t, memory_effect_t", "def compute_CTMC_flow_in_logspace(adj_mat, time_resolution=.1,\n n_timepoints=10):\n\n n = len(adj_mat)\n\n # compute infinitesimal generator\n degrees = np.sum(adj_mat, axis=0) * 1.\n D = scipy.sparse.csr_matrix(np.diag(degrees))\n\n generator_mat = compute_infinitesimal_generator(adj_mat)\n\n # compute 2nd-order approximation to e^(time_resolution * generator_mat)\n tmp = time_resolution * generator_mat\n U = np.eye(n) + tmp + np.linalg.matrix_power(\n tmp, 2) / 2.\n\n print \"Bridge passed ..\"\n # Techinique: U^2k = (U^k)^2\n for j in xrange(n_timepoints):\n # compute flow matrix at time t = time_resolution * 2 ^j\n V = D.dot(U)\n\n # update U\n U = np.linalg.matrix_power(U, 2)\n\n # yield compouted flow matrix\n yield V", "def levin(x):\n summ = 0\n for t, l in x: # for the time and length of each algorithm\n summ += l + np.log(t)\n return summ", "def schrage_nlogn(data):\n N = data.copy()\n for i in range(len(data)):\n N[i] = (N[i][0], N[i])\n heapq.heapify(N)\n \"\"\"\"\n mozna to zaltwic przy wczytaniu danych nie wplywa na zloznosc samego algorytmu\n \n N to tablica tablica krotek takich że (r , [r, p,q]), (r1, [r1 ,p1 , q1]) ........\n heapq sortuje po pierwszym elemncie dlatego tak\n \n G analogicznie z tym że sortowane jest malejaco po q więc G = [(q, [r, p ,q ]), (q1, [r1, p1, q1]) .......... 
] \n \"\"\"\n G = []\n Pi = []\n t = N[0][0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = heapq.heappop(N)\n heapq.heappush(G, (-e[1][2], e[1])) # O(log n)\n if len(G) != 0:\n e = heapq.heappop(G) # O(log n)\n Pi.append(e[1]) # O(1)\n t = t + e[1][1]\n else:\n t = N[0][0] # O(1)\n end = timer()\n executionTime = end - start\n return Pi, executionTime", "def fit_lorentzian(comp_key,p0,time_step,conn,func = fitting.fun_lorentzian,fig=None,wind=3):\n\n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",(comp_key,)).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp_key)]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n j = int(time_step/dtime)\n count = g[fd('step',j)]['y']['disp_count'][:]\n count += g[fd('step',j)]['x']['disp_count'][:]\n edges = g[fd('step',j)]['y']['disp_edges'][:]\n\n \n edges = np.array([np.mean(edges[j:j+wind]) for j in range(1,len(count)-wind,wind)])\n count = np.array([np.sum(count[j:j+wind]) for j in range(1,len(count)-wind,wind)])\n \n \n edges = edges[count>30]\n count = count[count>30]\n \n\n out = fitting.fit_curve(edges,count,p0,func)\n fig = fitting.display_fit(edges,count,out.beta,func,fig)\n print out.beta\n return out,fig", "def _logistic_uncertainty(\n prophet_model: Prophet,\n mat: np.ndarray,\n deltas: np.ndarray,\n k: float,\n m: float,\n cap: np.ndarray,\n t_time: np.ndarray,\n n_length: int,\n single_diff: float,\n) -> np.ndarray:\n\n def _ffill(arr: np.ndarray) -> np.ndarray:\n mask = arr == 0\n idx = np.where(~mask, np.arange(mask.shape[1]), 0)\n np.maximum.accumulate(idx, axis=1, out=idx)\n return arr[np.arange(idx.shape[0])[:, None], idx]\n\n # for logistic growth we need to evaluate the trend all the way from the start of the train item\n historical_mat, historical_time = _make_historical_mat_time(\n deltas, prophet_model.changepoints_t, len(mat), single_diff\n )\n mat = np.concatenate([historical_mat, mat], axis=1)\n full_t_time = np.concatenate([historical_time, t_time])\n\n # apply logistic growth logic on the slope changes\n k_cum = np.concatenate(\n (np.ones((mat.shape[0], 1)) * k, np.where(mat, np.cumsum(mat, axis=1) + k, 0)),\n axis=1,\n )\n k_cum_b = _ffill(k_cum)\n gammas = np.zeros_like(mat)\n for i in range(mat.shape[1]):\n x = full_t_time[i] - m - np.sum(gammas[:, :i], axis=1)\n ks = 1 - k_cum_b[:, i] / k_cum_b[:, i + 1]\n gammas[:, i] = x * ks\n # the data before the -n_length is the historical values, which are not needed, so cut the last n_length\n k_t = (mat.cumsum(axis=1) + k)[:, -n_length:]\n m_t = (gammas.cumsum(axis=1) + m)[:, -n_length:]\n sample_trends = cap / (1 + np.exp(-k_t * (t_time - m_t)))\n # remove the mean because we only need width of the uncertainty centered around 0\n # we will add the width to the main forecast - yhat (which is the mean) - later\n return sample_trends - sample_trends.mean(axis=0)", "def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n 
ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty", "def workflow(now, realtime):\n szx = 7000\n szy = 3500\n # Create the image data\n imgdata = np.zeros((szy, szx), 'u1')\n sts = now - datetime.timedelta(minutes=2)\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': now.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'product': 'a2m',\n 'units': '0.02 mm'}\n\n gribfn = mrms.fetch('PrecipRate', now)\n if gribfn is None:\n print((\"mrms_rainrate_comp.py NODATA for PrecipRate: %s\"\n ) % (now.strftime(\"%Y-%m-%dT%H:%MZ\"),))\n return\n\n # http://www.nssl.noaa.gov/projects/mrms/operational/tables.php\n # Says units are mm/hr\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n os.unlink(gribfn)\n\n val = grb['values']\n # Convert into units of 0.1 mm accumulation\n val = val / 60.0 * 2.0 * 50.0\n val = np.where(val < 0., 255., val)\n imgdata[:, :] = np.flipud(val.astype('int'))\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n\n # Create Image\n png = Image.fromarray(np.flipud(imgdata))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n routes = \"c\" if realtime else \"\"\n prefix = 'a2m'\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n if realtime:\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif\"\n \"\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' \"\n \"%s.json\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n if os.path.isfile(\"%s.%s\" % (tmpfn, suffix)):\n os.unlink('%s.%s' % (tmpfn, suffix))\n\n os.close(tmpfp)\n os.unlink(tmpfn)", "def greedy(self, state, timestep, epsilon=0):\n\n counts = np.bincount(self.call_locs, minlength=self.num_nodes)\n # print(self.lengths)\n # print(counts)\n score = 
self.lengths @ counts\n action = []\n for _ in range(self.num_ambulance):\n node = np.argmin(score)\n action.append(node)\n score[node] = 99999999\n return action", "def Montecarlo(h,porcentaje,nombre):\n tm = time()\n print('Se esta procesando el '+str(porcentaje)+'% de particulas')\n print('\\n')\n redor = Quitar(porcentaje,redhex)\n Guardar_archivo(redor,nombre + 'original')\n \n redopt = Optim(redor,h,1)\n fin = datetime.now()\n Guardar_archivo(redopt,nombre +'optim')\n print('Calculado para '+str(len(redor))+' particulas')\n Dt2 = time() -tm\n print('en un tiempo de '+str(Dt2)+'s. \\n')\n print(fin)\n return()", "def RecursiveLowPassFast(signal, coeff, self):\n # Creates running mean value of the input\n ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) \n # Plot Running threshold value at the current plot\n self.p1.plot(self.t, ml, pen=pg.mkPen(color=(246, 178, 255), width=3))\n\n # Creates running square deviation from the mean\n vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml))\n # Creates \"threshold line\". If current value < sl[i] -> i belongs to event. \n sl = ml - coeff['S'] * np.sqrt(vl)\n self.p1.plot(self.t, sl, pen=pg.mkPen(color=(173, 27, 183), width=3))\n # Finds the length of the initial signal\n Ni = len(signal)\n # Finds those points where signal less than \"threshold line\"\n points = np.array(np.where(signal<=sl)[0])\n to_pop=np.array([]) # Empty supplementary array for finding adjacent points \n # For loop for finding adjacent points \n for i in range(1,len(points)):\n if points[i] - points[i - 1] == 1:\n to_pop=np.append(to_pop, i)\n # Points contain only border points of events\n points = np.delete(points, to_pop)\n # Empty list for Event location storage\n RoughEventLocations = []\n NumberOfEvents=0 #Number of events\n\n # For Loop for finding separating edges of different events and satisfying Event length limits\n for i in points:\n if NumberOfEvents is not 0:\n if i >= RoughEventLocations[NumberOfEvents-1][0] and i <= RoughEventLocations[NumberOfEvents-1][1]:\n continue\n NumberOfEvents += 1\n start = i\n El = ml[i] - coeff['E'] * np.sqrt(vl[i])\n Mm = ml[i]\n Vv = vl[i]\n duration = 0\n while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['eventlengthLimit']:\n duration += 1\n i += 1\n if duration >= coeff['eventlengthLimit'] or i > (Ni - 10):\n NumberOfEvents -= 1\n else:\n k = start\n while signal[k] < Mm and k > 1:\n k -= 1\n start = k - 1\n k2 = i + 1\n while signal[k2] > Mm:\n k2 -= 1\n endp = k2\n if start<0:\n start=0\n RoughEventLocations.append((start, endp, ml[start], vl[start]))\n\n return np.array(RoughEventLocations)", "def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= 
goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def objective(trial):\n %time\n env = gym.make('Delivery-v0')\n alpha = trial.suggest_discrete_uniform('alpha', 0.3,0.9,0.3)\n gamma = trial.suggest_discrete_uniform('gamma', 0.6, 1,0.1)\n epsilon = trial.suggest_discrete_uniform('epsilon', 0.01, 0.11, 0.04)\n episodes = 1000000\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = 
False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n last_reward = np.mean(rewards)\n # trial.report(-1 * last_reward)\n\n return -1 * last_reward", "def heinon_heiles(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n\n def hamiltonian_fn(coords):\n x, y, px, py = np.split(coords, 4)\n lambda_ = 1\n H = 0.5 * px ** 2 + 0.5 * py ** 2 + 0.5 * (x ** 2 + y ** 2) + lambda_ * (\n (x ** 2) * y - (y ** 3) / 3)\n return H\n\n def dynamics_fn(t, coords):\n dcoords = autograd.grad(hamiltonian_fn)(coords)\n dxdt, dydt, dpxdt, dpydt = np.split(dcoords, 4)\n S = np.concatenate([dpxdt, dpydt, -dxdt, -dydt], axis=-1)\n return S\n\n def get_trajectory(t_span=[0, 3], timescale=0.01, ssr=sub_sample_rate, radius=None, y0=None, noise_std=0.1,\n **kwargs):\n\n # get initial state\n x = np.random.uniform(-0.5, 0.5)\n y = np.random.uniform(-0.5, 0.5)\n px = np.random.uniform(-.5, .5)\n py = np.random.uniform(-.5, .5)\n\n y0 = np.array([x, y, px, py])\n\n spring_ivp = rk(lambda t, y: dynamics_fn(t, y), t_span, y0,\n t_eval=np.arange(0, t_span[1], timescale),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = spring_ivp.y.T\n ssr = int(ssr / timescale)\n accum = accum[::ssr]\n daccum = [dynamics_fn(None, accum[i]) for i in range(accum.shape[0])]\n energies = []\n for i in range(accum.shape[0]):\n energies.append(np.sum(hamiltonian_fn(accum[i])))\n\n return accum, np.array(daccum), energies\n\n def get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, seed=seed, test_split=0.5,\n **kwargs):\n data = {'meta': locals()}\n\n # randomly sample inputs\n np.random.seed(seed)\n data = {}\n ssr = int(sub_sample_rate / dt)\n\n xs, dxs, energies, ks, ms = [], [], [], [], []\n for s in range(num_trajectories):\n x, dx, energy = get_trajectory(t_span=[0, T_max], timescale=dt, ssr=sub_sample_rate)\n\n x += np.random.randn(*x.shape) * noise_std\n dx += np.random.randn(*dx.shape) * noise_std\n\n xs.append(x)\n dxs.append(dx)\n energies.append(energy)\n ks.append([1])\n ms.append([1])\n\n data['x'] = np.concatenate(xs)\n data['dx'] = np.concatenate(dxs)\n data['energy'] = np.concatenate(energies)\n data['ks'] = np.concatenate(ks)\n data['mass'] = np.concatenate(ms)\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(data, f)\n f.close()\n\n return data\n\n return get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate)", "def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = 
temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n 
x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def time_window_growth_rate(self, t0, tend):\n return self.time_window_eigs(t0, tend).real/self.original_time['dt']", "def power_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": 
\"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": 
\"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n\n ]", "def lastTick():", "def overheads(NPT, DIT, NDIT):\n ov = 360. + 120. + NPT*NDIT*(DIT + 80. + 15.)\n print 'Telescope time in h = ', ov/3600.", "def generate_linear_trace(self, min_queries, min_duration, qps):\n timestamp = 0\n arrival = []\n timestep = 1 / qps\n while timestamp < min_duration and len(arrival) < min_queries:\n timestamp += timestep\n arrival.append(timestep)\n self.arrival = arrival", "def sim_an_lin(neighborhood, Tmax, Tmin, iterations):\n\n temp = Tmax\n\n # set iteration number to 0\n n = 0\n\n plot_list = []\n\n current_costs = neighborhood.get_total_costs()\n\n cooling_rate = (Tmax-Tmin) / iterations\n\n\n while (n < iterations):\n\n # adjust temperature according to exponential cooling scheme\n temp = Tmax - (n * cooling_rate)\n\n\n swap_succes = False\n while not swap_succes:\n cable_1 = random.choice(neighborhood.cables)\n cable_2 = random.choice(neighborhood.cables)\n swap_succes = neighborhood.swap_connection(cable_1, cable_2)\n\n new_costs = neighborhood.get_total_costs()\n if (acceptance_probability(current_costs, new_costs, temp) > random.random()):\n current_costs = new_costs\n else:\n cable_1 = neighborhood.cables[-1]\n cable_2 = neighborhood.cables[-2]\n neighborhood.swap_connection(cable_1, cable_2)\n\n plot_list.append(current_costs)\n n += 1", "def curve_of_growth(b, Nmin=12., Nmax=19, num=100):\n N = np.logspace(Nmin, Nmax, num)\n W = np.zeros_like(N)\n\n b *= 1.e5\n\n # Define line parameters, they are not important.\n l0 = 2600.17\n f = 0.242\n gam = 272300000.\n\n dl = 0.01\n l = np.arange(l0-10, l0+10, dl)\n\n for i, N_i in enumerate(N):\n profile = Voigt(l, l0, f, N_i, b, gam)\n I = np.exp(-profile)\n W[i] = np.sum(1.-I)*dl\n\n tau_0 = N*f*l0\n return (tau_0, W/l0)", "def RankineHugoniot(P):\n # prevP = P+1\n # while(abs(P-prevP)>tol):\n # prevP = P\n Pr = P/rbc[0]\n temp = 1/beta*(cR/cL)*(Pr-1)\n temp = temp/np.sqrt(1+(gamma+1)/(2*gamma)*(Pr-1))\n temp = (1-temp)**beta\n # return P\n return temp*lbc[0]-P", "def frontier_list(self,image):\r\n\r\n pos_0,pos_1,n=min(self.frontier,key=self.frontier.get)\r\n theta=n\r\n #n=int(n)\r\n #theta=n*self.theta_diff\r\n\r\n self.current_score=[pos_0,pos_1,n]\r\n \r\n cost=self.frontier[pos_0,pos_1,n]\r\n \r\n del self.frontier[pos_0,pos_1,n]\r\n\r\n\r\n \r\n if self.image_p[floor(pos_0),floor(pos_1),n]==1 or np.array_equiv(image[self.maximum_size-pos_0,pos_1,:],np.array([00,00,0])) or self.image_p[floor(pos_0),floor(pos_1),n]==2 :\r\n\r\n\r\n return self.frontier_list(image)\r\n \r\n else:\r\n score=self.string(pos_0,pos_1,n)\r\n \r\n \r\n\r\n \"\"\"if self.step_size==1:\r\n image[self.maximum_size-pos_0,pos_1,:]=200,200,0\r\n self.image_p[floor(pos_0),floor(pos_1),floor(theta)]=1\"\"\"\r\n if self.parent_orignal_data[score] is not None:\r\n parent=self.parent_orignal_data[score]\r\n self.parent_pos=self.data_with_string[parent]\r\n self.image_p[floor(pos_0),floor(pos_1),floor(theta)]=1\r\n cv2.line(image,(pos_1,self.maximum_size-pos_0),(self.parent_pos[1],self.maximum_size-self.parent_pos[0]),(200,200,0),1) 
\r\n \r\n \r\n \r\n\r\n image=image.astype(np.uint8)\r\n \r\n return pos_0,pos_1,n", "def triathlon(hours, metric):\n weight_loss = float((1/3500)*(200*hours[0] + 475*hours[1] + 275*hours[2] ))\n if (metric == True):\n print(weight_loss * 0.45359237)\n elif (metric == False):\n print(weight_loss)", "def lapse(self):\n pass", "def mc_glie(env, iterations=1000, gamma=0.9):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n n_visits = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA # initially all actions are equally likely\n epsilon = 1\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n start = time.time() # to time how long convergence takes\n print(\"---Monte Carlo First Visit---\\nTraining Started.\")\n # policy = epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon)\n k = 1\n while k < iterations:\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n \n Q_value, n_visits = mc_policy_evaluation(env, policy, Q_value, n_visits, gamma=0.9) # evaluate using Monte Carlo First Visit\n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"n_visits = {0}\".format(n_visits))\n k += 1\n epsilon = 1/k # update epsilon\n policy = epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon) # Improve policy using epsilon-greedy\n\n # print(\"Policy = {0}\".format(policy))\n # print(\"---\")\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n\n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def aestrella(inicio,obj):\n nodos_abiertos=[inicio]\n nodos_cerrados=[]\n lista1=[]\n for cel in nodos_abiertos:\n lista1.append(cel.costo)\n m=min(lista1)\n for j in nodos_abiertos:\n j.set_gscore(g(inicio,j))\n j.set_hscore(h(j,obj))\n j.set_fscore(f(inicio,obj))\n if j.fscore==m:\n if j==obj:\n print'terminado'\n nodos_cerrados.append(j)\n else:\n nodos_abiertos.append(j)\n for k in j.vecinos:\n if k in nodos_cerrados :\n gk=k.gscore\n gk1=k.get_gscore()\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n elif k in nodos_abiertos:\n gk=k.gscore\n gk1=k.get_gscore\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n \n else:\n nodos_abiertos.append(k)\n k.set_gscore()\n else:\n pass\n ruta=[] \n for u in nodos_cerrados:\n lnc=len(nodos_cerrados)\n for v in range(lnc):\n ruta.insert(v,nodos_cerrados[lnc-v])\n return ruta", "def step(self, delta_l11, delta_l12, delta_l13, delta_l21, delta_l22, delta_l23):\n self.l11 += delta_l11; self.l12 += delta_l12; self.l13 += delta_l13\n self.l21 += delta_l11; self.l22 += delta_l12; self.l23 += delta_l13\n self.l21 += delta_l21; self.l22 += delta_l22; self.l23 += delta_l23\n # check that all tendon lenghts are within limit\n self.l11 = self.l1min if self.l11 < self.l1min else self.l11\n self.l12 = self.l1min if self.l12 < self.l1min else self.l12\n self.l13 = self.l1min if self.l13 < self.l1min else self.l13\n self.l11 = self.l1max if self.l11 > self.l1max else self.l11\n self.l12 = self.l1max if self.l12 > self.l1max else self.l12\n self.l13 = self.l1max if self.l13 > self.l1max else self.l13\n self.l21 = self.l2min if self.l21 < self.l2min else self.l21\n self.l22 = self.l2min if self.l22 < self.l2min else self.l22\n self.l23 = self.l2min if self.l23 < self.l2min else self.l23\n self.l21 = self.l2max if self.l21 > self.l2max else self.l21\n self.l22 = self.l2max if self.l22 > 
self.l2max else self.l22\n self.l23 = self.l2max if self.l23 > self.l2max else self.l23\n old_tip_vec = self.tip_vec2 # used for potential reward\n self.update_variables()\n new_tip_vec = self.tip_vec2 # used for potential reward\n reward = self.r_static\n return reward", "def simula_atendimento(\n n: int,\n lambd: int,\n th: int,\n T: int,\n) -> List[float]:\n tr = 0 # instante da ultima requisicao de entrada\n t = np.zeros(n) # tempo de disponibilidade das linhas\n x, y = 0, 0 # aceitas e rejeitadas\n z = expovariate(lambd)\n\n while tr + z <= T:\n tr += z\n\n linha_disp = np.argmin(t)\n t_disp = t[linha_disp]\n\n if t_disp <= tr:\n t[linha_disp] = tr + th\n x += 1\n else:\n y += 1\n\n z = expovariate(lambd)\n\n return x, y, y/(x+y)", "def calculate_lorenz_curve(graph, type, steps=100):\n\tvertices = {}\n\ttotal_weight = 0.0\n\tfor node in graph.nodes():\n\t\tif graph.node[node]['type'] is type:\n\t\t\tvertices[node] = graph.node[node]['weight']\n\t\t\ttotal_weight += graph.node[node]['weight']\n\t\n\tprint 'Total Weight:', total_weight\n\n\tstep_size = len(vertices.keys()) / steps\n\t# Now sort vertices by their weights:\n\tsorted_vertex_keys = sorted(vertices, key=vertices.get, reverse=True)\n \t\n \ty = [0]*steps\n \ty_index = 0\n \tcum_sum = 0.0\n\tfor i in range(0, len(sorted_vertex_keys)):\n\t\tcum_sum += (vertices[sorted_vertex_keys[i]] / total_weight)\n \t\ty[y_index] = cum_sum\n\n \t\tif i % step_size == 0 and y_index<len(y)-1:\n \t\t\ty_index += 1\n \t\n \tx=[]\n \tx_sum=0.0\n \tfor i in range(0, steps):\n \t\tx_sum += (float(step_size)/len(vertices.keys()))\n \t\tx.append(x_sum)\n\n \treturn [x,y]", "def eady_growth_rate(data):\n N2 = ixr.brunt_vaisala(data)\n f = 2.0*omega*xruf.sin(xruf.deg2rad(data.lat))\n\n dz = ixr.domain.calculate_dz(data)\n du = ixr.domain.diff_pfull(data.ucomp, data)\n\n N = xruf.sqrt(N2.where(N2 > 0))\n\n egr = 0.31*du/dz*f/N\n return np.abs(egr)", "def estimate_tr_slot(br_data_df, fab_started_at, leeway, agent_df):\r\n a = br_data_df.loc[0, 'AVG(ca_op_time)']\r\n b = br_data_df.loc[0, 'AVG(tr_op_time)']\r\n # br_data_df.loc[0, 'AVG(ca_op_time)'] == 9:\r\n #if br_data_df.loc[0, 'AVG(tr_op_time)'] == 3.5: # if these 2 conditions are met, with high prob we are in first ever auction.\r\n #fab_started_at = datetime.datetime.now()\r\n #auction_total_time = 2 # auction estimated total time = 2 min\r\n # slot_1_start = datetime.datetime.now() + datetime.timedelta(minutes=int(auction_total_time)) - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) - leeway # time when on going fab started + mean ca processing time - mean tr operation time - margin.\r\n # slot_1_end = slot_1_start + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) # time when on going fab started + mean tr operation time\r\n # slot_2_start = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) - leeway # time when on going fab started + mean ca processing time - margin\r\n # slot_2_end = slot_2_start + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) # time when on going fab started + mean ca processing time + mean tr operation time\r\n ca_estimated_end = fab_started_at + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) # time when on going fab started + mean ca processing time.\r\n if br_data_df.loc[0, 'AVG(ca_op_time)'] == 9:\r\n if br_data_df.loc[0, 'AVG(tr_op_time)'] == 3.5:\r\n slot_1_start = ca_estimated_end - datetime.timedelta(minutes=int(br_data_df.loc[0, 
'AVG(tr_op_time)'])) - (leeway / 2)\r\n slot_1_end = ca_estimated_end + (leeway / 2)\r\n\r\n slot_1_start = ca_estimated_end - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)'])) - (leeway / 2)\r\n slot_1_end = ca_estimated_end + (leeway / 2)\r\n slot_2_start = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) - datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)']/2)) - (leeway / 2)\r\n slot_2_end = ca_estimated_end + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(ca_op_time)'])) + datetime.timedelta(minutes=int(br_data_df.loc[0, 'AVG(tr_op_time)']/2)) + (leeway / 2) # time when on going fab started + mean ca processing time + mean tr operation time\r\n ca_to_tr_df = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'location', 'purpose', 'request_type', 'action', 'time', 'slot_1_start', 'slot_1_end', 'slot_2_start', 'slot_2_end', 'slot'])\r\n ca_to_tr_df.at[0, 'id'] = agent_df.loc[0, 'id']\r\n ca_to_tr_df.at[0, 'agent_type'] = agent_df.loc[0, 'agent_type']\r\n ca_to_tr_df.at[0, 'location_1'] = agent_df.loc[0, 'location_1']\r\n ca_to_tr_df.at[0, 'location_2'] = agent_df.loc[0, 'location_2']\r\n ca_to_tr_df.at[0, 'location'] = agent_df.loc[0, 'location']\r\n ca_to_tr_df.at[0, 'purpose'] = \"request\"\r\n ca_to_tr_df.at[0, 'slot_1_start'] = slot_1_start\r\n ca_to_tr_df.at[0, 'slot_1_end'] = slot_1_end\r\n ca_to_tr_df.at[0, 'slot_2_start'] = slot_2_start\r\n ca_to_tr_df.at[0, 'slot_2_end'] = slot_2_end\r\n this_time = datetime.datetime.now()\r\n ca_to_tr_df.at[0, 'time'] = this_time\r\n ca_to_tr_df.at[0, 'request_type'] = \"request\"\r\n ca_to_tr_df.at[0, 'action'] = \"pre-book\"\r\n return ca_to_tr_df", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in 
metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def trend_filter(y, lambd = 0, order = 3):\n \n # PARAMETERS\n alpha = 0.01 #backtracking linesearch parameter (0,0.5]\n beta = 0.5 # backtracking linesearch parameter (0,1)\n mu = 2 # IPM parameter: t update\n max_iter = 40 # IPM parameter: max iteration of IPM\n max_ls_iter = 20 # IPM parameter: max iteration of line search\n tol = 1e-4 # IPM parameter: tolerance\n\n itr = 0\n gap = 1\n\n # DIMENSIONS\n n = len(y) #length of signal x\n\n # OPERATOR MATRICES\n D = diff_mat(n,order)\n\n DDT = D * D.T\n Dy = D * y\n\n m = len(Dy)\n\n # VARIABLES\n z = np.zeros(m) # dual variable\n mu1 = np.ones(m) # dual of dual variable\n mu2 = np.ones(m) # dual of dual variable\n\n t = 1e-10; \n p_obj = float('inf')\n d_obj = 0\n step = float('inf')\n f1 = z - lambd\n f2 = - z - lambd\n print(f'Iteration Primal obj. Dual obj. 
Gap')\n print('\\n')\n \n #----------------------------------------------------------------------\n # MAIN LOOP\n #----------------------------------------------------------------------\n\n for iters in range(max_iter):\n\n DTz = (z.T * D).T\n DDTz = D * DTz\n\n w = Dy - (mu1 - mu2)\n\n # two ways to evaluate primal objective:\n # 1) using dual variable of dual problem\n # 2) using optimality condition \n #temp = lsqr(DDT, w)[0] Not comparable to backslash in matlab (unstable)\n #temp = nla.lstsq(DDT.todense(), w)[0] # numpy library (similar results as scipy)\n\n temp = sla.lstsq(DDT.todense(), w)[0] #may be an overkill but stable\n p_obj1 = 0.5 * np.dot(w,temp) + lambd * np.sum(mu1 + mu2)\n p_obj2 = 0.5 * np.dot(DTz.T, DTz) + lambd * np.sum(np.abs(Dy - DDTz))\n\n p_obj = np.min([p_obj1, p_obj2])\n d_obj = -0.5 * np.dot(DTz, DTz) + np.dot(Dy.T, z)\n\n gap = p_obj - d_obj\n\n print(\"{0:6d} {1:15.4e} {2:13.5e} {3:10.2e}\".format(iters, p_obj, d_obj, gap))\n #Check stopping criterion\n if gap <= tol:\n status = 'solved'\n print(status)\n x = y - D.T * z\n\n #return x\n break\n\n if step >= 0.2:\n t = np.max([2 * m * mu / gap, 1.2 * t])\n\n # CALCULATE NEWTON STEP\n\n rz = DDTz - w\n\n val = mu1 / f1 + mu2 / f2\n row = np.arange(m)\n col = np.arange(m)\n\n S = DDT - coo_matrix((val, (row, col)), shape = (m,m))\n r = - DDTz + Dy + ( 1 / t ) / f1 - ( 1 / t ) / f2\n\n dz = sla.lstsq(S.todense(), r)[0]\n dmu1 = - ( mu1 + ( (1 / t) + dz * mu1 ) / f1 )\n dmu2 = - ( mu2 + ( ( 1 / t ) - dz * mu2 ) / f2 )\n\n resDual = rz\n resCent = np.concatenate([- mu1 * f1 - 1 / t, - mu2 * f2 - 1 / t])\n residual= np.concatenate([resDual, resCent])\n\n # BACKTRACKING LINESEARCH\n negIdx1 = (dmu1 < 0)\n negIdx2 = (dmu2 < 0)\n step = 1\n\n if any(negIdx1):\n step = np.min( [step, 0.99 * np.min( - mu1[negIdx1] / dmu1[negIdx1] )])\n if any(negIdx2):\n step = np.min( [step, 0.99 * np.min( - mu2[negIdx2] / dmu2[negIdx2] )])\n\n for liter in range(max_ls_iter):\n\n newz = z + step * dz\n newmu1 = mu1 + step * dmu1\n newmu2 = mu2 + step * dmu2\n newf1 = newz - lambd\n newf2 = - newz - lambd\n\n # UPDATE RESIDUAL\n newResDual = DDT * newz - Dy + newmu1 - newmu2\n newResCent = np.concatenate([- newmu1 * newf1 - 1 / t, - newmu2 * newf2 - 1 / t])\n newResidual = np.concatenate([newResDual, newResCent])\n\n if ( np.max([np.max(newf1), np.max(newf2)]) < 0 ) and ( norm(newResidual) <= (1 - alpha * step) * norm(residual) ):\n break\n\n step = beta * step\n\n # UPDATE PRIMAL AND DUAL VARIABLES\n z = newz\n mu1 = newmu1\n mu2 = newmu2\n f1 = newf1\n f2 = newf2\n\n # The solution may be close at this point, but does not meet the stopping\n # criterion (in terms of duality gap).\n x = y - D.T * z\n if (iters >= max_iter):\n status = 'maxiter exceeded'\n print(status)\n \n return x", "def linear_schedule(progress):\n return 1 - progress", "def get_shocks(self):\r\n \r\n \r\n '''\r\n \r\n if self.jacW == True:\r\n \r\n if self.t_sim == self.s:\r\n \r\n self.wage = .833333 + self.dx\r\n \r\n print(\"made it here\")\r\n \r\n else:\r\n \r\n self.wage = .833333\r\n \r\n \r\n PermShkDstn_U = Lognormal(np.log(self.mu_u) - (self.L*(self.PermShkStd[0])**2)/2 , self.L*self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when unemployed\r\n PermShkDstn_E = MeanOneLogNormal( self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when employed\r\n \r\n TranShkDstn_E = MeanOneLogNormal( self.TranShkStd[0],123).approx(self.TranShkCount)#Transitory Shock Distribution faced when 
employed\r\n TranShkDstn_E.X = (TranShkDstn_E.X *(1-self.tax_rate)*self.wage*self.N)/(1-self.UnempPrb)**2 #add wage, tax rate and labor supply\r\n \r\n lng = len(TranShkDstn_E.X )\r\n TranShkDstn_U = DiscreteDistribution(np.ones(lng)/lng, self.IncUnemp*np.ones(lng)) #Transitory Shock Distribution faced when unemployed\r\n \r\n IncShkDstn_E = combine_indep_dstns(PermShkDstn_E, TranShkDstn_E) # Income Distribution faced when Employed\r\n IncShkDstn_U = combine_indep_dstns(PermShkDstn_U,TranShkDstn_U) # Income Distribution faced when Unemployed\r\n \r\n #Combine Outcomes of both distributions\r\n X_0 = np.concatenate((IncShkDstn_E.X[0],IncShkDstn_U.X[0]))\r\n X_1=np.concatenate((IncShkDstn_E.X[1],IncShkDstn_U.X[1]))\r\n X_I = [X_0,X_1] #discrete distribution takes in a list of arrays\r\n \r\n #Combine pmf Arrays\r\n pmf_I = np.concatenate(((1-self.UnempPrb)*IncShkDstn_E.pmf, self.UnempPrb*IncShkDstn_U.pmf))\r\n \r\n IncShkDstn = [DiscreteDistribution(pmf_I, X_I)]\r\n \r\n self.IncShkDstn = IncShkDstn\r\n \r\n \r\n '''\r\n \r\n PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays\r\n TranShkNow = np.zeros(self.AgentCount)\r\n newborn = self.t_age == 0\r\n for t in range(self.T_cycle):\r\n these = t == self.t_cycle\r\n N = np.sum(these)\r\n if N > 0:\r\n IncShkDstnNow = self.IncShkDstn[\r\n t - 1\r\n ] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[t - 1] # and permanent growth factor\r\n # Get random draws of income shocks from the discrete distribution\r\n IncShks = IncShkDstnNow.draw(N)\r\n\r\n PermShkNow[these] = (\r\n IncShks[0, :] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShks[1, :]\r\n \r\n # That procedure used the *last* period in the sequence for newborns, but that's not right\r\n # Redraw shocks for newborns, using the *first* period in the sequence. 
Approximation.\r\n N = np.sum(newborn)\r\n if N > 0:\r\n these = newborn\r\n IncShkDstnNow = self.IncShkDstn[0] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[0] # and permanent growth factor\r\n\r\n # Get random draws of income shocks from the discrete distribution\r\n EventDraws = IncShkDstnNow.draw_events(N)\r\n PermShkNow[these] = (\r\n IncShkDstnNow.X[0][EventDraws] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShkDstnNow.X[1][EventDraws]\r\n # PermShkNow[newborn] = 1.0\r\n TranShkNow[newborn] = 1.0\r\n\r\n # Store the shocks in self\r\n self.EmpNow = np.ones(self.AgentCount, dtype=bool)\r\n self.EmpNow[TranShkNow == self.IncUnemp] = False\r\n self.shocks['PermShk'] = PermShkNow\r\n self.shocks['TranShk'] = TranShkNow", "def minutes_of_new_data(symbol, kline_size, data, source, client):\n if len(data) > 0:\n old = parser.parse(data[\"timestamp\"].iloc[-1])\n elif source == \"binance\":\n old = datetime.strptime('1 Jan 2017', '%d %b %Y')\n elif source == \"bitmex\":\n old = client.Trade.Trade_getBucketed(symbol=symbol, binSize=kline_size, count=1, reverse=False).result()[0][0][\n 'timestamp'] \n if source == \"binance\": new = pd.to_datetime(client.get_klines(symbol=symbol, interval=kline_size)[-1][0],\n unit='ms')\n if source == \"bitmex\": new = \\\n client.Trade.Trade_getBucketed(symbol=symbol, binSize=kline_size, count=1, reverse=True).result()[0][0]['timestamp']\n return old, new", "def pretty(self,x):\n\n\t\t#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience\n\t\tT = list([0]*(self.__n_legs))\n\n\t\tfor i in xrange(self.__n_legs):\n\t\t\tT[i] = (x[4+4*i]/sum(x[4::4]))*x[3]\n\n\t\t#2 - We compute the epochs and ephemerides of the planetary encounters\n\t\tt_P = list([None] * (self.__n_legs))\n\t\tr_P = list([None] * (self.__n_legs))\n\t\tv_P = list([None] * (self.__n_legs))\n\t\tDV = list([None] * (self.__n_legs))\n\t\tclose_d = list([None] * (self.__n_legs))\n\t\t\n\t\tfor i,planet in enumerate(self.seq):\n\t\t\tt_P[i] = epoch(x[0]+sum(T[:i+1]))\n\t\t\tr_P[i],v_P[i] = self.seq[i].eph(t_P[i])\n\n\t\t#3 - We start with the first leg: a lambert arc\n\t\ttheta = 2*pi*x[1]\n\t\tphi = acos(2*x[2]-1)-pi/2\n\t\tr = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection\n\t\tr = [JR*1000*d for d in r]\n\t\t\n\t\tl = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)\n\n\t\t#Lambert arc to reach seq[1]\n\t\tv_end_l = l.get_v2()[0]\n\t\tv_beg_l = l.get_v1()[0]\n\t\tclose_d[0] = closest_distance(r,v_beg_l, r_P[0], v_end_l, self.common_mu)[0] / JR\n\n\t\t#First DSM occuring at the very beginning (will be cancelled by the optimizer)\n\t\tDV[0] = abs(norm(v_beg_l) - 3400)\n\n\t\tprint \"\\nFirst Leg: 1000JR to \" + self.seq[0].name \n\t\tprint \"\\tDeparture: \" + str(t_P[0]) + \" (\" + str(t_P[0].mjd2000) + \" mjd2000) \" \n\t\tprint \"\\tDuration: \" + str(T[0]) + \"days\"\n\t\tprint \"\\tInitial Velocity Increment (m/s): \" + str(DV[0])\n\t\tprint \"\\tArrival relative velocity at \" + self.seq[0].name +\" (m/s): \" + str(norm([a-b for a,b in zip(v_end_l,v_P[0])]))\n\t\tprint \"\\tClosest approach distance: \" + str(close_d[0])\n\n\t\t#4 - And we proceed with each successive leg\n\t\tfor i in xrange(1,self.__n_legs):\n\t\t\t#Fly-by \n\n\t\t\tv_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)\n\t\t\t#s/c propagation before the DSM\n\t\t\tr,v = 
propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)\n\t\t\ttmp, ra = closest_distance(r_P[i-1],v_out, r,v, self.common_mu)\n\t\t\t#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)\n\t\t\tdt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC\n\t\t\tl = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)\n\t\t\tv_end_l = l.get_v2()[0]\n\t\t\tv_beg_l = l.get_v1()[0]\n\t\t\ttmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)\n\t\t\tif tmp < tmp2:\n\t\t\t\tclose_d[i] = tmp/JR\n\t\t\t\tra = ra/JR\n\t\t\telse:\n\t\t\t\tclose_d[i] = tmp2/JR\n\t\t\t\tra = ra2/JR\n\t\t\t#DSM occuring at time nu2*T2\n\t\t\tDV[i] = norm([a-b for a,b in zip(v_beg_l,v)])\n\n\t\t\tprint \"\\nleg no. \" + str(i+1) + \": \" + self.seq[i-1].name + \" to \" + self.seq[i].name \n\t\t\tprint \"\\tDuration (days): \" + str(T[i])\n\t\t\tprint \"\\tFly-by epoch: \" + str(t_P[i]) + \" (\" + str(t_P[i].mjd2000) + \" mjd2000) \" \n\t\t\tprint \"\\tFly-by altitude (km): \" + str((x[6+(i-1)*4]*self.seq[i-1].radius-self.seq[i-1].radius)/1000)\n\t\t\tprint \"\\tDSM after (days): \" + str(x[7+(i-1)*4]*T[i])\n\t\t\tprint \"\\tDSM magnitude (m/s): \" + str(DV[i]) \n\t\t\tprint \"\\tClosest approach distance: \" + str(close_d[i])\n\t\t\tprint \"\\tApoapsis at closest distance: \" + str(ra)\n\t\t\tprint \"\\tV in (m/s): \" + str(v_end_l)\n\t\t\tprint \"\\tV out (m/s): \" + str(v_out)\n\t\t\n\t\t\n\t\tprint \"\\nArrival at \" + self.seq[-1].name\n\t\tvel_inf = [a-b for a,b in zip(v_end_l,v_P[-1])]\n\t\tprint \"Arrival epoch: \" + str(t_P[-1]) + \" (\" + str(t_P[-1].mjd2000) + \" mjd2000) \" \n\t\tprint \"Arrival Vinf (m/s): \" + vel_inf.__repr__() + \" - \" + str(norm(vel_inf))\n\t\tprint \"Total mission time (days): \" + str(sum(T))\n\t\tprint \"Total DV (m/s): \" + str(sum(DV))", "def test_expected_growth(self):\r\n\r\n graph = nx.lollipop_graph(4, 1)\r\n graph.add_edge(4, 2)\r\n\r\n c = [3, 4]\r\n result = clique.search(c, graph, iterations=100)\r\n assert result == [0, 1, 2, 3]", "def test_suite():\n test(growthrate(1,1,1) == 0)\n test(growthrate(1000,500,10) == 50)\n test(growthrate(50,100,1) == -50)\n test(growthrate(50,50,1) == 0)\n test(growthrate(200,100,1) == 100)\n test(growthrate(300,200,1) == 100)\n test(growthrate(300,200,2) == 50)\n test(growthrate(80,40,2) == 20)\n test(growthrate(100,300,-2) == 100)\n test(growthrate(500,100,-1) == -400)", "def horde_start(self, observation):", "def gpt2_1w (station, dmjd,dlat,dlon,hell,it):\n\n# need to find diffpod and difflon\n if (dlon < 0):\n plon = (dlon + 2*np.pi)*180/np.pi;\n else:\n plon = dlon*180/np.pi;\n# transform to polar distance in degrees\n ppod = (-dlat + np.pi/2)*180/np.pi; \n\n# % find the index (line in the grid file) of the nearest point\n# \t % changed for the 1 degree grid (GP)\n ipod = np.floor(ppod+1); \n ilon = np.floor(plon+1);\n \n# normalized (to one) differences, can be positive or negative\n#\t% changed for the 1 degree grid (GP)\n diffpod = (ppod - (ipod - 0.5));\n difflon = (plon - (ilon - 0.5));\n\n\n# change the reference epoch to January 1 2000\n print('Modified Julian Day', dmjd)\n dmjd1 = dmjd-51544.5 \n\n pi2 = 2*np.pi\n pi4 = 4*np.pi\n\n# mean gravity in m/s**2\n gm = 9.80665;\n# molar mass of dry air in kg/mol\n dMtr = 28.965E-3 \n# dMtr = 28.965*10^-3 \n# universal gas constant in J/K/mol\n Rg = 8.3143 \n\n# factors for amplitudes, i.e. 
whether you want time varying\n if (it==1):\n print('>>>> no refraction time variation ')\n cosfy = 0; coshy = 0; sinfy = 0; sinhy = 0;\n else: \n cosfy = np.cos(pi2*dmjd1/365.25)\n coshy = np.cos(pi4*dmjd1/365.25) \n sinfy = np.sin(pi2*dmjd1/365.25) \n sinhy = np.sin(pi4*dmjd1/365.25) \n cossin = np.matrix([1, cosfy, sinfy, coshy, sinhy])\n# initialization of new vectors\n p = 0; T = 0; dT = 0; Tm = 0; e = 0; ah = 0; aw = 0; la = 0; undu = 0;\n undul = np.zeros(4)\n Ql = np.zeros(4)\n dTl = np.zeros(4)\n Tl = np.zeros(4)\n pl = np.zeros(4)\n ahl = np.zeros(4)\n awl = np.zeros(4)\n lal = np.zeros(4)\n Tml = np.zeros(4)\n el = np.zeros(4)\n#\n pgrid, Tgrid, Qgrid, dTgrid, u, Hs, ahgrid, awgrid, lagrid, Tmgrid = read_4by5(station,dlat,dlon,hell)\n#\n for l in [0,1,2,3]:\n KL = l #silly to have this as a variable like this \n# transforming ellipsoidal height to orthometric height:\n# Hortho = -N + Hell\n undul[l] = u[KL] \n hgt = hell-undul[l] \n# pressure, temperature at the height of the grid\n T0 = Tgrid[KL,0] + Tgrid[KL,1]*cosfy + Tgrid[KL,2]*sinfy + Tgrid[KL,3]*coshy + Tgrid[KL,4]*sinhy;\n tg = float(Tgrid[KL,:] *cossin.T)\n# print(T0,tg)\n\n p0 = pgrid[KL,0] + pgrid[KL,1]*cosfy + pgrid[KL,2]*sinfy + pgrid[KL,3]*coshy + pgrid[KL,4]*sinhy;\n \n# humidity \n Ql[l] = Qgrid[KL,0] + Qgrid[KL,1]*cosfy + Qgrid[KL,2]*sinfy + Qgrid[KL,3]*coshy + Qgrid[KL,4]*sinhy;\n \n# reduction = stationheight - gridheight\n Hs1 = Hs[KL]\n redh = hgt - Hs1;\n\n# lapse rate of the temperature in degree / m\n dTl[l] = dTgrid[KL,0] + dTgrid[KL,1]*cosfy + dTgrid[KL,2]*sinfy + dTgrid[KL,3]*coshy + dTgrid[KL,4]*sinhy;\n \n# temperature reduction to station height\n Tl[l] = T0 + dTl[l]*redh - 273.15;\n\n# virtual temperature\n Tv = T0*(1+0.6077*Ql[l]) \n c = gm*dMtr/(Rg*Tv) \n \n# pressure in hPa\n pl[l] = (p0*np.exp(-c*redh))/100 \n \n# hydrostatic coefficient ah\n ahl[l] = ahgrid[KL,0] + ahgrid[KL,1]*cosfy + ahgrid[KL,2]*sinfy + ahgrid[KL,3]*coshy + ahgrid[KL,4]*sinhy;\n \n# wet coefficient aw\n awl[l] = awgrid[KL,0] + awgrid[KL,1]*cosfy + awgrid[KL,2]*sinfy + awgrid[KL,3]*coshy + awgrid[KL,4]*sinhy;\n\t\t\t\t\t \n# water vapor decrease factor la - added by GP\n lal[l] = lagrid[KL,0] + lagrid[KL,1]*cosfy + lagrid[KL,2]*sinfy + lagrid[KL,3]*coshy + lagrid[KL,4]*sinhy;\n\t\t\t\t\t \n# mean temperature of the water vapor Tm - added by GP\n Tml[l] = Tmgrid[KL,0] + Tmgrid[KL,1]*cosfy + Tmgrid[KL,2]*sinfy + Tmgrid[KL,3]*coshy + Tmgrid[KL,4]*sinhy;\n\t\t\t\t\t \t\t \n# water vapor pressure in hPa - changed by GP\n e0 = Ql[l]*p0/(0.622+0.378*Ql[l])/100; # % on the grid\n aa = (100*pl[l]/p0)\n bb = lal[l]+1\n el[l] = e0*np.power(aa,bb) # % on the station height - (14) Askne and Nordius, 1987\n \n dnpod1 = np.abs(diffpod); # % distance nearer point\n dnpod2 = 1 - dnpod1; # % distance to distant point\n dnlon1 = np.abs(difflon);\n dnlon2 = 1 - dnlon1;\n \n# pressure\n R1 = dnpod2*pl[0]+dnpod1*pl[1];\n R2 = dnpod2*pl[2]+dnpod1*pl[3];\n p = dnlon2*R1+dnlon1*R2;\n \n# temperature\n R1 = dnpod2*Tl[0]+dnpod1*Tl[1];\n R2 = dnpod2*Tl[2]+dnpod1*Tl[3];\n T = dnlon2*R1+dnlon1*R2;\n \n# temperature in degree per km\n R1 = dnpod2*dTl[0]+dnpod1*dTl[1];\n R2 = dnpod2*dTl[2]+dnpod1*dTl[3];\n dT = (dnlon2*R1+dnlon1*R2)*1000;\n \n# water vapor pressure in hPa - changed by GP\n R1 = dnpod2*el[0]+dnpod1*el[1];\n R2 = dnpod2*el[2]+dnpod1*el[3];\n e = dnlon2*R1+dnlon1*R2;\n \n# hydrostatic\n R1 = dnpod2*ahl[0]+dnpod1*ahl[1];\n R2 = dnpod2*ahl[2]+dnpod1*ahl[3];\n ah = dnlon2*R1+dnlon1*R2;\n \n# wet\n R1 = dnpod2*awl[0]+dnpod1*awl[1];\n R2 = 
dnpod2*awl[2]+dnpod1*awl[3];\n aw = dnlon2*R1+dnlon1*R2;\n \n# undulation\n R1 = dnpod2*undul[0]+dnpod1*undul[1];\n R2 = dnpod2*undul[2]+dnpod1*undul[3];\n undu = dnlon2*R1+dnlon1*R2;\n\n# water vapor decrease factor la - added by GP\n R1 = dnpod2*lal[0]+dnpod1*lal[1];\n R2 = dnpod2*lal[2]+dnpod1*lal[3];\n la = dnlon2*R1+dnlon1*R2;\n\t\t\n# mean temperature of the water vapor Tm - added by GP\n R1 = dnpod2*Tml[0]+dnpod1*Tml[1];\n R2 = dnpod2*Tml[2]+dnpod1*Tml[3];\n Tm = dnlon2*R1+dnlon1*R2; \n\n return p, T, dT,Tm,e,ah,aw,la,undu", "def graphformation(time_lower, time_upper):\n\tprm = param.Para()\n\ttry:\n\t\tdb_connection = mysql.connector.connect(\n\t\t host=prm.hostname,\n\t\t user=prm.username,\n\t\t passwd=prm.password,\n\t\t database= prm.dbname\n\t\t )\n\t\tdb_cursor = db_connection.cursor()\n\texcept:\n\t\tprint(\"Can't Connect to database, check credentials in parameter file\")\n\tquery = (\"SELECT * FROM identity \")\n\tdb_cursor.execute(query)\n\tdf1=pd.DataFrame(db_cursor.fetchall())\n\tdf1.columns= ['node','deviceid','student','rollno']\n\tdict_identity = dict(zip(df1.deviceid, df1.node))\n\trev_dict_identity = dict(zip(df1.node, df1.deviceid ))\n\tquery = (\"SELECT * FROM activity WHERE time BETWEEN '{}' AND '{}'\".format(time_lower,time_upper)) ## incomplete\n\tdb_cursor.execute(query)\n\tactivity_data = pd.DataFrame(db_cursor.fetchall())\n\tif activity_data.empty==False:\n\t\tactivity_data.columns=[\"sl_no\",\"time\",\"node\",\"latitude\",\"longitude\"]\n\telse:\n\t\tprint(\"No Activity in the selected Time Window\")\n\t\treturn\n\tnumnodes= len(df1)\n\tedges= []\n\tscore = {}\n\t#print(activity_data)\n\ttime_groups = activity_data.groupby('time')\n\twith open(r'C:\\Users\\HP\\Desktop\\project\\Contact_Graph\\bluetooth.txt') as json_file:\n\t\tdata1 = json.load(json_file)\n\tfor name, group in time_groups:\n\t\tscore_tmp = decayfunc(name,time_upper)\n\t\tgroup = group.sort_values('node')\n\t\tfor i in range(len(group)-1):\n\t\t\tnode1 = group.iloc[i,2]\n\t\t\t###########################\n\t\t\tlistnearby=[]\n\t\t\ttry:\n\t\t\t\tlistnearby = data1[rev_dict_identity[node1]][str(name)]\n\t\t\t\tlistnearby = [dict_identity[i] for i in listnearby if dict_identity[i]>node1]\n\t\t\t\tfor i in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(node1,i)]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(node1,i)]=1\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t###########################\n\t\t\tfor j in range(i+1,len(group)):\n\t\t\t\tnode2 =group.iloc[j,2]\n\t\t\t\tif proximityfunc(group.iloc[i,3],group.iloc[i,4],group.iloc[j,3],group.iloc[j,4]) and node2 not in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]=1\n\tnode_list = list(df1.node)\n\ttitle_list = list(df1.deviceid)\n\tedges_list = []\n\tfor edge,val in score.items():\n\t\tedges_list.append((int(edge[0]),int(edge[1]),float(val)))\n\n\treturn edges_list,node_list,title_list", "def line_moved(self):\n\n # The line is supposed to be moved by hand to the beginning of first wrinkle.\n # The optimal spot is local maximum (not always visible)\n ext_index = self.index_of_drop + int(self.line.value() * 10000)\n ext_value = self.data[ext_index]\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n\n n = toolbox_2.particle_count_2(ext_value)\n\n # measurement series 1\n if self.selected_data == 3 and 7 <= 
self.meas_selected_number <= 17 and self.meas_selected_series == 1:\n index = self.meas_selected_number - 7 # Assumes that first measurement is number 7\n self.smallest_particles[index] = smallest_growing_particle\n self.number_counts[index] = n\n\n self.update_distribution()\n # Update plot\n self.curve_distribution.setData(self.particle_distribution_x, self.particle_distribution_y*1e-10)\n self.curve_distribution_cumulative.setData(self.smallest_particles, self.number_counts*1e-10)\n\n # measurement series 2\n elif self.selected_data == 3 and self.meas_selected_series == 2:\n index = self.meas_selected_number - 1 # begins from 1, 0th measurement is just copy of 8th\n self.number_counts_2[index] = n\n\n self.curve_rotatometer.setData(np.array([4, 6, 8, 10, 12, 14, 16, 18]), self.number_counts_2*1e-10)\n x = np.linspace(3.5, 20, 100)\n self.curve_rotatometer_fit.setData(x, self.number_counts_2[0] * 4 * (1 / x) *1e-10)\n\n #print(\"N\", \"%.2e\"%n, \"dpres\", round(p_i - p_f))", "def betterEvaluationFunction(currentGameState):\n\n # Useful information you can extract from a GameState (pacman.py)\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newCapsules = currentGameState.getCapsules()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # Volem que s'apropi a les fruites i s'allunyi dels fantasmes en cas que aquests ens puguin matar, si no, hem d'intentar menjar-nos-els, pensant en seguir optant a la fruita.\n\n foodDistance = [util.manhattanDistance(newPos, food) for food in newFood.asList()]\n if foodDistance:\n foodMinima = min(foodDistance)\n else:\n foodMinima = -1 # perque si la llista esta buida vol dir que hem hem d'anar cap aquesta direcció, i per tant necessitem un valor molt gran.\n\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n ghostDistance = [util.manhattanDistance(newPos, ghostState.getPosition()) for ghostState in newGhostStates]\n\n distanciaFantasmes = 0\n fantasmaMoltAprop = 0\n\n for i in range(len(ghostDistance)):\n if newScaredTimes[i] >= 2:\n distanciaFantasmes -= ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop -= 1\n else:\n distanciaFantasmes += ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop += 1\n\n if distanciaFantasmes == 0:\n distanciaFantasmes = -1 # perque aixo voldra dir que tenim els fantasmes al voltant, i per tant ens en volem allunyar si o si d'aquesta direcció\n\n capsulesDistances = [util.manhattanDistance(newPos, capsuleState) for capsuleState in newCapsules]\n\n if capsulesDistances:\n capsulaMinima = min(capsulesDistances)\n itemMinim = min(capsulaMinima, foodMinima)\n else:\n itemMinim = foodMinima\n\n result = currentGameState.getScore() + 1 / float(itemMinim) - 1 / float(distanciaFantasmes) - fantasmaMoltAprop\n\n\n return result", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = 
self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def auxmax_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. If k_ind = nomax-1, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1: #\n print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n return f_cc\n else:\n print \"hihu1\",line_start\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n \n # The last line.\n if k_ind==cfg.nomax-1:\n \n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n \n \n return f_cc", "def main():\n\n # Hypothesis:\n # The `impact` encapsulates the volatility, stability and overall\n # fluctuation of the market; in particular, movements that would\n # affect one's portfolio, e.g. unexpected (i.e. not predicted)\n # increases or drops in prices.\n # For the StrategyLearner should directly affect the learned\n # policy, particularly, in terms of willingness to take risks by\n # betting on the behavior of the market.\n # This can be translated into three metrics:\n # - Number of entries:\n # These should be reduced as market impact increases which\n # shows the learning agent being more cautious about its bets\n # - Cumulative return:\n # Directly related to the point mentioned above, as market\n # impact increases and the agent's willingness to take risks\n # decreaes, so is the overall performance of the strategy\n # - Training episodes:\n # This applies specifically to the Q-Learning agent, but it\n # is interesting to see how as the market impact increases,\n # the number of complete training episodes (i.e. a complete\n # pass on the trading data) is not affected. One would think\n # that the agent would converge faster when the impact is\n # large as it would quickly realize that the most optimal\n # strategy is to not do anything. 
However, impact does not\n # affect the rate of convergence, but rather the strategy\n # that the agent converges to\n\n # Set the seed for reproducibility\n random.seed(1481090000)\n\n # Experiment parameters\n symbol = 'JPM'\n # In-sample: January 1, 2008 to December 31 2009\n start_date = dt.datetime(2008, 1, 1)\n end_date = dt.datetime(2009, 12, 31)\n starting_value = 100000\n commission = 0.0\n # Values to use to evaluate the effect of the impact\n impact_values = [0.0, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0]\n\n all_entries = []\n all_returns = []\n all_episodes = []\n\n for impact in impact_values:\n log.info(\"Evaluating the effect of impact=%s\", impact)\n strategy_learner = StrategyLearner(verbose=False, impact=impact)\n\n log.info(\"Training StrategyLearner\")\n strategy_learner.addEvidence(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Querying StrategyLearner to generate trades\")\n trades = strategy_learner.testPolicy(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Transforming StrategyLearner trades into marketsim orders\")\n orders = _convert_trades_to_marketisim_orders(symbol, trades)\n\n log.info(\"Computing portfolio values for %d orders\", orders.shape[0])\n port_vals = compute_portvals(\n orders,\n start_val=starting_value,\n commission=commission,\n impact=impact\n )\n\n cumulative_return = _compute_cumulative_return(port_vals)\n\n all_entries.append(strategy_learner.metadata['entries'])\n all_returns.append(cumulative_return)\n all_episodes.append(strategy_learner.metadata['training_episodes'])\n\n _plot_and_save_number_of_entries_per_impact_value(impact_values, all_entries)\n _plot_and_save_number_of_episodes_per_impact_value(impact_values, all_episodes)\n _plot_and_save_cumulative_return_per_impact_value(impact_values, all_returns)", "def update(self, sim, dt):\n #growth kinetics\n self.division_timer += dt\n #you can grow unless you are in the A state meaning apoptosis\n if(self.division_timer >= self.division_time and self._division):\n #now you can divide\n if(self.state == \"T1\"):\n #change the current sytate to D\n self.state = \"NSC\"\n self._division = False\n self.division_time = 36\n #progenitor time is faster with concentration factor\n\n #add the concentration\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n self.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## norm_mn = float(mn_count) / float(tot)\n## if(norm_mn < self._p2):\n## self.division_time = 36*(norm_mn) # in hours\n## self.division_time = max(self.division_time, 1) \n## else:\n## \n## print(norm_mn, self.division_time)\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n if(self.state == \"T2\"):\n #change the current sytate to D\n self.state = \"MN\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n self.set_gradient_source_sink_coeff(\"EGF\", 50.0*source, 1.0*consump_rate)\n if(self.state == \"T3\"):\n #change the current sytate to D\n self.state = \"G\"\n self.division_time = 56 #in hours\n #also set the current 
consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n #get the location\n #pick a random point on a sphere\n location = RandomPointOnSphere()*self.radius/2.0 + self.location\n #get the radius\n radius = self.radius\n #get the ID\n ID = sim.get_ID()\n #make the object\n sc = NueronalStemCell(location, radius, ID, self.state,\n division_time = self.division_time,\n params = [self._p1, self._p2,\n self._p3, self._p4, self._p5,\n self._p6, self.p7])\n #copy secretion to NSC progeny\n if(self.state == \"NSC\"):\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n sc.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n sc._division = False\n #set its soluble count\n## sc.sol_count = self.sol_count / 2.\n## self.sol_count = self.sol_count / 2.\n #copy over all of the coefficients to the new cells\n## prod_cons = self.get_gradient_source_sink_coeff(\"O2\")\n## sc.set_gradient_source_sink_coeff(\"O2\", prod_cons[0], prod_cons[1])\n prod_cons = self.get_gradient_source_sink_coeff(\"EGF\")\n sc.set_gradient_source_sink_coeff(\"EGF\", prod_cons[0], prod_cons[1]) \n #add it to the imsulation\n sim.add_object_to_addition_queue(sc)\n #reset the division time\n self.division_timer = 0\n \n if(self.state == \"U\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x = rand.random()\n prob = self._p1 #probability of turning into a NSC\n #longer before the differentiation starts\n if(x < prob):\n #differentiation occurs\n self.state = \"T1\"\n #also add a proabability to differentiate directly to a mn\n n1 = self._p4\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n #get the value fo the gradient and make differntiation inversly\n #inversly correlated with the proportion present\n norm_mn = self.get_gradient_value(\"EGF\")\n #probability of turning into a motor nueron\n n1 = self._p4\n## #normalize the result\n## if(tot != 0):\n## norm_mn = float(mn_count) / float(tot)\n## else:\n## norm_mn = 0\n #calculate the probability\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p2**n1 + norm_mn**n1)\n x1 = rand.random()\n if(x1 <= self._p1*prob_MN):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n \n if(self.state == \"NSC\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x1 = rand.random()\n x2 = rand.random()\n #Find all the motor nuerons\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## #normalize the result\n## norm_mn = float(mn_count) / float(tot)\n #Make differerntiationd ependant on the gradient value\n norm_mn = self.get_gradient_value(\"EGF\")\n #set the paramaters\n n1 = self._p4\n #update the division time\n## self.division_time = norm_mn * 38 #in hours takes care of the feedback\n #depends on other motor nuerons\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p3**n1 + norm_mn**n1) #probability of turning into a motor nueron\n## prob_G = (1.*norm_mn**n2)/(self._p3**n1 + norm_mn**n2) #of turning into a glial cell\n prob_G = self._p5\n #longer before the differentiation starts\n if(x1 <= prob_MN and x2 > 
prob_G):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n if(x1 > prob_MN and x2 <= prob_G):\n #differentiation occurs towards a glial cell\n self.state = \"T3\"\n #check to see if division enabled\n if(self._division == False):\n #check for mitotic speed up\n a = self._p6\n b = self._p7\n norm_nsc = self.get_gradient_value(\"TNF\")\n prob_divide = (1.*norm_nsc**b)/(a**b + norm_nsc**b)\n r = rand.random()\n if(r <= x):\n self._division = True", "def branch_precursor(state, time, d):\n assert d[\"alpha_IL2\"] < d[\"alpha1\"] and d[\"alpha_IL2\"] < d[\"alpha2\"]\n \n th0 = state[0]\n \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"]+1)]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]+1):]\n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n \n t_eff = th1_all+th2_all\n t_il2 = np.sum(th1[:d[\"alpha_IL2\"]]) + np.sum(th2[:d[\"alpha_IL2\"]])\n\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n \n conc_il2 = d[\"rate_il2\"]*t_il2/(d[\"K_il2\"]+t_eff)\n\n # compute feedbacks\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2) \n \n ### calculate probability, note that these are adjusted to beta1 beta2 so that\n # they are not necessarily \\in (0,1)\n p1, p2 = get_prob(d, beta1, beta2, cyto_1, cyto_2)\n \n #print(beta1*p1_adj/(beta1*p1_adj+beta2))\n beta1_p = d[\"beta1_p\"]\n beta2_p = d[\"beta2_p\"]\n rate_death = d[\"d_eff\"] \n \n # check for homeostasis regulation\n if d[\"crit\"] == False:\n update_t0(d, time, conc_il2, t_eff)\n elif d[\"death_mode\"] == False:\n assert d[\"crit\"] == True \n beta1_p = beta1_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n beta2_p = beta2_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n\n else:\n rate_death = rate_death*np.exp(time-d[\"t0\"])\n\n # this is the actual differentiation where odes are computed \n dt_th1 = diff_precursor(th1, th0, d[\"alpha1\"], beta1, beta1_p, p1, rate_death, d)\n dt_th2 = diff_precursor(th2, th0, d[\"alpha2\"], beta2, beta2_p, p2, rate_death, d)\n dt_th0 = -(beta1*p1+beta2)*th0 \n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def growthrate(cur, pre, y):\n return (cur-pre)/y", "def agentbasedsim(L, a, pi, aenv, pienv, xi,\n adev=1.0, pidev=0.5,\n nind=10, ngeneration=100, nburnin=10,\n prng=None,\n callback=None):\n\n p, q = from_api(a, pi)\n alpha, beta = from_api(aenv, pienv)\n if not adev == 1.0:\n delta, epsilon = from_api(adev, pidev)\n\n # all parameters need to be in array form if cython acceleration is used\n if usecstepmarkov:\n alpha = _arrayify(alpha, L)\n beta = _arrayify(beta, L)\n p = _arrayify(p, (nind, L))\n q = _arrayify(q, (nind, L))\n if not adev == 1.0:\n delta = _arrayify(delta, (nind, L))\n epsilon = _arrayify(epsilon, (nind, L))\n \n env = np.zeros(L, dtype = bool)\n gen = np.zeros((nind, L), dtype = bool)\n \n totoffsprings = np.zeros(ngeneration)\n prng = prng if prng else np.random\n \n for generation in range(ngeneration):\n # time step environment\n rand = prng.rand(L)\n env = stepmarkov(env, alpha, beta, rand)\n if callback and generation >= nburnin:\n callback(gen, env)\n if not adev == 1.0:\n rand = prng.rand(nind, L)\n phen = stepmarkov2d(gen, delta, epsilon, rand)\n else:\n phen = gen\n # 
calculate growth rate\n noffspring = xi(phen, env)\n totoffspring = noffspring.sum()\n totoffsprings[generation] = totoffspring\n # time step population\n rand = prng.rand(nind, L)\n parent = gen[np.arange(nind).repeat(prng.multinomial(nind, noffspring/totoffspring))]\n gen = stepmarkov2d(parent, p, q, rand)\n \n # calculate Lambda = mean growth rate\n return np.mean(np.log(totoffsprings[nburnin:]/nind))", "def prob_getLine(img, threshold, line_length, line_gap, width, height, theta):\n\n\n\n # maximum line number to prevent infinite loop\n lines_max = 2 ** 15\n lines = []\n\n # calculate the image diagonal\n imgDiagnal = 2 * np.ceil((np.sqrt(img.shape[0] * img.shape[0] +img.shape[1] * img.shape[1])))\n accum = np.zeros((int(imgDiagnal), int(theta.shape[0])))\n offset = imgDiagnal / 2\n nthetas = theta.shape[0]\n # compute the bins and allocate the accumulator array\n mask = np.zeros((height, width))\n line_end = np.zeros((2, 2))\n\n # compute sine and cosine of angles\n cosinTheta = np.cos(theta)\n sinTheta = np.sin(theta)\n\n # find the nonzero indexes\n yXis, xXis = np.nonzero(img)\n points = list(zip(xXis, yXis))\n # mask all non-zero indexes\n mask[yXis, xXis] = 1\n shift = 16\n\n while 1:\n\n # check if the image is empty, quit if no remaining points\n count = len(points)\n if count == 0:\n break\n\n # select a random non-zero point\n index = random.randint(0,count) % count\n x = points[index][0]\n y = points[index][1]\n\n # remove the pixel from the image\n del points[index]\n\n # if previously eliminated, skip\n if not mask[y, x]:\n continue\n\n #set some constant for the ease of later use\n value = 0\n max_value = threshold - 1\n max_theta = -1\n\n # apply hough transform on point\n for j in range(nthetas):\n accum_idx = int(round((cosinTheta[j] * x + sinTheta[j] * y)) + offset)\n accum[accum_idx, j] += 1\n value = accum[accum_idx, j]\n if value > max_value:\n max_value = value\n max_theta = j\n\n #check if the highest value change for this pixel has detected line or not\n if max_value < threshold:\n continue #if less than the threshold, than skip this point\n\n # from the random point walk in opposite directions and find the longest line segment continuous\n a = -sinTheta[max_theta]\n b = cosinTheta[max_theta]\n x0 = x\n y0 = y\n\n # calculate gradient of walks using fixed point math\n xflag = np.fabs(a) > np.fabs(b)\n if xflag:\n if a > 0:\n dx0 = 1\n else:\n dx0 = -1\n dy0 = round(b * (1 << shift) / np.fabs(a))\n y0 = (y0 << shift) + (1 << (shift - 1))\n else:\n if b > 0:\n dy0 = 1\n else:\n dy0 = -1\n dx0 = round(a * (1 << shift) / np.fabs(b))\n x0 = (x0 << shift) + (1 << (shift - 1))\n\n # find the line segment not exceeding the acceptable line gap\n for k in range(2):\n gap = 0\n px = x0\n py = y0\n dx = dx0\n dy = dy0\n if k > 0:\n dx = -dx\n dy = -dy\n while 1:\n if xflag:\n x1 = px\n y1 = int(py) >> shift\n else:\n x1 = int(px) >> shift\n y1 = py\n # check when line exits image boundary\n if x1 < 0 or x1 >= width or y1 < 0 or y1 >= height:\n break\n gap += 1\n # if non-zero point found, continue the line\n if mask[y1, x1]:\n gap = 0\n line_end[k, 1] = y1\n line_end[k, 0] = x1\n # if gap to this point was too large, end the line\n elif gap > line_gap:\n break\n px += dx\n py += dy\n\n\n # confirm line length is acceptable\n acceptableLine = abs(line_end[1, 1] - line_end[0, 1]) >= line_length or \\\n abs(line_end[1, 0] - line_end[0, 0]) >= line_length\n\n # reset the accumulator and points on this line\n for k in range(2):\n px = x0\n py = y0\n dx = dx0\n dy = dy0\n if k > 
0:\n dx = -dx\n dy = -dy\n while 1:\n if xflag:\n x1 = px\n y1 = int(py) >> shift\n else:\n x1 = int(px) >> shift\n y1 = py\n # if non-zero point found, continue the line\n if mask[y1, x1]:\n if acceptableLine:\n accum_idx = int(round((cosinTheta[j] * x1 + sinTheta[j] * y1)) + offset)\n accum[accum_idx, max_theta] -= 1\n mask[y1, x1] = 0\n # exit when the point is the line end\n if x1 == line_end[k, 0] and y1 == line_end[k, 1]:\n break\n px += dx\n py += dy\n\n # add line to the result\n if acceptableLine:\n lines.append(((line_end[0, 0], line_end[0, 1]),\n (line_end[1, 0], line_end[1, 1])))\n if len(lines) > lines_max:\n return lines\n\n return lines", "def learning_curve():\n loss = []\n val_loss = []\n data_size = []\n\n x_slid, y_slid = sliding_window_main(x, y)\n x_train, y_train, x_val, y_val, x_test, y_test = data_splitting_main(x_slid, y_slid)\n m_tot = x_train.shape[0]\n\n batch_step = 50\n try:\n for m in range(batch_size, m_tot, batch_step*batch_size):\n print(\"Training: \", m)\n net = create_network()\n history = trainer(net, x_train[:m], y_train[:m], x_val, y_val)\n loss.append(history.history[\"loss\"][-1])\n val_loss.append(history.history[\"val_loss\"][-1])\n data_size.append(m)\n\n print(\"Loss:\", loss[-1])\n print()\n\n finally:\n plt.plot(data_size, loss, label=\"Loss\", marker=\"o\")\n plt.plot(data_size, val_loss, label=\"Val Loss\", marker=\"o\")\n plt.xlabel(\"m\")\n plt.ylabel(\"Losses\")\n plt.title(\"Model Loss\")\n plt.legend()\n plt.savefig(\"img/\" + datetime.now().strftime(\"%y%m%d_%H%M\") + \"_learning_curve.png\")\n plt.show()\n plt.close()\n\n return loss, val_loss", "def _metropolis_hastings(\n self, currentLogPs, proposalLogPs, nChains, jumpLogP=0, reverseJumpLogP=0\n ):\n logMetropHastRatio = np.array(proposalLogPs) - np.array(\n currentLogPs\n ) # + (reverseJumpLogP - jumpLogP)\n decision = np.log(np.random.uniform(size=nChains)) < logMetropHastRatio\n\n # replace values which ecoures an overflow for e^x with 100\n isATooBigValue = logMetropHastRatio >= 1e3\n logMetropHastRatio[isATooBigValue] = 1e2\n\n return decision, np.minimum(1, np.exp(logMetropHastRatio))", "def run_genetic_algorithm(gaObj, peaks, epoch, range_peaks, gen_lines):\n\n for line in range_peaks:\n gaObj.create_population(peaks[line], peaks[line+1])\n epoch_line = pymp.shared.list()\n for p in range(epoch): \n st_time = time.time() \n gen_line = gaObj.call()\n epoch_line.append(gen_line.A) # For draw results each epoch, add results of each epoch.\n print(f'Line = {line}, Epoch = {p}, fit = {gen_line.fit}, Time = {time.time()-st_time}') \n gen_lines.append(epoch_line)\n \n return gen_lines", "def calculate(self, time):\n from numpy import sqrt\n\n gamma = self.gamma\n xr, r1, p1, u1 = self.vn[0]\n xl, r4, p4, u4 = self.vn[3]\n\n a4 = sqrt(gamma*p4/r4)\n a1 = sqrt(gamma*p1/r1)\n\n # calculate region 2.\n p41 = p4/p1\n p21 = self.strength(p4/p1, gamma, a1/a4)\n r21 = (1+(gamma+1)/(gamma-1)*p21) / ((gamma+1)/(gamma-1)+p21)\n r2 = r21*r1\n u2 = a1/gamma * (p21-1) * sqrt( \n 2*gamma/(gamma+1) / (p21 + (gamma-1)/(gamma+1)) )\n p2 = p21*p1\n\n # calculate shock speed.\n us = a1 * sqrt( (gamma+1)/(2*gamma)*(p21-1) + 1 )\n self.vn[1] = us*time, r2, p2, u2\n\n # caluculate region 3.\n p34 = p21/p41\n r34 = p34**(1/gamma)\n r3 = r34*r4\n p3 = p34*p4\n u3 = u2\n a3 = sqrt(gamma*p3/r3)\n self.vn[2] = u2*time, r3, p3, u3\n\n # calculate expansion wave.\n self.ve[:] = self.expwave(\n r4, p4, a4, u3, a3, gamma, time, self.nx)\n\n self.ve[0] += self.xshift\n self.vn[:,1:3] += self.xshift", "def 
dY_dt(self, y, t=0):\n\t\t \n\t\t#variables\n\t\tpSgg = y[0] / float(sum(y))\n\t\tpSgh = y[3] / float(sum(y))\n\t\tpSh = y[3] / float(y[3] + y[4] + y[5])\n\t\t\n\t\t#exit flows\n\t\texit_Sg = y[0] * (1 / time_active) * t \n\t\texit_Pg = y[1] * (1 / time_active) * t\n\t\texit_PPg = y[2] * (1 / time_active) * t\n\t\texit_Sh = y[3] * (1 / time_active) * t\n\t\texit_Ph = y[4] * (1 / time_active) * t\n\t\texit_PPh = y[5] * (1 / time_active) * t\n\t\t#episodic flows\n\t\tSg_to_h = y[0] * (1 / tin_g) * t\n\t\tPg_to_h = y[1] * (1 / tin_g) * t\n\t\tPPg_to_h = y[2] * (1 / tin_g) * t\n\t\tSh_to_g = y[3] * (1 / tin_h) * t\n\t\tPh_to_g = y[4] * (1 / tin_h) * t\n\t\tPPh_to_g = y[5] * (1 / tin_h) * t\n\t\t#entry flows\n\t\tinto_g = new_g * t\n\t\tinto_h = new_h * t\n\t\t#infection flows\n\t\tnewinf_gg = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgg * t\n\t\tnewinf_gh = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgh * t\n\t\tnewinf_h = (y[4] * B1 + y[5] * B2) * Ch * pSh * t\n\t\t#stage progression flows\n\t\tPg_to_PPg = y[1] * D1 * t\n\t\tPPg_to_d = y[2] * D2 * t\n\t\tPh_to_PPh = y[4] * D1 * t\n\t\tPPh_to_d = y[5] * D2 * t\n\t\t\t\n\t\tstate = [- exit_Sg - newinf_gg - Sg_to_h + into_g + Sh_to_g,\n\t\t\t\t - exit_Pg - Pg_to_PPg - Pg_to_h + newinf_gg + Ph_to_g,\n\t\t\t\t - exit_PPg - PPg_to_d - PPg_to_h + Pg_to_PPg + PPh_to_g,\n\t\t\t\t - exit_Sh - newinf_gh - newinf_h - Sh_to_g + into_h + Sg_to_h,\n\t\t\t\t - exit_Ph - Ph_to_PPh - Ph_to_g + newinf_gh + newinf_h + Pg_to_h,\n\t\t\t\t - exit_PPh - PPh_to_d - PPh_to_g + Ph_to_PPh + PPg_to_h]\n\t\n\t\treturn state", "def hurst(data):\n\tn = 6\n\tdata = pd.Series(data).pct_change()[1:]\n\tars = list()\n\tlag = list()\n\tfor i in range(n):\n\t\tm = 2 ** i\n\t\tsize = np.size(data) // m\n\t\tlag.append(size)\n\t\tpanel = {}\n\t\tfor j in range(m):\n\t\t\tpanel[str(j)] = data[j * size:(j + 1) * size].values\n\n\t\tpanel = pd.DataFrame(panel)\n\t\tmean = panel.mean()\n\t\tdeviation = (panel - mean).cumsum()\n\t\tmaxi = deviation.max()\n\t\tmini = deviation.min()\n\t\tsigma = panel.std()\n\t\trs = maxi - mini\n\t\trs = rs / sigma\n\t\tars.append(rs.mean())\n\n\tlag = np.log10(lag)\n\tars = np.log10(ars)\n\thurst_exponent = np.polyfit(lag, ars, 1)\n\tresult = hurst_exponent[0]\n\treturn result", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 1\n self.filter['steps'] = steps\n \n return", "def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)", "def main():\r\n PathGenerator = TrajectoryGenerator()\r\n \r\n ## coordinate \r\n # Y \r\n # ^ /\r\n # | /\r\n # | / <theta>\r\n # o -- -- -- >X\r\n\r\n x_0 = 0.0 # initial x position\r\n y_0 = 0.0 # initial y position\r\n theta_0 = 0.0 *np.pi/180 # initial heading angle of the vehicle \r\n kappa_0 = 0.0 *np.pi/180 # initial steering angle \r\n initial_state = [x_0, y_0, theta_0, kappa_0] \r\n \r\n x_f = 13.0 # final x position\r\n y_f = 8.0 # final y position\r\n theta_f = 0.0 *np.pi/180 # final heading angle of the vehicle \r\n kappa_f = 0.0 *np.pi/180 # final steering angle \r\n final_state = [x_f, y_f, theta_f, kappa_f] \r\n\r\n traject = PathGenerator.compute_spline(initial_state, final_state)\r\n point_array = np.asarray(traject)\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n sample_resolution = 0.5\r\n temp_goal_list = []\r\n 
for i in range(-2, 3):\r\n temp_final_state = np.copy(final_state)\r\n temp_final_state[1] = temp_final_state[1] + float(i)*sample_resolution\r\n temp_goal_list.append(temp_final_state)\r\n \r\n start = time.time()\r\n point_list = []\r\n for i in range(0, 5):\r\n temp_goal = temp_goal_list[i]\r\n traject = PathGenerator.compute_spline(initial_state, temp_goal)\r\n point_list.append(traject)\r\n end = time.time()\r\n print('Executed time is %f'%(end - start))\r\n \r\n # pdb.set_trace()\r\n for i in range(0,5):\r\n point_array = np.asarray(point_list[i])\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n plt.axis('equal')\r\n plt.show()", "def dopri853core(\n n, func, x, t, hmax, h, rtol, atol, nmax, safe, beta, fac1, fac2, pos_neg, args\n):\n # array to store the result\n result = numpy.zeros((len(t), n))\n\n # initial preparations\n facold = 1.0e-4\n expo1 = 1.0 / 8.0 - beta * 0.2\n facc1 = 1.0 / fac1\n facc2 = 1.0 / fac2\n\n k1 = numpy.array(func(x, t[0], *args))\n hmax = numpy.fabs(hmax)\n iord = 8\n\n if h == 0.0: # estimate initial time step\n h, k1, k2, k3 = hinit(func, x, t, pos_neg, k1, iord, hmax, rtol, atol, args)\n\n reject = 0\n t_current = t[\n 0\n ] # store current integration time internally (not the current time wanted by user!!)\n t_old = t[0]\n finished_user_t_ii = 0 # times indices wanted by user\n\n result[0, :] = x\n\n # basic integration step\n while (\n finished_user_t_ii < len(t) - 1\n ): # check if the current computed time indices less than total inices needed\n # keep time step not too small\n h = pos_neg * numpy.max([numpy.fabs(h), 1e3 * uround])\n\n # the twelve stages\n xx1 = x + h * a21 * k1\n k2 = numpy.array(func(xx1, t_current + c2 * h, *args))\n\n xx1 = x + h * (a31 * k1 + a32 * k2)\n k3 = numpy.array(func(xx1, t_current + c3 * h, *args))\n\n xx1 = x + h * (a41 * k1 + a43 * k3)\n k4 = numpy.array(func(xx1, t_current + c4 * h, *args))\n\n xx1 = x + h * (a51 * k1 + a53 * k3 + a54 * k4)\n k5 = numpy.array(func(xx1, t_current + c5 * h, *args))\n\n xx1 = x + h * (a61 * k1 + a64 * k4 + a65 * k5)\n k6 = numpy.array(func(xx1, t_current + c6 * h, *args))\n\n xx1 = x + h * (a71 * k1 + a74 * k4 + a75 * k5 + a76 * k6)\n k7 = numpy.array(func(xx1, t_current + c7 * h, *args))\n\n xx1 = x + h * (a81 * k1 + a84 * k4 + a85 * k5 + a86 * k6 + a87 * k7)\n k8 = numpy.array(func(xx1, t_current + c8 * h, *args))\n\n xx1 = x + h * (a91 * k1 + a94 * k4 + a95 * k5 + a96 * k6 + a97 * k7 + a98 * k8)\n k9 = numpy.array(func(xx1, t_current + c9 * h, *args))\n\n xx1 = x + h * (\n a101 * k1\n + a104 * k4\n + a105 * k5\n + a106 * k6\n + a107 * k7\n + a108 * k8\n + a109 * k9\n )\n k10 = numpy.array(func(xx1, t_current + c10 * h, *args))\n\n xx1 = x + h * (\n a111 * k1\n + a114 * k4\n + a115 * k5\n + a116 * k6\n + a117 * k7\n + a118 * k8\n + a119 * k9\n + a1110 * k10\n )\n k2 = numpy.array(func(xx1, t_current + c11 * h, *args))\n\n xx1 = x + h * (\n a121 * k1\n + a124 * k4\n + a125 * k5\n + a126 * k6\n + a127 * k7\n + a128 * k8\n + a129 * k9\n + a1210 * k10\n + a1211 * k2\n )\n\n t_old_older = numpy.copy(t_old)\n t_old = numpy.copy(t_current)\n t_current += h\n\n k3 = numpy.array(func(xx1, t_current, *args))\n\n k4 = (\n b1 * k1\n + b6 * k6\n + b7 * k7\n + b8 * k8\n + b9 * k9\n + b10 * k10\n + b11 * k2\n + b12 * k3\n )\n k5 = x + h * k4\n\n # error estimation\n sk = atol + rtol * numpy.max([numpy.fabs(x), numpy.fabs(k5)], axis=0)\n erri = k4 - bhh1 * k1 - bhh2 * k9 - bhh3 * k3\n err2 = numpy.sum(numpy.square(erri / sk), axis=0)\n erri = (\n er1 * k1\n + er6 * k6\n + er7 * k7\n + 
er8 * k8\n + er9 * k9\n + er10 * k10\n + er11 * k2\n + er12 * k3\n )\n err = numpy.sum(numpy.square(erri / sk), axis=0)\n\n deno = err + 0.01 * err2\n deno = 1.0 if deno <= 0.0 else deno\n err = numpy.fabs(h) * err * numpy.sqrt(1.0 / (deno * n))\n\n # computation of hnew\n fac11 = numpy.power(err, expo1)\n\n # Lund-stabilization\n fac = fac11 / pow(facold, beta)\n\n # we require fac1 <= hnew / h <= fac2\n fac = numpy.max([facc2, numpy.min([facc1, fac / safe])])\n hnew = h / fac\n\n if err <= 1.0:\n # step accepted\n facold = numpy.max([err, 1.0e-4])\n k4 = numpy.array(func(k5, t_current, *args))\n\n # final preparation for dense output\n rcont1 = numpy.copy(x)\n xdiff = k5 - x\n rcont2 = xdiff\n bspl = h * k1 - xdiff\n rcont3 = numpy.copy(bspl)\n rcont4 = xdiff - h * k4 - bspl\n rcont5 = (\n d41 * k1\n + d46 * k6\n + d47 * k7\n + d48 * k8\n + d49 * k9\n + d410 * k10\n + d411 * k2\n + d412 * k3\n )\n rcont6 = (\n d51 * k1\n + d56 * k6\n + d57 * k7\n + d58 * k8\n + d59 * k9\n + d510 * k10\n + d511 * k2\n + d512 * k3\n )\n rcont7 = (\n d61 * k1\n + d66 * k6\n + d67 * k7\n + d68 * k8\n + d69 * k9\n + d610 * k10\n + d611 * k2\n + d612 * k3\n )\n rcont8 = (\n d71 * k1\n + d76 * k6\n + d77 * k7\n + d78 * k8\n + d79 * k9\n + d710 * k10\n + d711 * k2\n + d712 * k3\n )\n\n # the next three function evaluations\n xx1 = x + h * (\n a141 * k1\n + a147 * k7\n + a148 * k8\n + a149 * k9\n + a1410 * k10\n + a1411 * k2\n + a1412 * k3\n + a1413 * k4\n )\n k10 = numpy.array(func(xx1, t_old + c14 * h, *args))\n xx1 = x + h * (\n a151 * k1\n + a156 * k6\n + a157 * k7\n + a158 * k8\n + a1511 * k2\n + a1512 * k3\n + a1513 * k4\n + a1514 * k10\n )\n k2 = numpy.array(func(xx1, t_old + c15 * h, *args))\n xx1 = x + h * (\n a161 * k1\n + a166 * k6\n + a167 * k7\n + a168 * k8\n + a169 * k9\n + a1613 * k4\n + a1614 * k10\n + a1615 * k2\n )\n k3 = numpy.array(func(xx1, t_old + c16 * h, *args))\n\n # final preparation\n rcont5 = h * (rcont5 + d413 * k4 + d414 * k10 + d415 * k2 + d416 * k3)\n rcont6 = h * (rcont6 + d513 * k4 + d514 * k10 + d515 * k2 + d516 * k3)\n rcont7 = h * (rcont7 + d613 * k4 + d614 * k10 + d615 * k2 + d616 * k3)\n rcont8 = h * (rcont8 + d713 * k4 + d714 * k10 + d715 * k2 + d716 * k3)\n\n k1 = numpy.copy(k4)\n x = numpy.copy(k5)\n\n # loop for dense output in this time slot\n while (finished_user_t_ii < len(t) - 1) and (\n pos_neg * t[finished_user_t_ii + 1] < pos_neg * t_current\n ):\n result[finished_user_t_ii + 1, :] = dense_output(\n t[finished_user_t_ii + 1],\n t_old,\n h,\n [rcont1, rcont2, rcont3, rcont4, rcont5, rcont6, rcont7, rcont8],\n )\n finished_user_t_ii += 1\n\n if numpy.fabs(hnew) > hmax:\n hnew = pos_neg * hmax\n if reject:\n hnew = pos_neg * numpy.min([numpy.fabs(hnew), numpy.fabs(h)])\n\n reject = 0\n else:\n # step rejected since error too big\n hnew = h / numpy.min([facc1, fac11 / safe])\n reject = 1\n\n # reverse time increment since error rejected\n t_current = numpy.copy(t_old)\n t_old = numpy.copy(t_old_older)\n\n h = numpy.copy(hnew) # current h\n\n return result", "def lineaarinen():\n x = []\n y = []\n if not kirjasto[\"korjaus\"]:\n try:\n for erottaja in kirjasto[\"lineaariset_arvot\"]:\n x_arvo, y_arvo = erottaja\n x.append(x_arvo)\n y.append(y_arvo)\n kirjasto[\"lineaariset_arvot\"] = []\n kirjasto[\"pisteet\"] = []\n if x and x[0] != x[1] and y[0] != y[1]:\n kk = (y[1]-y[0])/(x[1]-x[0])\n intensiteetti_korjaus = []\n for j in kirjasto[\"kineettiset_energiat\"]:\n y_korjaava = kk * (j - x[0]) + y[0]\n intensiteetti_korjaus.append(y_korjaava)\n for k, l in 
enumerate(kirjasto[\"intensiteetit\"]):\n korjaus = l - intensiteetti_korjaus[k]\n kirjasto[\"korjaus\"].append(korjaus)\n else:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteiden valinnassa tapahtui virhe\")\n return\n except IndexError:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteitä ei ole valittu\")\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)", "def punches(self):\n #:TODO Need to parameterize n\n # Initialize smoothing function\n # Also because I can't take the second derivitive\n\n n = 3\n assert (len(self.averages)==len(self.timestamps))\n size = len(self.averages)\n slopes = []\n for t in [0,size-n]:\n averages = np.asarray(self.averages[t:size])\n timestamps = np.asarray(self.timestamps[t:size])\n \"\"\"\n slope = np.absolute((np.corrcoef(averages,\n timestamps))*np.std(averages)/np.std(timestamps))\n \"\"\"\n slope = np.absolute(np.polyfit(timestamps, averages, 1)[0])*1000000\n #plt.scatter(timestamps, averages)\n slopes.append(slope)\n # If you were punching you are likely still punching need to set a weighting factor to this somehow\n # print(slopes[1])\n self.smoothing_queue.pop(0)\n if self.SIG_DELTA_AVERAGE < slopes[1]:\n self.smoothing_queue.append(1)\n else:\n self.smoothing_queue.append(0)\n if self.smoothing_queue.count(1) > len(self.smoothing_queue)/2:\n punching = True\n else: punching = False\n # print(self.smoothing_queue)\n\n return punching\n #self.counter +=1\n \"\"\"\n if self.counter==self.timing:\n self.counter == 0\n else:\n \"\"\"", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def main():\n print('This program computes Hailstone sequences.')\n data = int(input('Enter a number: '))\n start = time.time()\n n = data\n steps = 0\n # Check if the number is 1 or not.\n if n == FINAL:\n print('It took ' + str(steps) + ' steps to reach 1.')\n # If the number is not 1, we have to start calculating until it reach 1.\n else:\n while True:\n # Every time, check if the number is 1(stop) or not.\n if n == FINAL:\n break\n # If a number is odd, multiply it by 3 and add 1.\n if data % 2 == 1:\n n = 3*n+1\n print(str(data) + ' is odd, so I make 3n+1: ' + str(n))\n # If a number is even, divide it by 2.\n if data % 2 == 0:\n n = n//2\n print(str(data) + ' is even, so I take half: ' + str(n))\n data = n\n steps += 1\n print('It took ' + str(steps) + ' steps to reach 1.')\n end = time.time()\n print(\"The time of execution of above program is :\", end - start)", "def params(timeseries_input):\n # Settings for Nelder Mead Algorithm\n global timeseries\n timeseries=timeseries_input\n\n NumIters = 1 # First Iteration\n MaxIters = 1e3 # Maximum number of iterations\n Tolerance = 1e-5 # Tolerance on best and worst function values\n N = 5 # Number of Heston and Nandi parameters\n r = 0.01 / 252.0 # Risk Free Rate\n\n # Heston and Nandi parameter starting values (vertices) in vector form\n\n x = [[0 for i in range(N + 1)] for 
j in range(N)]\n x[0][0] = 5.02e-6;\n x[0][1] = 5.12e-6;\n x[0][2] = 5.00e-6;\n x[0][3] = 4.90e-6;\n x[0][4] = 4.95e-6;\n x[0][5] = 4.99e-6 # omega\n x[1][0] = 1.32e-6;\n x[1][1] = 1.25e-6;\n x[1][2] = 1.35e-6;\n x[1][3] = 1.36e-6;\n x[1][4] = 1.30e-6;\n x[1][5] = 1.44e-6 # alpha\n x[2][0] = 0.79;\n x[2][1] = 0.80;\n x[2][2] = 0.78;\n x[2][3] = 0.77;\n x[2][4] = 0.81;\n x[2][5] = 0.82 # beta\n x[3][0] = 427.0;\n x[3][1] = 421.0;\n x[3][2] = 425.0;\n x[3][3] = 419.1;\n x[3][4] = 422.1;\n x[3][5] = 430.0 # gamma\n x[4][0] = 0.21;\n x[4][1] = 0.20;\n x[4][2] = 0.22;\n x[4][3] = 0.19;\n x[4][4] = 0.18;\n x[4][5] = 0.205 # lambda\n\n # Run Nelder Mead and output Nelder Mead results\n B = NelderMead(LogLike, N, NumIters, MaxIters, Tolerance, x, r)\n\n #\tprint(\"Nelder Mead Minimization of Log-Likelihood for Heston and Nandi parameters\")\n #\tprint(\"---------------------------------\")\n #\tprint(\"omega = \", B[0])\n #\tprint(\"alpha = \", B[1])\n #\tprint(\"beta = \", B[2])\n #\tprint(\"gamma = \", B[3])\n #\tprint(\"lambda = \", B[4])\n #\tprint(\"Value of Objective Function = \", B[N])\n #\tprint(\"Number of Iterations = \", B[N+1])\n #\tprint(\"Persistence \", B[2]+B[1]*(B[3]**2) )\n #\tprint(\"---------------------------------\")\n\n # alpha,beta,gamma,omega,lambda\n return [B[1], B[2], B[3], B[0], B[4]]", "def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def fn(i, x):\n if i == goal: return x == n \n ans = 0 \n if x < n: ans += (n-x) * fn(i+1, x+1) # a new song\n if k < x: ans += (x-k) * fn(i+1, x) # an old song\n return ans % 1_000_000_007", "def run():\n trials = 
100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. 
success rate per {} trials:\".format(trials), std_success_rate[i]", "def Q_learning_test(env,alpha,gamma,episodes, q_table):\n %time\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n total_reward = 0\n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n \n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n next_state, reward, done, info = env.step(action) \n\n\n if reward == -10:\n penalties += 1\n \n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n \n total_reward += reward\n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\n \n print(\"Training finished.\\n\")\n \n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened testing reward per episode\", pad = 30 , size = BIGGER_SIZE)\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major', labelsize=16);\n plt.tick_params(axis='both', which='minor', labelsize=16);\n #plt.xlim(100000, 200000);\n #plt.ylim(0,50)\n # plt.xticks(np.arange(0, episodes+1, 5000));\n # plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def computeTm(self):\n #first step is finding the derivative series of the well\n x = self.temperatures\n if self.fluorescence == None:\n self.Tm = None\n return\n y = self.fluorescence\n \n xdiff = np.diff(x)\n dydx = -np.diff(y)/xdiff\n #the derivative series, has one less index since there is one fewer differences than points\n seriesDeriv = pandas.Series(dydx, x[:-1])\n \n #now that we have the derivative series, we can find the Tm\n lowestPoint = 0\n lowestPointIndex = None\n \n #gets number of signchanges between max and min of the curve, used to determin if the curve\n #is complex or not\n lowestPoint2 = 1\n lowestIndex2 = None\n highestPoint = 0\n highestIndex = None\n previous = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value > highestPoint:\n highestPoint = value\n highestIndex = i\n if highestIndex == 0 :\n highestPoint = 0\n highestIndex = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n for i, value in enumerate(self.fluorescence[:-1]):\n if i < lowestIndex2:\n continue\n if value > highestPoint:\n highestPoint = value\n highestIndex = i\n else:\n for i, value in enumerate(self.fluorescence[:-1]):\n if i > highestIndex:\n break\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n signChange = False\n for ind in seriesDeriv.index[lowestIndex2+1:highestIndex]:\n \n if previous:\n if seriesDeriv[ind] + SIGN_CHANGE_THRESH < 0 and previous - SIGN_CHANGE_THRESH > 0:\n signChange = True\n if seriesDeriv[ind] - SIGN_CHANGE_THRESH > 0 and previous + SIGN_CHANGE_THRESH < 0:\n signChange = True\n # if seriesDeriv[ind] == 0:\n # signChangeCount += 1\n previous = seriesDeriv[ind]\n\n \n #finding the lowest point and its index on the derivative series\n #only search for Tm up to 90degrees, since last part is hard to predict\n #and often gives false positives\n ignoreNum = int(len(seriesDeriv.index)*0.125)\n for ind in seriesDeriv.index[:-ignoreNum]:\n if seriesDeriv[ind]<lowestPoint:\n lowestPoint = seriesDeriv[ind]\n lowestPointIndex = ind\n \n #TODO working, tms not steep enough 
added to complex\n #if the slope is not steep enough, tm remains saved, but curve is grouped with the\n #complex curves (now known as the unreliable group)\n #if lowestPoint > -0.000001 / (normalisationFactor / saturation max point of all curves thing):\n # print self.name, 'lowestpoint too small', lowestPoint\n # self.complex = True\n\n #if lowest point is the first index, then no curve fit is required\n if lowestPointIndex == seriesDeriv.index[0]:\n tm = lowestPointIndex\n self.Tm = tm\n \n #set complex to true if curve was complex\n if signChange:\n self.complex = True\n return\n \n #could not find any Tm\n if lowestPointIndex == None:\n self.Tm = None\n \n #if no tm, the curve hopefully be picked up as a monotonic/in the noise/saturated/outlier\n #however, if this does not happen, the curve remains as complex\n self.complex = True\n return \n \n #the indices in the series either side of the lowest index\n #note the first list is indexed e.g. list[i] where i is the section using .index\n leftIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)-1]\n rightIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)+1]\n \n \n #matrices used to fit a parabola to the 3 points\n Y=[seriesDeriv[leftIndex],\n seriesDeriv[lowestPointIndex],\n seriesDeriv[rightIndex]]\n \n A=[[leftIndex**2, leftIndex, 1],\n [lowestPointIndex**2, lowestPointIndex, 1],\n [rightIndex**2, rightIndex, 1]]\n \n #solve for b, in the form Y=Ab\n (a,b,c) = np.linalg.solve(A,Y)\n \n #initialise tm to left most point of relevant curve\n tm=seriesDeriv[leftIndex]\n tmValue=0\n #make tm the lowest point on the fitted parabola rounded to nearest 0.01\n for x in np.arange(leftIndex,rightIndex,0.01):\n point = (a*(x**2) + b*x + c)\n if tmValue > point:\n tmValue = point\n tm = x\n self.Tm = tm\n \n #again check for complex shape before returning\n if signChange:\n self.complex = True\n\n\n averagePoint = (lowestPoint2 +highestPoint) / 2\n i = lowestIndex2\n while self.fluorescence[i]<averagePoint:\n i += 1;\n\n # estimates tm by another method and if the difference is too large the curve is considred complex\n if (self.temperatures[i] -self.Tm)**2 > 5**2:\n self.complex=True\n return" ]
[ "0.5885385", "0.5741723", "0.5741651", "0.5738419", "0.5645377", "0.55954784", "0.5511052", "0.5502609", "0.54691374", "0.5444442", "0.5437097", "0.5412522", "0.53915256", "0.53911185", "0.53891474", "0.5327796", "0.5316556", "0.5316556", "0.52812976", "0.5263162", "0.5256186", "0.519026", "0.5181705", "0.5176984", "0.5163407", "0.5162127", "0.5135392", "0.51346296", "0.51337475", "0.51188886", "0.51129895", "0.51060283", "0.51051044", "0.51023525", "0.51000834", "0.509934", "0.5092364", "0.50893366", "0.5086625", "0.50850147", "0.50839436", "0.508249", "0.5081394", "0.5076661", "0.50759476", "0.5072742", "0.5066462", "0.5060872", "0.5050987", "0.5050416", "0.5046655", "0.5039335", "0.5038328", "0.50374496", "0.5034555", "0.5034326", "0.5030122", "0.5028157", "0.50268126", "0.50266814", "0.5026525", "0.50184405", "0.5008308", "0.50018543", "0.50013256", "0.49928337", "0.4991004", "0.49846953", "0.49814102", "0.49810824", "0.49714676", "0.49670774", "0.49650377", "0.49637187", "0.4962459", "0.49580175", "0.49568808", "0.49537978", "0.495215", "0.49512985", "0.49450132", "0.49444613", "0.4943095", "0.4937482", "0.49343917", "0.4932754", "0.49301887", "0.49281672", "0.4923683", "0.4919097", "0.49187228", "0.49156106", "0.49015015", "0.48981908", "0.48914608", "0.4882283", "0.48807943", "0.48782647", "0.48737115", "0.4873452", "0.48696095" ]
0.0
-1
read in raw data from Matlab file
def read_mat(filename, offset, count_read, init_flag): import numpy as np import os import sys from scipy.io import loadmat import logging sample_size = 2*2 if (init_flag == 1): logging.debug('Reading matlab file with init_flag == 1') f = loadmat(filename) # Read in the frequency in Hz fs = np.squeeze(f['fs']) ecg = np.squeeze(f['ecg']) pp = np.squeeze(f['pp']) # Determine the file size in bytes (1 byte = 8 bits) file_size = (1+len(ecg)+len(pp))*2 data_info = np.array([file_size, fs]) return(data_info) else: logging.debug('Reading matlab file with init_flag == 0') # Adjust the offset offset = int(offset/sample_size) try: f = loadmat(filename) ecg = np.squeeze(f['ecg']) pp = np.squeeze(f['pp']) ecg_data = ecg[offset:offset+count_read] pp_data = pp[offset:offset+count_read] data = np.zeros(len(ecg_data) + len(pp_data)) data[0::2] = ecg_data data[1::2] = pp_data # plt.plot(ecg) # plt.show(block=True) except EOFError: logging.error('Reached end of input file, can not read another ' 'block') print('Finished processing all data...') print('Heart Rate Monitor Finished') sys.exit() # If any Nan's are present, convert to 0 data = np.nan_to_num(data) return(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_raw(rawfile, shape, dtype=np.uint16, kind='middleton'):\n\n # -- alert\n print(\"READ_RAW: reading {0}...\".format(rawfile))\n\n\n # -- read file\n if kind=='middleton':\n return np.fromfile(open(rawfile),dtype) \\\n .reshape(shape[2],shape[0],shape[1])[:,:,::-1] \\\n .transpose(1,2,0) \\\n .astype(float)", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def Read_Rcwa_Matlab(Path) : \n x,y=[],[]\n fs = open(Path, 'r') \n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:25]))\n y.append(float(txt[29:-2])) \n fs.close()\n return x,y", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def _loadMatlab(self, file, with_axis=None):\n self.set_data_writable()\n _data = io.loadmat(file)[\"data\"]\n self.data = self._extract_data_with_axis(_data, with_axis)\n self.set_data_protected()", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def read_file(self,file_name):\r\n data = np.genfromtxt(file_name)\r\n return data;", "def _read_data(self):", "def _read_rmf(file):\n\n with fits.open(file) as hdul:\n data = hdul[2].data\n\n return data['energ_lo'], data['energ_hi'], data['n_grp'], data['f_chan'], data['n_chan'], data['matrix']", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = 
open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def read(self, src):\n self.read_mesh(src)\n self.read_data(src)", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def readRawSamples(fname):\n\n d = numpy.fromfile(fname, dtype=numpy.float32)\n #d = d.astype(numpy.float64)\n #d = (d - 128) / 128.0\n\n return d[::2] + 1j * d[1::2]", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def input_data(self):\n return read_file(self.file_path)", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def read_infile(infile):\n # There are a variable header lengths possible.\n # Loop through and look for when the line starts\n # with '1', the first index.\n nheader = 0\n try:\n with open(infile, 'r') as f:\n for line in f:\n if line.strip().startswith('1'):\n break\n nheader += 1\n except IOError:\n message = f'Unable to open {infile} in modconvert.'\n raise PipeCalError(message)\n index, freq, tbr, flux, trj = np.genfromtxt(infile, unpack=True,\n skip_header=nheader)\n return index, freq, tbr, flux, trj", "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 
332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True", "def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def get_data():\n return np.genfromtxt(FILENAME, delimiter=',', skip_header=1)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = 
np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def read_mat_file(filename):\n mat_contents = sio.loadmat(filename)\n image_data = mat_contents['image']['img'][0, 0]\n return image_data", "def __load_raw_data(path: str,\n filename: str):\n filepath = os.path.join(path, filename)\n f = open(filepath)\n data = f.read()\n f.close()\n\n lines = data.split('\\n')\n header = lines[0].split(',')\n lines = lines[1:]\n\n float_data = np.zeros((len(lines), len(header) - 1))\n for i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n\n return float_data", "def datread(file=None, header=0):\n with open(file, 'r') as fr:\n op = np.array([list(map(float, l.split())) for l in fr.readlines()[header:]])\n return op", "def load(filename):\n return sio.loadmat(filename, appendmat=False, squeeze_me=True)['data']", "def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)", "def read_raw_data(self):\n # Must be set by the user\n raise Exception(\"not implemented\")", "def load_raw_data(dir, matlab=False):\n\n\tcurrent_dir = os.getcwd() \n\t\n\tos.chdir(dir)\n\t\n\tfile_names = []\n\tdata = {}\n\t\n\t\n\t## For text files\n\tif not matlab:\n\t\tfiles = glob.glob('*.txt')\n\t\t\n\t\tassert len(files) > 0, 'No *.txt files found!'\n\n\t\tif len(glob.glob('*.mat')) > 0:\n\t\t\tprint('WARNING: matlab files also found in directory: \\t%s'%dir)\n\t\t\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\tdata['markers'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('spike') > -1:\n\t\t\t\tdata['spikes'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('shape') > -1:\n\t\t\t\tdata['shape'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\n\n\t## For matlab files\n\t# These matlab files have more useful data than is extracted here.\n\telif matlab:\n\t\tfiles = glob.glob('*.mat')\n\t\t\n\t\tassert len(files) > 0, 'No matlab files found!'\n\t\t\n\t\tif len(glob.glob('*.txt')) > 0:\n\t\t\tprint('WARNING: text files also found in directory: \\t%s' %dir)\n\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\t\n\t\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\t\n\t\t\t\tmark_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tmark_key = mark_file.keys()[0] # Gets name of 
relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the markers\n\t\t\t\tdata['markers'] = np.array(mark_file['%s/times' %mark_key])\n\t\t\t\tdata['markers'] = np.reshape(data['markers'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\t\n\t\t\t\t# Extract the numerical codes of the markers, which are listed one-to-one\n\t\t\t\t# with the times extracted above. Useful for an integrity check.\n\t\t\t\t# Zero index necessary as marker codes has three empty columns\n\t\t\t\tdata['marker_codes'] = np.array(mark_file['%s/codes' %mark_key][0])\n\t\t\t\tdata['marker_codes'] = np.reshape(data['marker_codes'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\tfile_names.append(f)\n\n\t\t\telif f_name.find('spike') > -1:\n\n\t\t\t\tspike_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tspike_key = spike_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the spikes\n\t\t\t\tdata['spikes'] = np.array(spike_file['%s/times' %spike_key])\n\t\t\t\tdata['spikes'] = np.reshape(data['spikes'], -1) # turn to 1D array, as first axis redundant\n\n\n\t\t\t\t#Extract trace for each spike. First Dim-trace, second-spikes.\n\t\t\t\tspike_traces = np.array(spike_file['%s/values' %spike_key])\n\t\t\t\t\n\t\t\t\t# Calculate Average shape (for all templates, which are coded in '/codes')\n\t\t\t\tavg_spike_trace = np.mean(spike_traces, axis=1)\n\t\t\t\tsem_avg_spike_trace = stats.sem(spike_traces, axis=1, ddof=1)\n\t\t\t\t\n\t\t\t\tdata['shape'] = avg_spike_trace\n\t\t\t\tdata['shape_SEM'] = sem_avg_spike_trace\n\t\t\t\tfile_names.append(f) \n\t\t\t\t\n\t\t\t\t\t\t\n\tos.chdir(current_dir)\n\n\t\t\t\n\tif len(data.keys()) != len(files):\n\t\tmesg = 'Not all of your file names are recognised; they may not have been imported appropriately.'\n\t\tmesg2 = 'File names must contain the key words \"mark\", \"spike\" and/or \"shape.\"'\n\t\tprint(mesg)\n\t\tprint(mesg2)\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data\n\n\t\n\telif len(data.keys()) == len(files):\n\t\tprint('All files imported and assigned')\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data", "def edf_read(file_name, verbose=False):\n header_values = edf_info(file_name, verbose=verbose)\n f = open(file_name, 'r')\n data_type = esrf_to_numpy_datatype(header_values['DataType'])\n if verbose:\n print(header_values['DataType'], data_type)\n # get the payload size\n payload_size = int(header_values['Size'].split('.')[0])\n # get the image size from the ascii header\n dim_1 = int(header_values['Dim_1'].split('.')[0])\n try:\n dim_2 = int(header_values['Dim_2'].split('.')[0])\n except KeyError:\n if verbose:\n print('Dim_2 not defined in header')\n dim_2 = None\n try:\n dim_3 = int(header_values['Dim_3'].split('.')[0])\n except KeyError:\n if verbose:\n print('Dim_3 not defined in header')\n dim_3 = None\n # now read binary data\n header_size = os.path.getsize(file_name) - payload_size\n f.seek(header_size)\n payload = np.fromfile(f, dtype=data_type)\n if dim_1 and dim_2 and dim_3:\n data = np.reshape(payload, (dim_3, dim_2, dim_1)).transpose(2, 1, 0)\n elif dim_1 and dim_2:\n data = np.reshape(payload, (dim_2, dim_1)).transpose(1, 0)\n else:\n data = np.reshape(payload, (dim_1))\n f.close()\n # pay attention to byte order\n if header_values['ByteOrder'] == 'HighByteFirst':\n data = data.byteswap()\n return data", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = 
np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def read_raw(self):\n return self._FITS.read_raw()", "def _process_data_file(self):\n \n with open(self.data_file, 'r') as f:\n self.description = f.readline().strip()\n data = np.loadtxt(self.data_file, skiprows=1)\n\n return data", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def load_raw(fname):\n # Read all the data from the file\n ctd = []\n with open(fname) as ctdfile:\n \n for line in ctdfile:\n \n if (line.find('*') < 0) and (line.find('#') < 0):\n \n # This line contains data; parse the line\n entries = line.strip().split()\n # Convert data to float64\n entries = [np.float64(entries[i]) \n for i in range(len(entries))]\n # Append to list\n ctd.append(entries)\n \n # Return the raw data as an numpy array\n return np.array(ctd)", "def load_signal_raw(filepath):\n signal_raw = sio.loadmat(filepath)['signal_raw']\n return signal_raw", "def readfile(file, sub_im, cr):\n\n root, ext = os.path.splitext(file)\n\n if ext == '.tif':\n print('Reading tiff image:', file)\n par = readpar(root + '.mli.par')\n data = readtiff(file, sub_im, cr)\n\n else: # must be GAMMA flat binary float format\n print('Reading flat binary image', file)\n par = readpar(root + ext + '.par')\n data = readmli(file, par, sub_im, cr)\n\n # extract relevant metadata\n rho_r = float(par['range_pixel_spacing'].split()[0])\n rho_a = float(par['azimuth_pixel_spacing'].split()[0])\n theta = float(par['incidence_angle'].split()[0])\n\n return data, rho_r, rho_a, theta", "def parse_data(fp):\n pass", "def __readData(self, f, nRows, nCols):\n # Efficiently allocate all the memory we'll need.\n data = numpy.empty( (nCols, nRows), float )\n\n # Import data from the LFM Solar Wind file\n rowIndex = 0\n for row in f.readlines():\n if len(row.split()) != nCols: continue\n\n for col, field in enumerate(row.split()):\n data[col, rowIndex] = field\n\n rowIndex += 1\n\n # Bad things can happen if the file header says there is more\n # (or less) data than there actually is within the file!\n assert(rowIndex == nRows)\n\n return data", "def read_single_analysis_data(f):\n \n data = np.loadtxt(f, dtype=np.float64)\n\n return data", "def readData(self):\n self._readHeader()\n self._readSize()\n self._readComments()\n self._readAllROI()\n self._readDate()\n self._readArray()", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n 
self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def read_file(self, fullname):\n\n data = np.genfromtxt(fullname, dtype=None, names=True, skip_header=0)\n return data", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def loadtext(infile):\n warr, farr, earr=np.loadtxt(infile, usecols=(0,1,2), unpack=True)\n return create_spectrum(warr, farr, earr)", "def load_dat(file_name):\n data = loadmat(file_name)\n y = data['y']\n X = data['X']\n return X,y", "def read():\n # TODO", "def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)", "def readData(self, det,lowneu):\n\tf = open(self.file,\"rb\")\n\tfortran.skip(f)\t# Skip header\n\tfor i in range(2*det):\n\t fortran.skip(f)\t# Detector Header & Data\n\tfortran.skip(f)\t\t# Detector Header\n if lowneu:\n fortran.skip(f) # skip low enery neutron data\n\tdata = fortran.read(f)\n\tf.close()\n\treturn data", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def read(self, filename):\n lines = []\n rawData = []\n file = open(filename, \"rU\")\n csv_reader = csv.reader( file )\n for line in csv_reader:\n lines.append(line)\n for item in range(len(line)):\n line[item] = line[item].replace(\" \",\"\")\n self.headers = lines[0]\n self.types = lines[1]\n rawData = lines[2:]\n for row in rawData:\n newRow = []\n for i in range(len(row)):\n if self.types[i] != 'numeric':\n continue\n else:\n newRow.append(float((row[i].strip())))\n self.finalData.append(newRow)\n self.data = np.matrix(self.finalData)\n\n for i in range(len(self.types)):\n if self.types[i] == 'numeric':\n self.numHeadList.append(self.headers[i])\n i = 0\n for header in self.numHeadList:\n self.header2col[header] = i\n i += 1\n\n return self.data", "def rdspecdat(self):\n # TODO : ugh. this is crude. 
Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []", "def read_data(self):\n raise NotImplementedError", "def read_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']", "def load_data(path):\n data = loadmat(path)\n return data['X'], data['y']", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n 
id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return", "def read(self):", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def cam_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? 
'.format(TAG_FLOAT,check)\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n return M,N", "def readFlow(fn):\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))", "def read_fortran(filename):\n with open(filename, 'rb') as file:\n # read size of record\n file.seek(0)\n n = np.fromfile(file, dtype='int32', count=1)[0]\n\n # read contents of record\n file.seek(4)\n v = np.fromfile(file, dtype='float32')\n\n return v[:-1]", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n\n data, label=data[:,:,:,0:2], label[:,:,:,0]\n #data=np.expand_dims(data,axis=-1)\n label=np.expand_dims(label,axis=-1)\n\n return data, label", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def loadascii(self, filename=None):\n data = np.loadtxt(filename)\n if len(data.shape) == 1:\n self.flux = data\n elif len(data.shape) == 2:\n self.wavelength = data[:,0]\n self.flux = data[:,1]", "def pt3_reader(filename):\n with open(filename, 'rb') as f:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Binary file header\n header_dtype = np.dtype([\n ('Ident', 'S16' ),\n ('FormatVersion', 'S6' ),\n ('CreatorName', 'S18' ),\n ('CreatorVersion', 'S12' ),\n ('FileTime', 'S18' ),\n ('CRLF', 'S2' ),\n ('Comment', 'S256' ),\n ('NumberOfCurves', 'int32' ),\n ('BitsPerRecord', 'int32' ), # bits in each T3 record\n ('RoutingChannels', 'int32' ),\n ('NumberOfBoards', 'int32' ),\n ('ActiveCurve', 'int32' ),\n ('MeasurementMode', 'int32' ),\n ('SubMode', 'int32' ),\n ('RangeNo', 'int32' ),\n ('Offset', 'int32' ),\n ('AcquisitionTime', 'int32' ), # in ms\n ('StopAt', 'uint32'),\n ('StopOnOvfl', 'int32' ),\n ('Restart', 'int32' ),\n ('DispLinLog', 'int32' ),\n ('DispTimeAxisFrom', 'int32' ),\n ('DispTimeAxisTo', 'int32' ),\n ('DispCountAxisFrom', 'int32' ),\n ('DispCountAxisTo', 'int32' ),\n ])\n header = np.fromfile(f, dtype=header_dtype, count=1)\n\n if header['FormatVersion'][0] != b'2.0':\n raise IOError((\"Format '%s' not supported. 
\"\n \"Only valid format is '2.0'.\") % \\\n header['FormatVersion'][0])\n\n dispcurve_dtype = np.dtype([\n ('DispCurveMapTo', 'int32'),\n ('DispCurveShow', 'int32')])\n dispcurve = np.fromfile(f, dispcurve_dtype, count=8)\n\n params_dtype = np.dtype([\n ('ParamStart', 'f4'),\n ('ParamStep', 'f4'),\n ('ParamEnd', 'f4')])\n params = np.fromfile(f, params_dtype, count=3)\n\n repeat_dtype = np.dtype([\n ('RepeatMode', 'int32'),\n ('RepeatsPerCurve', 'int32'),\n ('RepeatTime', 'int32'),\n ('RepeatWaitTime', 'int32'),\n ('ScriptName', 'S20' )])\n repeatgroup = np.fromfile(f, repeat_dtype, count=1)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Hardware information header\n hw_dtype = np.dtype([\n ('HardwareIdent', 'S16' ),\n ('HardwarePartNo', 'S8' ),\n ('HardwareSerial', 'int32'),\n ('SyncDivider', 'int32'),\n ('CFDZeroCross0', 'int32'),\n ('CFDLevel0', 'int32'),\n ('CFDZeroCross1', 'int32'),\n ('CFDLevel1', 'int32'),\n ('Resolution', 'f4'),\n ('RouterModelCode', 'int32'),\n ('RouterEnabled', 'int32')])\n hardware = np.fromfile(f, hw_dtype, count=1)\n\n rtr_dtype = np.dtype([\n ('InputType', 'int32'),\n ('InputLevel', 'int32'),\n ('InputEdge', 'int32'),\n ('CFDPresent', 'int32'),\n ('CFDLevel', 'int32'),\n ('CFDZCross', 'int32')])\n router = np.fromfile(f, rtr_dtype, count=4)\n\n # Time tagging mode specific header\n ttmode_dtype = np.dtype([\n ('ExtDevices', 'int32' ),\n ('Reserved1', 'int32' ),\n ('Reserved2', 'int32' ),\n ('InpRate0', 'int32' ),\n ('InpRate1', 'int32' ),\n ('StopAfter', 'int32' ),\n ('StopReason', 'int32' ),\n ('nRecords', 'int32' ),\n ('ImgHdrSize', 'int32')])\n ttmode = np.fromfile(f, ttmode_dtype, count=1)\n\n # Special header for imaging. How many of the following ImgHdr\n # array elements are actually present in the file is indicated by\n # ImgHdrSize above.\n ImgHdr = np.fromfile(f, dtype='int32', count=ttmode['ImgHdrSize'][0])\n\n # The remainings are all T3 records\n t3records = np.fromfile(f, dtype='uint32', count=ttmode['nRecords'][0])\n\n timestamps_unit = 1./ttmode['InpRate0']\n nanotimes_unit = 1e-9*hardware['Resolution']\n\n metadata = dict(header=header, dispcurve=dispcurve, params=params,\n repeatgroup=repeatgroup, hardware=hardware,\n router=router, ttmode=ttmode, imghdr=ImgHdr)\n return t3records, timestamps_unit, nanotimes_unit, metadata", "def read(filename):\r\n with open(filename, \"rb\") as f:\r\n data = pic.load(f)\r\n return data", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref", "def read_flow(filename):\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if 202021.25 != magic:\n print 'Magic number incorrect. 
Invalid .flo file'\n raise ValueError\n else:\n w = np.fromfile(f, np.int32, count=1)[0]\n h = np.fromfile(f, np.int32, count=1)[0]\n #print \"Reading %d x %d flo file\" % (h, w)\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h, w, 2))\n f.close()\n return data2d", "def read_array(self, filename):\n extension = filename.split('.')[-1] # Get file extension\n if extension == 'mat':\n array = sci.loadmat(filename)\n elif extension == 'npy':\n array = np.load(filename)\n else:\n print('Error!!! Unrecognised file type for read_array()')\n array = None\n return array", "def read_fortran(filename):\n with open(filename, 'rb') as f:\n # read size of record\n f.seek(0)\n n = np.fromfile(f, dtype='int32', count=1)[0]\n\n # read contents of record\n f.seek(4)\n v = np.fromfile(f, dtype='float32')\n\n return v[:-1]", "def getRawData(fileName):\n jointDict = {1:0,4:1,8:2,16:3,20:4,12:5, 3:6,6:7,10:8,14:9,18:10} #HipCenter, Head, RightHand, RightFoot, LeftFoot, LeftHand. Values just for indexing.\n f = open(fileName, 'r') #Shoulder center, LElbow, RElbow, lKnee, rKnee\n frameNum = getFrameNumber(fileName, 20) #gets number of frames. Assuming 20 lines per joint\n rawData = np.zeros((frameNum, len(jointDict), 3))\n for line in f:\n words = line.split()\n if int(words[1]) in jointDict: #Add new data\n frame = int(words[0])-1 #who starts indexes at 1 ew\n joint = jointDict[int(words[1])]\n x,y,z = words[2:]\n rawData[frame][joint] = float(x),float(y),float(z)\n f.close()\n # Use these if you want to plot 3d data of the joints through all frames. Maybe make scatter to better see noise?\n #fig = plt.figure()\n #ax = fig.add_subplot(111, projection='3d')\n #ax.plot(rawData[:, 1, 0], rawData[:, 1, 1], rawData[:, 1, 2])\n return rawData", "def readdata(filename):\n\tdt = np.dtype([('date','int'),('val','<f8')])\n\tdata = np.loadtxt(filename,dtype = dt,skiprows = 1)\n\treturn data", "def read_data(self, content_path):\n\n if not os.path.basename(content_path).endswith(\".dat\"):\n raise ValueError(\"this content path is not a data file\")\n\n try:\n # read binary data\n data = self._zip_file.read(content_path)\n\n # decode using big-endian integer\n result = []\n for i in range(int(len(data) / 4)):\n result.append(unpack('!i', data[i * 4:(i + 1) * 4]))\n\n # returning integer-encoded raw data vector\n return np.array(result)\n except IOError:\n print(\"can't read data file\")", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def read_raw_data(datafile, dtype, shape, use_mmap=False):\n\n # first check to be sure there is the right number of bytes in the file\n number_of_values = reduce(lambda x, y: x * y, shape)\n expected_number_of_bytes = number_of_values * dtype.itemsize\n actual_number_of_bytes = os.path.getsize(datafile)\n if expected_number_of_bytes != actual_number_of_bytes:\n raise 
IOError('File `%s` does not have the correct size '\n '(expected %g, found %g)' %\n (datafile,\n expected_number_of_bytes,\n actual_number_of_bytes))\n if use_mmap:\n # print(\"Reading %s using memmap\" % datafile)\n d = np.memmap(datafile, dtype, 'r')\n else:\n # print(\"Reading %s using fromfile\" % datafile)\n d = np.fromfile(datafile, dtype)\n d.shape = shape\n return d", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_lensum(fname, nbin, shear_style):\n from esutil.recfile import Recfile\n\n dt=get_lensum_dtype(nbin, shear_style)\n\n print(\"reading:\",fname)\n with Recfile(fname, 'r', dtype=dt, delim=' ') as robj:\n data=robj.read()\n\n return data", "def _read_smat(filename):\n return _read_hcore(filename)", "def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. 
Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, 
-118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva = data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. 
Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval", "def get_data(datapath, asfile=False):\n import os\n\n ## The file is a local file - try to get it\n if not os.path.isfile(datapath) :\n print \"The file %s you are trying to access does not exist\" %(datapath)\n raise IOError\n fn = datapath\n if asfile:\n return open(fn)\n else:\n import numpy as np\n return np.loadtxt(fn)", "def read(normalize=False):\n fname = join(dirname(abspath(__file__)), 'datasets', 'Medulloblastoma', 'Medulloblastoma_data.txt')\n V = np.loadtxt(fname)\n if normalize:\n V = (V - V.min()) / (V.max() - V.min())\n return V", "def 
_read_libffm_file(self, filename):\n\n X_true = np.zeros((self.num_rows, self.num_features))\n y_true = np.zeros((self.num_rows, 1))\n field_true = np.zeros((self.num_features, 1))\n with open(filename, 'r') as f:\n i = 0\n for line in f:\n tmp_row = line.replace('\\n', '').split(' ')\n\n # extract label\n y_true[i] = int(tmp_row[0])\n\n # extract data and fields\n for k in range(1, len(tmp_row)):\n if len(tmp_row[k]) > 0:\n tmp_str = tmp_row[k].split(':')\n j = int(tmp_str[1])\n field_true[j] = int(tmp_str[0])\n tmp_data = float(tmp_str[2])\n X_true[i, j] = tmp_data\n i = i + 1\n\n return X_true, y_true, field_true", "def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes", "def raw_data(self):\n return self.tif_file.raw_data()" ]
[ "0.72182655", "0.7005095", "0.6830073", "0.67079604", "0.6699259", "0.66830707", "0.66713506", "0.65715826", "0.6527353", "0.6522386", "0.64666754", "0.6460394", "0.6455985", "0.6454096", "0.6370719", "0.6363364", "0.6317326", "0.6306467", "0.6299659", "0.6298647", "0.62925696", "0.62722564", "0.62655884", "0.62628096", "0.62614614", "0.62599945", "0.62562096", "0.62460375", "0.62327677", "0.6229304", "0.62249035", "0.6214677", "0.62107956", "0.6209627", "0.62002784", "0.6197968", "0.6191262", "0.6182198", "0.617973", "0.61789554", "0.6167007", "0.6162626", "0.61519355", "0.61486197", "0.6144949", "0.61361206", "0.6134219", "0.6128125", "0.6120085", "0.6118516", "0.61150616", "0.6113474", "0.6107429", "0.61005414", "0.60966516", "0.60922325", "0.60905266", "0.60820955", "0.6080704", "0.60645986", "0.60642403", "0.604327", "0.602123", "0.601663", "0.60140675", "0.60016453", "0.6001618", "0.5992484", "0.5992484", "0.5992322", "0.5990754", "0.5990498", "0.5989064", "0.59880865", "0.598679", "0.598232", "0.5981764", "0.5976688", "0.59760565", "0.5975541", "0.59703666", "0.59648633", "0.59644836", "0.5961244", "0.5954751", "0.59446084", "0.59343946", "0.59336835", "0.5929501", "0.5920339", "0.5910049", "0.5909137", "0.5907878", "0.5904682", "0.5900192", "0.5892295", "0.5892138", "0.5886292", "0.5883898", "0.5880751" ]
0.61785805
40
Gets a plane from 3 points
def plane_equation(p1, p2, p3):
    a1 = p2[0] - p1[0]
    b1 = p2[1] - p1[1]
    c1 = p2[2] - p1[2]
    a2 = p3[0] - p1[0]
    b2 = p3[1] - p1[1]
    c2 = p3[2] - p1[2]
    a = b1 * c2 - b2 * c1
    b = a2 * c1 - a1 * c2
    c = a1 * b2 - b1 * a2
    # Points are collinear
    if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):
        return None
    # All clear
    d = (- a * p1[0] - b * p1[1] - c * p1[2])
    return a, b, c, d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n\n # evaluate d\n d = np.dot(cp, p3)\n\n # thus, normal is given by\n plane = vtk.vtkPlane()\n origin = p1\n normal = normalize(np.array([a,b,c]))\n plane.SetOrigin(p1)\n plane.SetNormal(normal)\n\n if numpoints == 1:\n proj = [0,0,0]\n plane.ProjectPoint(points, origin, normal, proj)\n return proj\n else:\n projected_pts = np.zeros((numpoints, 3), dtype=float)\n\n for i in range(numpoints):\n proj = [0,0,0]\n plane.ProjectPoint(points[i], origin, normal, proj)\n projected_pts[i] = proj\n\n return projected_pts", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def plane_from_multiple_points(pnts: Iterable[Point]) -> Plane:\n n = len(pnts)\n x = [pnt.x for pnt in pnts]\n y = [pnt.y for pnt in pnts]\n z = [pnt.z for pnt in pnts]\n pntc = Point(sum(x)/n, sum(y)/n, sum(z)/n)\n x = [pnt.x-pntc.x for pnt in pnts]\n y = [pnt.y-pntc.y for pnt in pnts]\n z = [pnt.z-pntc.z for pnt in pnts]\n sxx = sum([x[i]**2 for i in range(n)])\n sxy = sum([x[i]*y[i] for i in range(n)])\n sxz = sum([x[i]*z[i] for i in range(n)])\n syy = sum([y[i]**2 for i in range(n)])\n syz = sum([y[i]*z[i] for i in range(n)])\n d = sxx*syy-sxy**2\n a = (syz*sxy-sxz*syy)/d\n b = (sxy*sxz-sxx*syz)/d\n nrm = Vector(a, b, 1.0)\n return Plane(pntc, nrm)", "def plane(self):\n return plane(self.N, self.o)", "def from_3p(cls, a: Vector, b: Vector, c: Vector) -> 'Plane':\n n = (b - a).cross(c - a).normalize()\n return Plane(n, n.dot(a))", "def plane_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = normalize_vector(cross_vectors(ab, ac))\n return a, n", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def get_plane_of_points(\n self,\n normal_vector=\"z\",\n planar_coordinate=None,\n ):\n # Get results vectors\n if (normal_vector == \"z\"):\n x_flat = self.floris.grid.x_sorted_inertial_frame[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted_inertial_frame[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted_inertial_frame[0, 0].flatten()\n else:\n x_flat = self.floris.grid.x_sorted[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted[0, 0].flatten()\n u_flat = self.floris.flow_field.u_sorted[0, 0].flatten()\n v_flat = self.floris.flow_field.v_sorted[0, 0].flatten()\n w_flat = self.floris.flow_field.w_sorted[0, 0].flatten()\n\n # Create a df of these\n if normal_vector == \"z\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": y_flat,\n \"x3\": z_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"x\":\n df = pd.DataFrame(\n {\n \"x1\": y_flat,\n \"x2\": z_flat,\n \"x3\": x_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"y\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": z_flat,\n \"x3\": y_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n\n # Subset to plane\n # TODO: Seems sloppy as need more than one plane in the z-direction for GCH\n if planar_coordinate is not None:\n df = df[np.isclose(df.x3, planar_coordinate)] # , atol=0.1, rtol=0.0)]\n\n # Drop duplicates\n # TODO is this still needed now that we setup a grid for just this plane?\n df = df.drop_duplicates()\n\n # Sort 
values of df to make sure plotting is acceptable\n df = df.sort_values([\"x2\", \"x1\"]).reset_index(drop=True)\n\n return df", "def project_points_plane(points, plane):\n return [project_point_plane(point, plane) for point in points]", "def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def xyz2plane(x,y,z, new_x=[], plane=[], origin=None):\n # preliminary stuff\n if origin != None: x = x - origin\n a,b,c,d = plane\n bottom = np.sqrt(a*a + b*b + c*c) # normalize\n a,b,c,d = a/bottom, b/bottom, c/bottom, d/bottom\n px, py, pz = new_x\n bot = np.sqrt(px*px + py*py + pz*pz) #normalize\n px, py, pz = px/bot, py/bot, pz/bot\n p0 = [px,py,pz]\n # do rotation\n z_hat = [a,b,c]\n y_hat = cross(z_hat, p0)\n x_hat = cross(y_hat, z_hat)\n if type(x)==type(arr) or type(x)==type([]):\n xp, yp, zp = [], [], []\n for i in range(len(x)):\n xp.append(dot([x[i],y[i],z[i]], x_hat))\n yp.append(dot([x[i],y[i],z[i]], y_hat))\n zp.append(dot([x[i],y[i],z[i]], z_hat))\n else:\n xp = dot([x,y,z], x_hat)\n yp = dot([x,y,z], y_hat)\n zp = dot([x,y,z], z_hat)\n return xp, yp, zp", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def hyperplane(self):\n origin = (self.a+self.b+self.c)/3.\n normal = np.cross(self.a-self.b, self.a-self.c)\n return Hyperplane(origin, normal)", "def createThreePoints(cls, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n d = np.array([x2 - x1, y2 - y1, z2 - z1])\n p0 = np.array([x1, y1, z1])\n return cls(p0, d)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def point_and_plane_pose(plane_point, plane_orientation, points=None, xyz=None):\n vector = plane_orientation\n vector = 
vector / np.linalg.norm(vector)\n a = vector[0]\n b = vector[1]\n c = vector[2]\n\n d = -a * plane_point[0] - b * plane_point[1] - c * plane_point[2]\n\n if xyz is not None:\n xyz = np.asarray(xyz)\n if points.shape[0] != 3:\n logger.error(\n \"Wrong points shape. [3, N] expected, \" + str(points.shape) + \" given.\"\n )\n elif points is not None:\n points = np.asarray(points)\n if points.shape[1] != 3:\n logger.error(\n \"Wrong points shape. [N, 3] expected, \" + str(points.shape) + \" given.\"\n )\n xyz = points.T\n else:\n logger.error(\"points or xyz must be declared\")\n\n x, y, z = xyz\n z_out = (a * x + b * y + c * z + d) / (a ** 2 + b ** 2 + c ** 2) ** 0.5\n\n return z_out", "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def test_CoordinatePlane(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n plane = shapes_nd.Plane(origin, normal)\n cplane = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n \n np.testing.assert_almost_equal(cplane.dim, plane.dim)\n np.testing.assert_almost_equal(cplane.origin, plane.origin)\n np.testing.assert_almost_equal(cplane.normal, plane.normal)\n \n p3 = [0, 1, 0]\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))\n p3 = np.random.randn(5, 3)\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def _fit_plane_to_point_cloud(\n points_xyz: NDArrayFloat,\n) -> Tuple[float, float, float, float]:\n center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)\n out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(\n points_xyz - center_xyz\n )\n vh = out[2]\n\n # Get the unitary normal vector\n a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])\n d: float = -np.dot([a, b, c], center_xyz)\n return (a, b, c, d)", "def get_plane(self, quantity, plane, pval):\n\n self.log.info('Retrieving plane for %s', quantity)\n scalar = self.get_scalar_quantity(quantity)\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. 
Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def GetPlane(plane):\r\n pass", "def rotate_to_xz_plane(self, point):\n if len(point) == 2:\n x, z = point\n else:\n x, y, z = point\n if y != 0.0:\n x = np.sqrt(x**2 + y**2)\n return abs(x), z", "def function_3d(point):\n return point[0]**2 + point[1]**2 + point[2]**2 - 1", "def get_plane(self, scalar, plane, pval):\n\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def vector_3D(pt1, pt2, t):\n x1, y1, z1 = pt1\n x2, y2, z2 = pt2\n \n modulus = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)\n \n x = x1 + (x2 - x1) / modulus * t\n y = y1 + (y2 - y1) / modulus * t\n z = z1 + (z2 - z1) / modulus * t\n \n return [x, y, z]", "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def triangle_plane_intersection(self,p0,p1,p2,point,normal):\n\t\ttol=0.00001\n\t\n\t\t# handle all of the stupid cases before we do costly math\n\t\n\t\t#basic stuff\n\t\tp0dp=numpy.dot(p0-point,normal)\n\t\tp1dp=numpy.dot(p1-point,normal)\n\t\tp2dp=numpy.dot(p2-point,normal)\n\t\tp0ip=numpy.abs(p0dp)<tol # p0 in-plane\n\t\tp1ip=numpy.abs(p1dp)<tol # p1 in-plane\n\t\tp2ip=numpy.abs(p2dp)<tol # p02in-plane\n\n\t\t# are all vertices of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(p2ip): # yes, triangle is in the plane\n\t\t\treturn [p0,p1,p2]\n\t\n\t\t# are all vertices of the triangle on the same side?\n\t\tif (not(p0ip))&(not(p1ip))&(not(p2ip))&(numpy.sign(p0dp)==numpy.sign(p1dp))&(numpy.sign(p0dp)==numpy.sign(p2dp)): # yup, they are all on the same side\n\t\t\treturn []\n\t\n\t\t# is one vertex in the plane?\n\t\tif (p0ip)&(not(p1ip))&(not(p2ip)): #just p0 in plane\n\t\t\treturn [p0]\n\t\telif (not(p0ip))&(p1ip)&(not(p2ip)): #just p1 in plane\n\t\t\treturn [p1]\n\t\telif (not(p0ip))&(not(p1ip))&(p2ip): #just p2 in plane\n\t\t\treturn [p2]\n\t\n\t\t# is one line of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(not(p2ip)): #L1 in plane\n\t\t\treturn [p0,p1]\n\t\telif (not(p0ip))&(p1ip)&(p2ip): #L2 in plane\n\t\t\treturn [p1,p2]\n\t\telif 
(p0ip)&(not(p1ip))&(p2ip): #L3 in plane\n\t\t\treturn [p0,p2]\n\t\n\t\t# if we have gotten this far, we have to actually calculate intersections\n\t\tif numpy.sign(p0dp)==numpy.sign(p1dp):\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l2b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l2i,l3i]\n\t\telif numpy.sign(p2dp)==numpy.sign(p1dp):\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l1b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l3i]\n\t\telse:\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tif (l1b)&(l2b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l2i]\n\t\n\t\t# If the function makes it this far, I have no idea what is going on.\n\t\treturn \"bananna pants\"", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)", "def create_plane(self):\n\n # First we calculate our point increment for both the x and y values\n inc_x = (self.xmax - self.xmin)/(self.xlen - 1)\n inc_y = (self.ymax - self.ymin)/(self.ylen - 1)\n\n # This for-loop will add every x-value with every y-value, saving the values column wise\n # i.e. (-10,-10), (-10,-9), (-10.-8),...,(-10,n) for n = our y-values.\n # store these combinations into a list, and add that to our plane. 
\n # The nested loop will then traverse again and will get the combinations for the next x-value.\n # The loop will continue until all x-values and y-value combinations are added to our plane.\n for y in range(0, self.ylen + 1):\n temp_list = []\n for x in range(0, self.xlen + 1):\n temp_list.append(self.f((self.xmin + x*inc_x) + (self.ymin + y*inc_y)*1j))\n self.plane.append(temp_list)", "def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d", "def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d", "def three_d_vector_plane_intersection(point_a, point_b, point_c, point_d, point_e):\n a = np.array(point_a)\n b = np.array(point_b)\n c = np.array(point_c)\n nv = plane_equation(point_c, point_d, point_e)\n t = (nv[0] * c[0] + nv[1] * c[1] + nv[2] * c[2] - nv[0] * a[0] - nv[1] * a[1] - nv[2] * a[2]) / \\\n (nv[0] * (b[0] - a[0]) + nv[1] * (b[1] - a[1]) + nv[2] * (b[2]-a[2]))\n x = a[0] + t * (b[0] - a[0])\n y = a[1] + t * (b[1] - a[1])\n z = a[2] + t * (b[2] - a[2])\n intersection = np.array([x, y, z])\n return intersection", "def fit_plane_to_point_cloud(pc: np.ndarray) -> Tuple[Any, Any, Any, Any]:\n center = pc.sum(axis=0) / pc.shape[0]\n u, s, vh = np.linalg.svd(pc - center)\n\n # Get the unitary normal vector\n u_norm = vh[2, :]\n d = -np.dot(u_norm, center)\n a, b, c = u_norm\n return a, b, c, d", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def convert_coordinate_system_3d(x, y, z):\n\n return x, 
-z, y", "def get_transformable_plane(self, x_range = None, y_range = None):\n plane_config = dict(self.plane_config)\n shift_val = ORIGIN\n if x_range is not None:\n x_min, x_max = x_range\n plane_config[\"x_radius\"] = x_max - x_min\n shift_val += (x_max+x_min)*RIGHT/2.\n if y_range is not None:\n y_min, y_max = y_range\n plane_config[\"y_radius\"] = y_max - y_min\n shift_val += (y_max+y_min)*UP/2.\n plane = ComplexPlane(**plane_config)\n plane.shift(shift_val)\n if self.use_multicolored_plane:\n self.paint_plane(plane)\n return plane", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def from_points(cls, point_a: array_like, point_b: array_like, point_c: array_like, **kwargs) -> Plane:\n if Points([point_a, point_b, point_c]).are_collinear(**kwargs):\n raise ValueError(\"The points must not be collinear.\")\n\n vector_ab = Vector.from_points(point_a, point_b)\n vector_ac = Vector.from_points(point_a, point_c)\n\n return Plane.from_vectors(point_a, vector_ab, vector_ac)", "def cartesian(self) -> Tuple[np.number, np.number, np.number, np.number]:\n if self.dimension > 3:\n raise ValueError(\"The plane dimension must be <= 3.\")\n\n # The normal must be 3D to extract the coefficients.\n a, b, c = self.normal.set_dimension(3)\n\n d = -self.normal.dot(self.point)\n\n return a, b, c, d", "def _constructClippingPlane( self, viewProj, positive, axis):\r\n if positive: scale = 1\r\n else: scale = -1\r\n\r\n return Plane(viewProj[0,3] + scale*viewProj[0, axis],\r\n viewProj[1,3] + scale*viewProj[1, axis],\r\n viewProj[2,3] + scale*viewProj[2, axis],\r\n viewProj[3,3] + scale*viewProj[3, axis] )", "def get_plane(dset, xaxis, yaxis, slices, **kw):\n\n # Build quad meshes from sorted grids\n xgrid = dset.dims[xaxis][0][indices[xaxis]]\n ygrid = dset.dims[yaxis][0][indices[yaxis]]\n xorder = np.argsort(xgrid)\n yorder = np.argsort(ygrid)\n xmesh, ymesh = quad_mesh(xgrid[xorder], ygrid[yorder], **kw)\n\n # Select and arrange data\n data = dset[slices]\n if xi < yi:\n data = data.T\n data = data[yorder]\n data = data[:, xorder]\n\n return xmesh, ymesh, data", "def bestfit_plane_from_points(points):\n centroid = centroid_points(points)\n\n xx, xy, xz = 0., 0., 0.\n yy, yz, zz = 0., 0., 0.\n\n for point in points:\n rx, ry, rz = subtract_vectors(point, centroid)\n xx += rx * rx\n xy += rx * ry\n xz += rx * rz\n yy += ry * ry\n yz += ry * rz\n zz += rz * rz\n\n det_x = yy * zz - yz * yz\n det_y = xx * zz - xz * xz\n det_z = xx * yy - xy * xy\n\n det_max = max(det_x, det_y, det_z)\n\n if det_max == det_x:\n a = (xz * yz - xy * zz) / det_x\n b = (xy * yz - xz * yy) / det_x\n normal = (1., a, b)\n elif det_max == det_y:\n a = (yz * xz - xy * zz) / det_y\n b = (xy * xz - yz * xx) / det_y\n normal = (a, 1., b)\n else:\n a = (yz * xy - xz * yy) / det_z\n b = (xz * xy - yz * xx) / det_z\n normal = (a, b, 1.)\n\n return centroid, normalize_vector(normal)", "def invert_point_on_plane(point, plane):\n _, _, proj = project_point_to_plane(point, plane)\n\n u, v = proj[0][1]\n return u, v", "def point(chordwise, spanwise, vertical):\n return Point3D.create(chordwise, spanwise, vertical)", "def vector3(x, y, z):\n return np.array([x, y, z], dtype=np.float)", "def vector3(x, y, z):\n return np.array([x, y, z], dtype=float)", "def point(x, y, z):\n return point_or_vector(x,y,z,1.0)", "def render_vertices_3d(self, **kwds):\n return 
point3d(self.coordinates_of(self.points), **kwds)", "def getPlane(entry):\n\n \n \n a,b,c = getNewLattice(entry,2)\n a_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,a)\n b_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,b)\n fracs = np.cross(a_vector,b_vector)\n fracs /= min([x for x in fracs if abs(x)>1E-4])\n \n return(fracs)", "def _generate_random_points_in_plane(nvect, dparam, npts, eps=0.0):\n np.random.seed(12345)\n a, b, c = nvect / np.linalg.norm(nvect)\n x, y = np.random.rand(npts), np.random.rand(npts)\n z = (dparam - a * x - b * y) / c\n if eps > 0:\n z += np.random.normal(loc=0., scale=eps, size=npts)\n return np.column_stack((x, y, z))", "def readPoint3( node ):\r\n return Point3( float( node.getAttribute( 'x' ) ),\r\n float( node.getAttribute( 'y' ) ),\r\n float( node.getAttribute( 'z' ) ) )", "def intersection_plane_plane_plane(plane1, plane2, plane3, epsilon=1e-6):\n line = intersection_plane_plane(plane1, plane2, epsilon)\n if not line:\n return None\n pt = intersection_line_plane(line, plane3, epsilon)\n if pt:\n return pt\n return None", "def planeFit(points):\n\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1], points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:,np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n\n return ctr, np.linalg.svd(M)[0][:,-1]", "def plane_fit(points):\n import numpy as np\n from numpy.linalg import svd\n\n points = np.reshape(\n points, (np.shape(points)[0], -1)\n ) # Collapse trialing dimensions\n assert (\n points.shape[0] <= points.shape[1]\n ), \"There are only {} points in {} dimensions.\".format(\n points.shape[1], points.shape[0]\n )\n ctr = points.mean(axis=1)\n x = points - ctr[:, np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:, -1]", "def ProjectToPlane(self):\n\n self.__do_essential_memebers_exist__()\n if self.element_type != \"tri\":\n raise ValueError(\"Project to plane is only applicable to triangles\")\n\n imesh = deepcopy(self)\n coordinates = []\n connectivities = []\n for counter, elem in enumerate(imesh.elements):\n\n elementCoordinates = imesh.points[elem,:]\n\n A = elementCoordinates[0,:]\n B = elementCoordinates[1,:]\n C = elementCoordinates[2,:]\n\n X = (B - A); X /= np.linalg.norm(X)\n Z = np.cross(X, C - A); Z /= np.linalg.norm(Z)\n Y = np.cross(Z, X)\n\n # PROJECT THE TRIANGLE TO THIS BASES\n a = [0., 0.]\n b = [np.linalg.norm((B - A)), 0.]\n c = [(C - A).dot(X), (C - A).dot(Y)]\n\n coordinates.append(a)\n coordinates.append(b)\n coordinates.append(c)\n\n elementConnectivity = [3 * counter, 3 * counter + 1, 3 * counter + 2]\n connectivities.append(elementConnectivity)\n\n coordinates = np.array(coordinates)\n connectivities = np.array(connectivities)\n imesh.points = coordinates\n imesh.elements = connectivities\n imesh.nelem = imesh.elements.shape[0]\n imesh.nnode = imesh.points.shape[0]\n\n return imesh", "def planeFit(points):\n import numpy as np\n from numpy.linalg import svd\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1], points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:,np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, 
svd(M)[0][:,-1]", "def plane_fit(points):\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1],\n points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:, np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:, -1]", "def get_pca_o3d(w, v, points):\n\n # calculate centroid & variation along main axis:\n centroid = points.mean()\n projs = np.dot(points.to_numpy(), v[:, 0])\n scale = projs.max() - projs.min()\n\n points = centroid.to_numpy() + np.vstack((np.asarray([0.0, 0.0, 0.0]), scale * v.T)).tolist()\n lines = [[0, 1],[0, 2],[0, 3]]\n # from the largest to the smallest: RGB\n colors = np.identity(3).tolist()\n\n # build pca line set:\n pca_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n pca_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return pca_o3d", "def xyplane(draw, r, x, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [x,-extent,0],\n [x,extent,0],\n [x,extent,extent*2],\n [x,-extent,extent*2]\n ]\n )\n pln = np.dot(pln,np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n 
'{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def plane_list(self):\n return self.__plane_list", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def invert_points_on_plane(pnts, plane):\n ptol = Settings.ptol\n vu = plane.vu.ijk\n vv = plane.vv.ijk\n p0 = plane.p0.xyz\n vu2 = dot(vu, vu)\n vv2 = dot(vv, vv)\n\n pnts = array(pnts, dtype=float64)\n npts = pnts.shape[0]\n params = zeros((npts, 2), dtype=float64)\n for 
i in range(0, npts):\n pi = pnts[i]\n u = dot(pi - p0, vu) / vu2\n v = dot(pi - p0, vv) / vv2\n if abs(u) <= ptol:\n u = 0.\n if abs(v) <= ptol:\n v = 0.\n params[i, :] = [u, v]\n\n return params", "def clip_segment_v3_plane_n(\n p1: np.ndarray, p2: np.ndarray, planes: List[np.ndarray]\n) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:\n dp = p2 - p1\n\n p1_fac = 0.0\n p2_fac = 1.0\n\n for p in planes:\n div = p[:3].dot(dp)\n\n # check if line vector and plane normal are perpendicular\n # if perpendicular, line and plane are parallel\n if div != 0.0:\n # if not perpendicular, find intersection\n t = -plane_point_side_v3(p, p1)\n if div > 0.0: # clip p1 lower bounds\n if t >= div:\n return None, None\n if t > 0.0:\n fac = t / div\n if fac > p1_fac:\n p1_fac = fac\n if p1_fac > p2_fac:\n # intersection occurs outside of segment\n return None, None\n elif div < 0.0: # clip p2 upper bounds\n if t > 0.0:\n return None, None\n if t > div:\n fac = t / div\n if fac < p2_fac:\n p2_fac = fac\n if p1_fac > p2_fac:\n return None, None\n\n p1_clip = p1 + (dp * p1_fac)\n p2_clip = p1 + (dp * p2_fac)\n return p1_clip, p2_clip", "def get_normal_vectors(self, p, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value = self._get_normal_vectors(p, x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value)", "def surfcut_points(**kwargs):\n npoints = kwargs.get( 'npoints', 240 )\n origin = kwargs.get( 'origin', vec3(0.,0.,0.)) \n normal = kwargs.get( 'normal', (np.pi/2., 0.) ) \n lims0 = kwargs.get( 'lims0', (-50., 50.) ) \n lims1 = kwargs.get( 'lims1', (-50., 50.) ) \n extents = kwargs.get( 'extents', None) \n \n if extents is not None:\n lims0 = (-extents, extents)\n lims1 = (-extents, extents)\n \n # Make the unit vectors that define the plane\n unit = vec3()\n th = normal[0]\n ph = normal[1]\n unit.set_spherical( 1, th, ph) \n orth0 = vec3( -1.*np.sin(ph), np.cos(ph), 0. )\n orth1 = cross(unit,orth0)\n \n t0 = np.linspace( lims0[0], lims0[1], npoints )\n t1 = np.linspace( lims1[0], lims1[1], npoints ) \n \n # Obtain points on which function will be evaluated\n T0,T1 = np.meshgrid(t0,t1)\n X = origin[0] + T0*orth0[0] + T1*orth1[0] \n Y = origin[1] + T0*orth0[1] + T1*orth1[1]\n Z = origin[2] + T0*orth0[2] + T1*orth1[2] \n \n\n # If given an axes it will plot the reference surface to help visusalize\n # the surface cut\n \n # Note that the axes needs to be created with a 3d projection. \n # For example: \n # fig = plt.figure( figsize=(4.,4.) 
) \n # gs = matplotlib.gridspec.GridSpec( 1,1 ) \n # ax0 = fig.add_subplot( gs[0,0], projection='3d' ) \n \n ax0 = kwargs.get( 'ax0', None ) \n if ax0 is not None: \n\n # Plot the reference surface\n ax0.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, linewidth=0.)\n ax0.set_xlabel('X')\n ax0.set_ylabel('Y')\n ax0.set_zlabel('Z')\n lmin = min([ ax0.get_xlim()[0], ax0.get_ylim()[0], ax0.get_zlim()[0] ] )\n lmax = max([ ax0.get_xlim()[1], ax0.get_ylim()[1], ax0.get_zlim()[1] ] )\n ax0.set_xlim( lmin, lmax )\n ax0.set_ylim( lmin, lmax )\n ax0.set_zlim( lmin, lmax )\n ax0.set_yticklabels([])\n ax0.set_xticklabels([])\n ax0.set_zticklabels([])\n \n # If given an axes and a potential it will plot the surface cut of the \n # potential \n\n ax1 = kwargs.get( 'ax1', None) \n pot = kwargs.get( 'potential', None) \n\n if (ax1 is not None) and (pot is not None):\n # Evaluate function at points and plot\n EVAL = pot.evalpotential(X,Y,Z)\n\n im =ax1.pcolormesh(T0, T1, EVAL, cmap = plt.get_cmap('jet')) \n # cmaps: rainbow, jet\n\n plt.axes( ax1)\n cbar = plt.colorbar(im)\n cbar.set_label(pot.unitlabel, rotation=0 )#self.unitlabel\n \n return T0, T1, X, Y, Z", "def normalize(x: float, y: float, z: float) -> Point3D:\n mag = math.sqrt(x*x + y*y + z*z)\n return x/mag, y/mag, z/mag", "def load_plane(image):\n pixels = image.getPrimaryPixels()\n return pixels.getPlane(0, 0, 0)", "def proj3d(v):\n v = normalize(v)\n x, y, z, w = v\n return np.array([x, y, z]) / (1 + 1e-8 - w) # avoid divide by zero", "def svm_add_3d_hyperplane(model, ax, plotted_points):\n SPACE_SAMPLING_POINTS = 70\n X_MIN = np.min(plotted_points[:, 0])\n X_MAX = np.max(plotted_points[:, 0])\n Y_MIN = np.min(plotted_points[:, 1])\n Y_MAX = np.max(plotted_points[:, 1])\n Z_MIN = np.min(plotted_points[:, 2])\n Z_MAX = np.max(plotted_points[:, 2])\n xx, yy, zz = np.meshgrid(np.linspace(X_MIN, X_MAX, SPACE_SAMPLING_POINTS),\n np.linspace(Y_MIN, Y_MAX, SPACE_SAMPLING_POINTS),\n np.linspace(Z_MIN, Z_MAX, SPACE_SAMPLING_POINTS))\n if hasattr(model, 'decision_function'):\n Z = model.decision_function(np.c_[xx.ravel(), yy.ravel(), zz.ravel()])\n elif hasattr(model, 'predict_proba'):\n Z = model.predict_proba(\n np.c_[xx.ravel(), yy.ravel(), zz.ravel()])[:, 1]\n else:\n exit('No decision function or predict_proba for classifer')\n Z = Z.reshape(xx.shape)\n verts, faces, _, _ = measure.marching_cubes(Z, 0)\n verts = verts * \\\n [X_MAX - X_MIN, Y_MAX - Y_MIN, Z_MAX - Z_MIN] / SPACE_SAMPLING_POINTS\n verts = verts + [X_MIN, Y_MIN, Z_MIN]\n mesh = Poly3DCollection(verts[faces],\n facecolor='orange', edgecolor='gray', alpha=0.4)\n ax.add_collection3d(mesh)", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def get_surface_normals_o3d(normals, points, scale=2):\n # total number of points:\n N = points.shape[0]\n\n points = np.vstack(\n (points.to_numpy(), points.to_numpy() + scale * normals)\n )\n lines = [[i, i+N] for i in range(N)]\n colors = np.zeros((N, 3)).tolist()\n\n # build pca line set:\n surface_normals_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return 
surface_normals_o3d", "def _get_data_on_3d_points(self, varname, record, points):\n if self.get_mesh_dimension() != 3:\n raise TelemacException(\"Action possible only on 3d mesh\")\n\n res = float('nan')*np.ones((len(points)), dtype=np.float64)\n for i, point in enumerate(points):\n elev = self.get_data_on_vertical_segment(\\\n 'ELEVATION Z', record, point[:-1])\n values = self.get_data_on_vertical_segment(\\\n varname, record, point[:-1])\n for plan in range(self.nplan-1):\n if elev[plan] <= point[-1] and point[-1] <= elev[plan+1]:\n shz = (point[-1]-elev[plan])/max((elev[plan+1]\\\n -elev[plan]), 1.e-6)\n res[i] = (1.0-shz)*values[plan]+shz*values[plan+1]\n return res", "def eq_to_3d(ra, dec):\r\n x = np.cos(ra) * np.cos(dec)\r\n y = np.sin(ra) * np.cos(dec)\r\n z = np.sin(dec)\r\n return x, y, z", "def project_point(self, point: Point3D) -> Point3D:\n x, y, z = point\n cam_x, cam_y, cam_z = self._pos\n x -= cam_x\n y -= cam_y\n z -= cam_z\n dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z\n dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x)\n dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x)\n return self._scale * dx/dz, self._scale * dy/dz, dz", "def verify_plane_endpoints(self):\n return [self.x0 + self.nx * self.dx, self.y0 + self.ny * self.dy, self.z0 + self.nz * self.dz]", "def xzplane(draw, r, y, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [-extent,y,0],\n [extent,y,0],\n [extent,y,extent*2],\n [-extent,y,extent*2]\n ]\n )\n pln = np.dot(pln, np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def plane(*args, length: float=0.0, name: AnyStr=\"\", position: List[float, float, float]=None,\n rotation: List[float, float, float]=None, size: float=0.0, width: float=0.0,\n **kwargs)->AnyStr:\n pass", "def polyPlane(*args, axis: Union[List[float, float, float], bool]=None, createUVs: Union[int,\n bool]=1, height: Union[float, bool]=1.0, subdivisionsHeight: Union[int, bool]=0,\n subdivisionsWidth: Union[int, bool]=10, subdivisionsX: Union[int, bool]=5,\n subdivisionsY: Union[int, bool]=5, texture: Union[int, bool]=1, width:\n Union[float, bool]=1.0, caching: bool=True, constructionHistory: bool=True, name:\n AnyStr=\"\", nodeState: Union[int, bool]=0, object: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def test_projection_v3_z(self):\n\n from pedemath.vec3 import projection_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(0, 0, 1)\n\n result = projection_v3(vec_a, vec_b)\n\n self.assertEqual(5, result)", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * 
u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def vector_3d_magnitude(x, y, z):\n return math.sqrt((x * x) + (y * y) + (z * z))", "def drawVector3D(x0,y0,z0,x1,y1,z1, vtype='normal'):\n dislin.vectr3(x0,y0,z0,x1,y1,z1, vectordict[vtype])", "def TriangularProjection(c1=(0,0), c2=(2,0), c3=(2,2), points=None, npoints=10, equally_spaced=True):\n\n if points is None or not isinstance(points,np.ndarray):\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"coordinates should be given in tuples of two elements (x,y)\")\n else:\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n else:\n opoints = points\n\n from scipy.spatial import Delaunay\n from Florence.FunctionSpace import Tri\n from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPointsTri\n from Florence.QuadratureRules.FeketePointsTri import FeketePointsTri\n\n if equally_spaced:\n points = EquallySpacedPointsTri(npoints)\n else:\n points = FeketePointsTri(npoints)\n\n BasesTri = np.zeros((3,points.shape[0]),dtype=np.float64)\n hpBases = Tri.hpNodal.hpBases\n for i in range(points.shape[0]):\n BasesTri[:,i] = hpBases(0,points[i,0],points[i,1],\n EvalOpt=1,equally_spaced=equally_spaced,Transform=1)[0]\n\n func = Delaunay(points,qhull_options=\"QJ\")\n triangles = func.simplices\n nnode = func.points.shape[0]\n nelem = func.nsimplex\n nsize = int((npoints+2)*(npoints+3)/2.)\n\n mesh = Mesh()\n mesh.element_type=\"tri\"\n mesh.points = np.dot(BasesTri.T, opoints)\n mesh.elements = triangles\n mesh.nelem = mesh.elements.shape[0]\n mesh.nnode = mesh.points.shape[0]\n mesh.GetBoundaryEdges()\n\n return mesh", "def bfplane(x, y, z):\n n = float(len(x))\n A = np.array([[sum(x*x),sum(x*y),sum(x)],[sum(x*y),sum(y*y),sum(y)],[sum(x),sum(y),n]])\n B = np.array([sum(x*z),sum(y*z),sum(z)])\n res = np.linalg.solve(A,B)\n return res", "def from_three_points(cls, a, b, c):\n circle = Circle.from_three_points(a, b, c)\n return cls(circle.radius, frame=circle.frame)", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));\n return preds_3d;", "def fit_plane_to_points(points: np.ndarray, eps: float=1.0e-5):\n # Compute plane origin and subract it from the points array.\n plane_origin = np.mean(points, axis=0)\n x = points - plane_origin\n\n # Dot product to yield a 3x3 array.\n moment = np.dot(x.T, x)\n\n # Extract single values from SVD computation to get normal.\n plane_normal = np.linalg.svd(moment)[0][:,-1]\n small = np.where(np.abs(plane_normal) < eps)\n plane_normal[small] = 0.0\n plane_normal /= np.linalg.norm(plane_normal)\n if (plane_normal[-1] < 0.0):\n plane_normal *= -1.0\n\n return (plane_normal, plane_origin)" ]
[ "0.75500053", "0.75069356", "0.7475429", "0.7409829", "0.7394293", "0.72587293", "0.70594555", "0.6911756", "0.68775445", "0.6856724", "0.68564415", "0.68380785", "0.68015355", "0.68015355", "0.67951053", "0.67129314", "0.6660467", "0.66463685", "0.6608463", "0.6591655", "0.65495497", "0.6548557", "0.65468985", "0.6541973", "0.65384865", "0.6526709", "0.6523478", "0.6494743", "0.64741755", "0.6463691", "0.6376814", "0.6376372", "0.6372751", "0.6350577", "0.6349976", "0.6275894", "0.6250931", "0.61992145", "0.61768216", "0.61768216", "0.61601347", "0.61593276", "0.61582524", "0.6151917", "0.60956967", "0.60817534", "0.60782003", "0.6070231", "0.606828", "0.6057172", "0.6054443", "0.60515875", "0.6034775", "0.6030497", "0.60128605", "0.59922194", "0.59871835", "0.5982535", "0.59731185", "0.594752", "0.59462005", "0.5940506", "0.59344107", "0.5933947", "0.5926393", "0.59248495", "0.5899512", "0.58819526", "0.58798", "0.5878162", "0.5866916", "0.5863863", "0.58299893", "0.58245194", "0.5808893", "0.5801871", "0.5795039", "0.5778381", "0.5775757", "0.5774414", "0.5770388", "0.57666975", "0.5758093", "0.5756315", "0.5750374", "0.57491803", "0.57437396", "0.57145", "0.5708802", "0.5708685", "0.5705135", "0.5703689", "0.5702379", "0.56851995", "0.56738853", "0.5669634", "0.5665076", "0.5663991", "0.5649351", "0.5640297" ]
0.7128078
6
Returns distance from the point to the plane
def distance_to_plane(plane, pt):
    if plane is None:
        return None
    d = abs((plane[0] * pt[0] + plane[1] * pt[1] + plane[2] * pt[2] + plane[3]))
    e = (math.sqrt(plane[0] * plane[0] + plane[1] * plane[1] + plane[2] * plane[2]))

    # Not the best assumption, but will work for the task.
    if abs(e) < 1e-10:
        return 1e10

    return d / e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distanceTo(self, point):\n return np.linalg.norm([self.x - point.x, self.y - point.y, self.z - point.z])", "def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def distanceFromPoint(self, point):\n return Vector.createFromTwoPoints(point, self.crossLine(self.getHeight(point))).norm", "def distance_from_xy_plane(p,r):\n return np.abs(p[2]-r[2])", "def distance_from_plane(self, points, params, sqrt=False):\n a, d = params\n a = a.reshape((3, 1))\n\n # check for the orientation\n try:\n distance = torch.sum((points @ a - d) ** 2, 1)\n except:\n import ipdb;\n ipdb.set_trace()\n\n if sqrt:\n distance = guard_sqrt(distance)\n if self.reduce:\n distance = torch.mean(distance)\n\n # Note that this is distance square\n return distance", "def distance(self, pt):\n return math.sqrt((self.x - pt.x) ** 2 + (self.y - pt.y) ** 2)", "def distance_from_plane(n,p,r,nnorm=None):\n #return np.abs(np.dot(n,(p-r)))/np.linalg.norm(n)\n #return np.abs(np.dot(n,(p-r)))/nnorm\n # the normal vector is already a unit vector!\n return np.abs(np.dot(n,(p-r)))", "def dist(self, point: np.array):\n return np.linalg.norm(\n np.cross(point - self.r_start, self.direction), axis=1) / \\\n np.linalg.norm(self.direction)", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1):\n point = np.array(self.image.size()) / 2\n return spsd.euclidean(point, [self.x, self.y])", "def distance_to(self, point: Union[\"Unit\", Point2, Point3]) -> Union[int, float]:\n return self.position.distance_to_point2(point.position)", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def dist(self, p):\n return math.sqrt((p.x - self.x)**2 + (p.y - self.y)**2)", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1 and len(self)):\n point = self[0].image.size()\n\n return spsd.cdist(self.coordinates(), [point])[:,0]", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5", "def calc_dist(self, p):\n p = np.array((p.x, p.y, p.z))\n return LA.norm(p - self.car_pos)", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def get_distance_to_plane(planepoints, otherpoint):\n # from\n # https://en.wikipedia.org/wiki/Plane_(geometry)#Describing_a_plane_through_three_points\n p0, p1, p2 = planepoints\n x1, y1, z1 = 
p0.getArray()\n x2, y2, z2 = p1.getArray()\n x3, y3, z3 = p2.getArray()\n D = np.linalg.det(np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]]))\n if D != 0:\n d = -1\n at = np.linalg.det(np.array([[1, y1, z1], [1, y2, z2], [1, y3, z3]]))\n bt = np.linalg.det(np.array([[x1, 1, z1], [x2, 1, z2], [x3, 1, z3]]))\n ct = np.linalg.det(np.array([[x1, y1, 1], [x2, y2, 1], [x3, y3, 1]]))\n a = (-d / D) * at\n b = (-d / D) * bt\n c = (-d / D) * ct\n\n numer = np.abs(a * otherpoint.x +\n b * otherpoint.y +\n c * otherpoint.z + d)\n denom = np.sqrt(a**2 + b**2 + c**2)\n dist = numer / denom\n else:\n dist = 0\n return dist", "def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance", "def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)", "def distance_to(self, p):\n closest_pt = self.closest_point_to(p)\n return np.linalg.norm(p - closest_pt)", "def distance_to(self, n):\n\n d = ( (self.x - n.x) ** 2 + (self.y - n.y) ** 2 + (self.z - n.z) ** 2 ) ** 0.5\n \n return d", "def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def get_dist(self, point_x, point_y):\n dist = sqrt((point_x - self.player_x) ** 2 + (point_y -\n self.player_y) ** 2)\n return dist", "def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))", "def distance(self, point):\r\n assert a6checks.is_point(point)\r\n assert len(point)==len(self._centroid)\r\n\r\n sum=0\r\n for i in range (len(self._centroid)):\r\n sum+=(point[i]-self._centroid[i])*(point[i]-self._centroid[i])\r\n dist=math.sqrt(sum)\r\n return dist", "def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)", "def distance(d1, d2):\n projection_onto_plane = d2 - projection(d1, d2)\n dist = np.linalg.norm(projection_onto_plane)\n\n return dist", "def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def distancia(self, punto):\n return math.hypot(self.x - punto.x, self.y - punto.y)", "def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)", "def distance(pt, pt2=(0,0,0)):\n x, y, z = pt\n x2, y2, z2 = pt2\n return ((x-x2)**2 + (y-y2)**2 + (z-z2)**2)**0.5", "def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def get_sag_plane_dist(self, P):\n if self.sp is None:\n print('ERROR: sagittal plane not setted yet!')\n return 0\n else:\n sp = self.sp\n #return abs(P[0]*sp[0] + P[1]*sp[1] + P[2]*sp[2] + sp[3]) / math.sqrt(sp[0]**2 + sp[1]**2 + sp[2]**2)\n return abs(P[0]*sp[0] + P[1]*sp[1] + P[2]*sp[2] + sp[3]) / math.sqrt(sp.dot(sp))", "def distance_point(self, point: array_like) -> np.float64:\n return abs(self.distance_point_signed(point))", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def 
closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def distance_to(self, obj):\n\t\tx, y = self.position\n\t\tobj_x, obj_y = obj.position\n\t\treturn hypot(x - obj_x, y - obj_y)", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2", "def distance_to(self, p):\n sign = 1 # -1 if self.is_inside(p) else 1\n dist = min([tri.distance_to(p) for tri in self.triangles])\n return sign * dist", "def calculate_distance(self, other_point):\n return math.sqrt(\n (self._x - other_point._x)**2 +\n (self._y - other_point._y)**2)", "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance_to(self, other):\n return abs(self.x-other.x) + abs(self.y-other.y) + abs(self.z-other.z)", "def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)", "def getDistanceToPoint(self, p, returnParaPerp = False):\n if not isinstance(p, VectorN) or len(p) != len(self.mOrigin):\n raise ValueError(\"p must be a point of dimension \" + str(len(self.mOrigin)))\n dirToP = p - self.mOrigin\n if dirToP.dot(self.mDirection) < 0:\n return None\n paraPart = dirToP.dot(self.mDirection) * self.mDirection\n perpPart = dirToP - paraPart\n if returnParaPerp:\n return (perpPart.magnitude(), paraPart, perpPart)\n else:\n return perpPart.magnitude()", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def distance_to(self, p):\n return (self - p).length()", "def distance_to(self, p):\n return (self - p).length()", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return 
math.sqrt(inner)", "def _distance(self, new_pt):\n\t\tnew_pt = np.resize(new_point, (self.n_row, new_pt.shape[0]))\n\t\tdist = euclidean_distance(self.data[:,0:-1], new_pt)\n\n\t\treturn dist", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def z_distance(self):\n return self.get_distance(self.Z_INDEX)", "def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)", "def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))", "def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def distance((x,y,z),(x0,y0,z0)):\n return sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2)", "def distance(x1, y1, z1, x2, y2, z2):\n return math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)", "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def distance(self, other):\n x, y, z = (self.x-other.x), (self.y-other.y), (self.z-other.z)\n return math.sqrt(x**2 + y**2 + z**2)", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def euclidean_distance(self, other_point):\n return sqrt((self.x - other_point.x)**2 + (self.y - other_point.y)**2)", "def compute_distance(point, dataset):\n\n return np.abs(dataset - point)", "def euclidean_distance(self, point):\n mean = self.mean()\n dist = euclidean(mean, point)\n radius = self.radius * self.distance_factor()\n if radius == 0.0:\n # corner case: the ball consists of a single point only\n # distance is defined as > 1 for flat dimensions 
unless point lies inside\n if point == mean:\n dist = 0.0\n else:\n dist += 1\n else:\n # normalization so that result 1.0 corresponds to dist == radius (i.e., point is on the border)\n dist /= radius\n return dist", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)", "def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def distance_vehicle(waypoint, vehicle_transform):\n loc = vehicle_transform.location\n x = waypoint.transform.location.x - loc.x\n y = waypoint.transform.location.y - loc.y\n\n return math.sqrt(x * x + y * y)", "def distance_vehicle(waypoint, vehicle_transform):\n loc = vehicle_transform.location\n x = waypoint.transform.location.x - loc.x\n y = waypoint.transform.location.y - loc.y\n\n return math.sqrt(x * x + y * y)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def distance_from_center(self, x: int, y: int) -> float:\n width, height = self.width, self.height\n dis = distance(x, y, width/2, height/2)\n return dis" ]
[ "0.8156893", "0.8134189", "0.7893171", "0.7741359", "0.7599741", "0.759446", "0.749491", "0.7362298", "0.73259467", "0.7189307", "0.7166196", "0.71618855", "0.71567243", "0.7114216", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.70056736", "0.6994237", "0.6994224", "0.69799006", "0.69745016", "0.6928665", "0.69253826", "0.6893024", "0.68927854", "0.6867367", "0.6861775", "0.6843397", "0.6836917", "0.68173254", "0.6817029", "0.6799994", "0.6793699", "0.6787422", "0.6785238", "0.6767198", "0.67381936", "0.6727265", "0.6719631", "0.6718167", "0.66860116", "0.66813666", "0.6662222", "0.6645315", "0.66362846", "0.6635779", "0.6613167", "0.6586198", "0.65841675", "0.65820366", "0.6577302", "0.65766704", "0.6568", "0.65553725", "0.6544936", "0.6539099", "0.65198123", "0.6510379", "0.64988214", "0.64988214", "0.64874226", "0.64703435", "0.6458745", "0.64492446", "0.6445951", "0.6440238", "0.6433583", "0.64250165", "0.6423851", "0.6419513", "0.6404812", "0.6385179", "0.6385064", "0.6379913", "0.63791037", "0.6373971", "0.63705987", "0.6363865", "0.6360697", "0.6344798", "0.63247806", "0.63241553", "0.632056", "0.63175917", "0.6313385", "0.6298738", "0.6298553", "0.62951076", "0.62768024", "0.6268864", "0.6263068", "0.6263068", "0.6263023", "0.6262758", "0.62617725" ]
0.70035726
22
Check if more than 50% of the points match the condition.
def points_match(plane, p, points, threshold):
    match = 0
    for point in points:
        if distance_to_plane(plane, point) <= p:
            match += 1

    if match >= threshold:
        return True

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_check(self):\n if sum(x >= y for x, y in zip(self.elbos[-100:], self.elbos[-99:])) > 50 and\\\n self.elbos[-1] - self.elbos[-100] < 1e-3*abs(self.elbos[-100]):\n return True", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def checkPointInLampsReach(self, p):\n v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)\n v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)\n\n q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)\n s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)\n t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)\n\n return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)", "def goalReached(self, rewards):\n return len(rewards) >= 100 and np.mean(rewards[-100:]) >= 18", "def test_perc(perc):\n num_wet = perc.num_wet()\n\n while True:\n perc.step()\n\n if perc.bottom_row_wet():\n return True\n\n new_num_wet = perc.num_wet()\n if new_num_wet == num_wet:\n return False\n\n num_wet = new_num_wet", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def evaluation_point(self):\n if self.turns in range(40,(self.game_length - 19), self.policy_eval_point):\n return True", "def perform_strategy(self, counter):\r\n if counter < self.percent * len(self.envelopes): # in the first self.percent percent\r\n self.curr_max = max(self.curr_max, self.envelopes[counter].money)\r\n return\r\n return self.envelopes[counter].money > self.curr_max", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def brute_force(savedPnts, unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = 
np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def check_probability(self, x, k_neighbours, expected_class, classifier, view = 0):\n match_number = 0\n distances, indexes = classifier.kneighbors(x.reshape(1, -1), k_neighbours)\n for idx in indexes:\n for element in idx:\n predict = classifier.predict(self.data[view][element].reshape(1, -1))\n if predict[0] and predict[0] == expected_class:\n match_number += 1\n return float(match_number)/k_neighbours", "def contains_point(self, point):\n\t\tthreshold = 0.6\n\t\tx = point[0]\n\t\ty = point[1]\n\t\tif (x >= (self.xmin - threshold) and x <= (self.xmax + threshold) and\n\t\t\ty >= (self.ymin - threshold) and y <= (self.ymax + threshold)):\n\t\t return True\n\t\treturn False", "def check_coll(self, particle):\r\n \r\n r1, r2 = self.radius, particle.radius\r\n x1, x2 = self.position, particle.position\r\n di = x2-x1\r\n norm = np.linalg.norm(di)\r\n if norm-(r1+r2)*1.1 < 0:\r\n return True\r\n else:\r\n return False", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def cointoss():\n return random.random() < 0.5", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def __contains__(self, point, e=1e-10):\n if point == self.p1:\n return True\n v1 = Vector.createFromTwoPoints(self.p1, point)\n v2 = self.getVector()\n return (abs(v1.angle - v2.angle) % (2 * math.pi) < e) and (v1.norm <= v2.norm)", "def _success(self, roll, resistance):\n hits = roll.count('black')\n if 'Wild White' in self.special_powers:\n hits += roll.count('white')\n return hits >= resistance", "def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > self.dimLens[i]/2.0:\n return False;\n return True;", "def willcollide(self, p, c, r, v=None):\n return (p.step(dt).vec(c)).len() > r", "def check_points_and_level_up(self):\n if self.points > 20 * self.level:\n self.level += 1\n self.refresh_rate = self.refresh_rate * 0.75", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def 
passes_thr(self, x, y, values):\n if self.cutmap is None:\n return None\n\n _, _, _, binnumber = binned_statistic_2d(\n x, y, values,\n statistic=\"count\",\n bins=[self.x_bins, self.y_bins],\n expand_binnumbers=True\n )\n\n x_idx, y_idx = binnumber[0, :] - 1, binnumber[1, :] - 1\n\n return values > self.cutmap[x_idx, y_idx]", "def test_check_single_threshold(self):\n data = np.array(\n [\n [[13.2, 8.0, 13.2], [-46.0, 8.0, -78.4], [-78.4, -86.5, -89.2]],\n [[34, 31.1111, 34.0], [27.5, 31.1111, 8.0], [8.0, -32.5, -46.0]],\n [[54.8, 54.2222, 54.8], [53.5, 54.2222, 49.6], [49.6, 34, -2.8]],\n ],\n dtype=np.float32,\n )\n\n threshold_coord = find_threshold_coordinate(self.cube)\n cube = next(self.cube.slices_over(threshold_coord))\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def thresh_vote(lst, f):\n\n if len(lst) == 0: # guess 0 by default (appropriate for our dataset)\n q = 0\n else:\n q = float(sum(lst)) / len(lst)\n\n return q >= f", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def decision():\n return random.random() > 0.5", "def chance(c: float) -> bool:\n return c > r()", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def classify(self, testInstance):\n return self.fire(testInstance) > 0.5", "def _prediction_match(self, thermo, ref_values, eps=0.05):\n singlet_array = self._get_singlet_array(thermo)\n for cur_array, ref_array in zip(singlet_array, ref_values):\n for cur_val, ref_val in zip(cur_array, ref_array):\n if abs(cur_val - ref_val) > eps:\n return False\n return True", "def pops_agree_50(x):\n return x.open_closed_freqs[0] > .5", "def rectangle_already_tracked(rectangles, rectangle):\n for current_rectangle in rectangles:\n if rectangle_percentage_coincidence(current_rectangle, rectangle) > 0.6:\n return True \n return False", "def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001", "def _is_percent_of_time(percent_of_time):\n assert 0 <= percent_of_time\n assert percent_of_time <= 100\n random_number = random.uniform(0, 100)\n return random_number <= percent_of_time", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, 
np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def is_point_exist(point, a_value, b_value, field):\n\n return (\n (point.y_crd ** 2 -\n (point.x_crd ** 3 + a_value *\n point.x_crd + b_value)) % field == 0 and\n 0 <= point.x_crd < field and 0 <= point.y_crd < field)", "def current_threshold_hit(self):\n\n\t\tnew_current = self.robot.pdp.getCurrent(const.CARGO_PDP_ID)\n\n\t\tself._current_samples.append(new_current)\n\n\t\tif len(self._current_samples) > 10:\n\t\t\tself._current_samples.pop(0)\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._current_samples) / len(self._current_samples)\n\n\t\treturn new_avg > const.CARGO_INTAKE_THRESHOLD", "def test_F(x, y, level):\n if len(x) < 2 or len(y) < 2:\n return True\n vx = np.var(x, 0, ddof=1)\n vy = np.var(y, 0, ddof=1)\n vx, vy = vx[vx*vy>0], vy[vx*vy>0]\n if len(vx)==0:\n return False\n F = vx/vy\n p_value = stat.f.cdf(F, len(x)-1, len(y)-1)\n p_value = 2*np.min([p_value, 1-p_value], axis=0)\n if np.any(p_value < level):\n return False\n else:\n return True", "def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001", "def colisiona(self, r, p):\n # Esta en el eje de las x?\n if p[0] >= r[0] and p[0] <= r[0] + 10:\n # Esta en el eje de las y?\n if p[1] >= r[1] and p[1] <= r[1] + 5:\n return True\n else:\n return False\n else:\n return False", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def check_if_stopping_criterion_is_met(original_training_data_values):\n if len(original_training_data_values)<23:\n return True\n else:\n target_column = original_training_data_values[:, -1]\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n cupcake_ratio = cupcake_muffin_count[0] / (cupcake_muffin_count.sum())\n muffin_ratio = cupcake_muffin_count[1] / (cupcake_muffin_count.sum())\n\n if cupcake_ratio >= 0.9 or muffin_ratio >= 0.9:\n return True\n else:\n return False", "def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return abs(v1.angle - v2.angle) < e", "def check_price(self, price_diff):\n chance = exp(price_diff / self.T)\n\n if price_diff < 0 and not chance > random():\n return True\n \n return False", "def bisecter(func, step=0.1):\n points = list(func.points(step))\n area = sum(map(lambda p: p[1], points))\n\n current = 0.\n for x, y in points:\n current += y\n if current >= area / 2:\n return x", "def isInProlate(sample, alpha, beta): \n E = sample[0] * sample[0] / (alpha * alpha)\n E += (sample[1] * sample[1] + sample[2] * sample[2] ) / (beta * beta)\n if E > 1.0:\n return False\n else:\n return True", "def point_valid(self, pt, samples):\n\n\t cell_coords = 
self.get_cell_coords(pt)\n\t for idx in self.get_neighbours(cell_coords):\n\t nearby_pt = samples[idx]\n\t # Squared distance between or candidate point, pt, and this nearby_pt.\n\t distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n\t if distance2 < (self.r)**2:\n\t # The points are too close, so pt is not a candidate.\n\t return False\n\t # All points tested: if we're here, pt is valid\n\t return True", "def test_visible_ramp(self):\n total_number = 100000\n expected_percentage = .10\n self.feature_test.set_percentage(expected_percentage * 100)\n # Generate a range of user ids and map these ids to the feature\n # test result.\n user_ids = list(range(1, total_number + 1))\n visibility_map = [\n self.feature_test.is_visible(user_id)\n for user_id\n in user_ids\n ]\n # Count the number of success conditions.\n visibility_count = visibility_map.count(True)\n # This should match 10%.\n actual_percentage = visibility_count / float(total_number)\n self.assertAlmostEqual(\n actual_percentage, expected_percentage, delta=.012\n )", "def conf_test(self, trial_coords: np.ndarray) -> bool:\n #N = len(trial_coords)\n r2: float = 0\n i =0\n r2 = np.inner(trial_coords, trial_coords)\n if (r2> self.m_radius2):\n return False\n return True", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def all_shares_above_initial_val(certificate, share_prices, percentage):\n all_above = 
[True if share_prices[name] > percentage * certificate['initial_prices'][name] else False for name in certificate['underlyings']]\n return False not in all_above", "def vf_eval_point(self):\n if self.turns in range(0,(self.game_length + 1),self.Vs_eval_point):\n return True", "def test_samples_close_to_inclusion_probability_ppswor(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n for i in range(n):\n s.process(i, math.log(2.0, math.e))\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def scaffold(points):\n return (\n ((points.sum(dim=-1, keepdim=True) * 10**2 % 10).long() % 2) == 1\n ).float()", "def percent_accuracy(self, true_values, predicted_values):\n\n correct = 0\n size = len(true_values)\n for i in range(len(true_values)):\n true_labels = true_values[i]\n predicted_labels = predicted_values[i]\n predicted_index = np.argmax(predicted_labels)\n\n if true_labels[predicted_index] == 1:\n correct += 1", "def accept_move(misfit_current, likelihood_current, misfit_proposednext):\n if misfit_proposednext <= misfit_current:\n return True\n\n # gaussian likelihood\n P = np.exp(-misfit_proposednext) / likelihood_current\n return True if sample_uniform() < P else False", "def decision(self, probability):\n return random.random() < probability", "def click(self, event):\n now = time.time() * 1000\n if self.time - now > 1000: \n return None\n if distance(self.x, event.x, self.y, event.y) > self.size:\n return False\n elif self.time - now > 500:\n return 50\n elif self.time - now > 200:\n return 100\n else: \n return 200", "def decide(el, il, model, threshold):\n\n if model == 0:\n return el >= threshold[0] and il >=threshold[1]\n elif model == 1:\n return el >= threshold[0] or il >= threshold[1]\n elif model == 2:\n return harmonic_mean([el, il]) >= harmonic_mean(threshold)\n else:\n return bool(round(random.random()))", "def within_value(v1, v2):\n percentage = 0.1\n error_allowed = percentage * v1\n high = v1 + error_allowed\n low = v1 - error_allowed\n\n return low <= v2 <= high", "def check_reached(self):\n m_x, m_y = self.destination.get_pos()\n m_radius = self.destination.radius\n distance_centre = math.sqrt((m_x - self.x)**2 + (m_y - self.y)**2)\n sum_radii = m_radius + self.radius\n if distance_centre < sum_radii:\n self.color = pygame.colordict.THECOLORS['green']\n self.has_reached = True", "def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n test = self.A.dot(point.flatten()) - self.b < ABS_TOL\n return np.all(test)", "def is_confident(self, pi, percentage):\n\n # Compute the upper bound that contains 'percentage' percent of the distribution\n upper_confidence_bound = beta.interval(percentage, self._alpha, self._beta)[1]\n return pi < upper_confidence_bound", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def evaluate(self, threshold=0.5):\n pass", "def match_marking_points(point_a, point_b):\n \n squared_distance_thresh = 0.000277778 # 10 pixel in 600*600 image\n direction_angle_thresh = 0.5235987755982988 # 30 
degree in rad \n \n dist_square = calc_point_squre_dist(point_a, point_b)\n #if min(point_a.shape[1], point_b.shape[1]) <= 2:\n if True:\n return dist_square < squared_distance_thresh\n\n angle = calc_point_direction_angle(point_a, point_b)\n if point_a[3] > 0.5 and point_b[3] < 0.5:\n return False\n if point_a[3] < 0.5 and point_b[3] > 0.5:\n return False\n return (dist_square < squared_distance_thresh\n and angle < direction_angle_thresh)", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def Q4_test():\n chemin = [3,2,1,0]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n d = longueur(chemin, tab_dist)\n return (d > 13.34523076) and (d < 13.34523077)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def checks(self, poly_fit, poly_fitx, poly_fity):\n if self.best_fit is not None:\n if not (np.abs(self.best_fit-poly_fit) <\n np.array([0.001, 1, 500])).all():\n return False\n if self.bestx is not None:\n if np.mean(np.abs(self.bestx-poly_fitx)) > 200:\n return False\n\n return True", "def inconst(p,c):\n for q in c:\n if dist(p,q) <= 3:\n return True\n return False", "def check_point(point,points):\n if point in points:\n return True\n else:\n return False", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def error(self, in_sample=True):\n if in_sample:\n error = 0.0\n for i, point in enumerate(self.X):\n if self.Y[i] != self.rbf_classify(point):\n error += 1\n return error / 100\n else:\n error = 0.0\n for i, point in enumerate(self.test_X):\n if self.test_Y[i] != self.rbf_classify(point):\n error += 1\n return error / 10000", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def withinPercent(val1, val2, percent = 1.):\n if (val1 == np.nan) | (val2 == np.nan) :\n print(\"One of your values is NOT A NUMBER\")\n lowval = np.min(np.array([val1, val2]))\n meanval = np.mean(np.array([val1, val2]))\n absDif = np.abs(np.subtract(val1, val2))\n percentDif = np.abs(100* (absDif/lowval))\n within_percent_bool = percentDif <= percent\n return within_percent_bool, percentDif", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def check_cpgram(y, freq, crit, threshold=0.95):\n cum_sum = np.cumsum(y) / np.sum(y) - 2 
* freq\n in_band = np.sum(np.abs(cum_sum) < crit) / len(freq)\n return in_band >= threshold, in_band", "def is_satisfied(self, offer, basket):\n value_of_matches = D(\"0.00\")\n for line in basket.all_lines():\n if (\n self.can_apply_condition(line)\n and line.quantity_without_offer_discount(offer) > 0\n ):\n price = unit_price(offer, line)\n value_of_matches += price * int(\n line.quantity_without_offer_discount(offer)\n )\n if value_of_matches >= self.value:\n return True\n return False", "def is_on_line(p0, p1, p2, threshold = 0.01):\n p0, p1, p2 = map(lambda tup : np.array(tup[:2]), [p0, p1, p2])\n p1 -= p0\n p2 -= p0\n return abs((p1[0] / p1[1]) - (p2[0] / p2[1])) < threshold", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12" ]
[ "0.64471173", "0.63462853", "0.6216969", "0.62000114", "0.61590135", "0.6073671", "0.60690844", "0.60135484", "0.5999499", "0.59925807", "0.59915054", "0.5986727", "0.59851974", "0.5972579", "0.5971275", "0.5952998", "0.59379154", "0.5935169", "0.59073", "0.58985734", "0.5869307", "0.58553404", "0.5843659", "0.5831268", "0.5830887", "0.5828994", "0.5826468", "0.58240426", "0.5819239", "0.5819239", "0.5819239", "0.5808428", "0.5807951", "0.58070457", "0.5784899", "0.5766322", "0.57618475", "0.5747282", "0.57246387", "0.57109934", "0.5707029", "0.5691354", "0.56904894", "0.5684937", "0.5684937", "0.5684937", "0.5667854", "0.56579924", "0.5656337", "0.5651831", "0.56452656", "0.56445277", "0.5637378", "0.563538", "0.5622732", "0.56204873", "0.56181645", "0.56119084", "0.5603839", "0.5591457", "0.55896556", "0.5582345", "0.5581584", "0.5580683", "0.5576777", "0.55713993", "0.5570226", "0.556577", "0.556214", "0.5558942", "0.55573976", "0.5553471", "0.55438095", "0.5542089", "0.55362767", "0.55323374", "0.55305856", "0.55256516", "0.55253994", "0.5518194", "0.55175376", "0.55167246", "0.55130285", "0.55064327", "0.5504381", "0.5497652", "0.5496848", "0.5495306", "0.54940796", "0.54905176", "0.5485903", "0.54845434", "0.5484494", "0.54841727", "0.54753816", "0.5470682", "0.54690546", "0.54690206", "0.5468605", "0.5468322" ]
0.59552014
15
Gets the percentage of the points which fall into the "condition" (50% or more)
def points_percentage(plane, p, points, total):
    match = 0
    for point in points:
        if distance_to_plane(plane, point) <= p:
            match += 1
    return match / total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bisecter(func, step=0.1):\n points = list(func.points(step))\n area = sum(map(lambda p: p[1], points))\n\n current = 0.\n for x, y in points:\n current += y\n if current >= area / 2:\n return x", "def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def compute_bayesian_threshold(points, nominal_point, confidence_level):\n distances = [np.linalg.norm(p - nominal_point, ord = 1) for p in points]\n confidence_rank = min(math.ceil(len(points) * confidence_level),len(points)-1)\n #print(confidence_level, confidence_rank)\n threshold = np.partition(distances, confidence_rank)[confidence_rank]\n return threshold", "def nearest_test_pulse(self):", "def _find_percents(self, cutoff=0.6):\n self.logger.debug('Attempting to find percents')\n # Only process the bottom 4th of a frame\n cropped_frame = self.frame[(self.height * 3) // 4:self.height, 0:self.width]\n cropped_offset = (0, (self.height * 3) // 4)\n gray_frame = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)\n _, threshold = cv2.threshold(gray_frame, 20, 255, cv2.THRESH_BINARY)\n # Scale the template and attempt a match\n percent_template = self.percent_template\n res = cv2.matchTemplate(threshold, percent_template, cv2.TM_CCOEFF_NORMED)\n # Select all points where match value is above threshold\n loc = np.where(res >= cutoff)\n # Remove near points\n cropped_points = remove_neighbors(list(zip(*loc[::-1])))\n # Return points relative to their original location\n percent_points = [offset(point, cropped_offset) for point in cropped_points]\n return percent_points", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = 
point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff", "def gets_discount(x, y):\n \"*** YOUR CODE HERE ***\"\n return (x <= 12 and y >=65) or (x >=65 and y <= 12)", "def evaluate(self, threshold=0.5):\n pass", "def check_positions_in_range(self):\n reachable = 0\n total = 0\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.close_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.medium_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.far_positions_world)\n\n return float(reachable) / float(total)", "def discrete_potential(function, threshold):\n\n return np.where(function >= threshold, 1, 0)", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def get_points(self, guess, drawn_card, prev_card):\n points_owed = 0\n\n if guess.lower() == \"higher\":\n if drawn_card >= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n elif guess.lower() == \"lower\":\n if drawn_card <= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n return points_owed", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def tracker(world):\n world[world==8] +=92 \n \n conditions = [(world >=1) & (world <10), world >= 50] #Quelle: https://stackoverflow.com/questions/39109045/numpy-where-with-multiple-conditions/39111919\n choices = [True, False]\n infected = np.select(conditions, choices, default=False) #default = False, bedeutet, dass alle, die nicht davon erfasst sind False sind\n return infected", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def get_thresh(amp,c): \n #Helper functions for fitting the psychometric curve, need to be\n #defined within the local scope, so that they can grok the data:\n \n def weib_fit(pars):\n thresh,slope = pars\n return weibull(x,thresh,slope,guess,flake)\n\n def err_func(pars):\n return y-weib_fit(pars)\n\n #Throw away the None's:\n hit_amps = amp[c==1]\n miss_amps = amp[c==0]\n\n # Get rid of floating point error:\n hit_amps = defloaterrorize(hit_amps)\n miss_amps = defloaterrorize(miss_amps)\n\n all_amps = np.hstack([hit_amps,miss_amps])\n stim_intensities = np.unique(all_amps)\n\n n_correct = [len(np.where(hit_amps==i)[0]) 
for i in stim_intensities]\n n_trials = [len(np.where(all_amps==i)[0]) for i in stim_intensities]\n Data = zip(stim_intensities,n_correct,n_trials)\n x = []\n y = []\n n = []\n for idx,this in enumerate(Data):\n #Take only cases where there were at least n_up observations:\n if n_trials[idx]>=self.n_up:\n #Contrast values: \n x = np.hstack([x,this[2] * [this[0]]])\n #% correct:\n y = np.hstack([y,this[2] * [this[1]/float(this[2])]])\n\n initial = np.mean(x),slope\n this_fit , msg = leastsq(err_func,initial)\n return this_fit,x,y", "def calcCondition(edge, x1, y1, x2, y2, left, right, top, bottom):\n\n stat1 = insideWindow(edge, x1, y1, left, right, top, bottom)\n stat2 = insideWindow(edge, x2, y2, left, right, top, bottom);\n\n if(not stat1 and stat2):\n return 1;\n if(stat1 and stat2):\n return 2;\n if(stat1 and not stat2):\n return 3;\n if(not stat1 and not stat2):\n return 4;\n return 0 #never executed", "def filter_out_rare_points(points, threshold_pct=0.5):\n \n c = Counter(points)\n total = sum(c.values())\n l = []\n for p in points:\n v = c[p]\n if v/total * 100 <= threshold_pct:\n l.append(np.nan)\n else:\n l.append(p)\n \n return l", "def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id", "def lower_bound(self) -> float:\n ...", "def current_threshold_hit(self):\n\n\t\tnew_current = self.robot.pdp.getCurrent(const.CARGO_PDP_ID)\n\n\t\tself._current_samples.append(new_current)\n\n\t\tif len(self._current_samples) > 10:\n\t\t\tself._current_samples.pop(0)\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._current_samples) / len(self._current_samples)\n\n\t\treturn new_avg > const.CARGO_INTAKE_THRESHOLD", "def piece_wise_cutoff(dist, cutoff):\n return (0.5 * np.cos(np.pi * dist / cutoff) + 0.5) * (dist <= cutoff)", "def calc(self,newValue):\n return np.sum(self.values<=newValue)/self.n", "def find_points(self, angle):\n angle_rads = angle * 1.0 / 360 * 2 * pi\n points = []\n self.pic.setPenColor(255, 0, 0)\n cur_pos_x = self.xcenter\n cur_pos_y = self.ycenter\n in_line = False\n while self.in_bounds(cur_pos_x, cur_pos_y):\n if in_line:\n if not is_dark(self.pic, cur_pos_x, cur_pos_y):\n in_line = False\n points.append(self.distance(cur_pos_x, cur_pos_y))\n # self.pic.drawCircleFill(cur_pos_x, cur_pos_y, 2)\n else:\n if is_dark(self.pic, cur_pos_x, cur_pos_y):\n in_line = True\n points.append(self.distance(cur_pos_x, cur_pos_y))\n # self.pic.drawCircleFill(cur_pos_x, cur_pos_y, 2)\n cur_pos_x += 3 * cos(angle_rads)\n cur_pos_y += 3 * sin(angle_rads)\n return 
points", "def test_bounds_of_threshold_points(self):\n result = Plugin()._add_bounds_to_thresholds_and_probabilities(\n self.threshold_points, self.probabilities_for_cdf, self.bounds_pairing\n )\n self.assertArrayAlmostEqual(result[0][0], self.bounds_pairing[0])\n self.assertArrayAlmostEqual(result[0][-1], self.bounds_pairing[1])", "def calculate(memory):\r\n dThthreshold = \r\n Ththreshold = f(memory['latitude'],memory['longitude'])\r\n \r\n dTh = sum(i > dThthreshold for i in memory['dTh'])\r\n \r\n return", "def cointoss():\n return random.random() < 0.5", "def checkPointInLampsReach(self, p):\n v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)\n v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)\n\n q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)\n s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)\n t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)\n\n return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)", "def __call__(self, x):\n return np.mean(self.observations <= x)", "def to_points(self, divisions=100):", "def False_positive_rate(result,start,end):\n Wider = 0.0002\n if start.loc[0,'lat'] > end.loc[0,'lat']:\n top = start.loc[0,'lat'] + Wider\n bottom = end.loc[0,'lat'] - Wider\n else:\n top = end.loc[0,'lat'] + Wider\n bottom = start.loc[0,'lat'] - Wider\n if start.loc[0,'lon'] > end.loc[0,'lon']:\n right = start.loc[0,'lon'] + Wider\n left = end.loc[0,'lon'] - Wider\n else:\n right = start.loc[0,'lon'] + Wider\n left = start.loc[0,'lon'] - Wider\n return (~(((result['lat'] >= bottom) & (result['lat'] <= top) \n & (result['lon'] >= left) & (result['lon'] <= right)))).sum()/result.shape[0]", "def heuristic_cost_estimate(self, current):\n relevants = 0\n accurate_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], current.anchor):\n relevants += 1\n if self.pred_sample.iloc[i] == self.pred_example:\n accurate_relevants += 1\n accuracy = accurate_relevants/relevants\n if self.threshold-accuracy <= 0:\n x = 5\n return max(0, self.threshold - accuracy)", "def get_thresholds(kalpha, deltaelow, deltaehigh, maxphotons, nscatter, scatter):\n thresholds = tuple(\n [\n (float(n), float(s), n * kalpha + s * scatter - deltaelow, n * kalpha + s * scatter + deltaehigh, s * scatter)\n for s in range(nscatter + 1, -1, -1)\n for n in range(maxphotons - s + 1)\n if not (n == 0 and s == 0)\n ]\n )\n return thresholds", "def find_exceedences(temp, clim):\n exceed_bool = temp - clim[\"thresh\"]\n exceed_bool[exceed_bool <= 0] = False\n exceed_bool[exceed_bool > 0] = True\n\n # Find contiguous regions of exceed_bool = True\n events, n_events = ndimage.label(exceed_bool)\n return events, n_events", "def passes_thr(self, x, y, values):\n if self.cutmap is None:\n return None\n\n _, _, _, binnumber = binned_statistic_2d(\n x, y, values,\n statistic=\"count\",\n bins=[self.x_bins, self.y_bins],\n expand_binnumbers=True\n )\n\n x_idx, y_idx = binnumber[0, :] - 1, binnumber[1, :] - 1\n\n return values > self.cutmap[x_idx, y_idx]", "def evaluation_point(self):\n if self.turns in range(40,(self.game_length - 19), self.policy_eval_point):\n return True", "def find_x(p, c):\n if p == 0:\n x = int(math.sqrt(c)) + 1\n else:\n x = int(c / (20. 
* p)) + 1\n while True:\n y = (20 * p + x)*x\n if y <= c:\n return x\n else:\n x -= 1", "def compareEvaluate(self, x):\n if abs(x[0]) <= self.scale and abs(x[1]) <= self.scale:\n y = x[1]\n x = x[0]\n return (np.multiply(np.multiply(np.sin(x), np.sin(y)), np.power(x,2)) + np.power(y,2)) - 5 #if x,y are feasible -> solve normally\n else:\n return 20000 # set high to invalidate any answers where x and y are outside our feasible region", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def prevalence(series: np.ndarray,\n condition: Union[bool, int, str, list, tuple, set],\n rate: bool = False) -> Union[int, float]:\n if isinstance(condition, (str, bool, int)):\n condition = [condition]\n elif isinstance(condition, set):\n condition = list(condition)\n elif isinstance(condition, (list, tuple)):\n pass\n else:\n raise TypeError(f\"Type of true conditions {type(condition)} is not acceptable.\")\n\n if rate:\n return np.isin(series, condition).sum() / series.size if series.size > 0 else 0\n else:\n return np.isin(series, condition).sum()", "def cond_prob(self, event, context):\n if self.margin[context] > self.k:\n return super().cond_prob(event, context)\n else:\n return self.backoff.cond_prob(event, context[1:])", "def _compute_cutoffs(self):\n self._cutoffidx=np.zeros(self.nsamples,dtype=np.int)\n # Find the inlfection point\n # TODO: check robustness of this method against fluctuations in the data\n self.samplesdatadiff=np.diff(self.samplesdata,axis=0)\n flex=np.argmax(self.samplesdatadiff,axis=0)\n # if the detected cycles is the last one, then the flex has not yet been reached, warn.\n for i,f in enumerate(flex):\n #self._message(\"(%s) Preanalysis - detection of inflection point.\"%(self.samples[i])) \n if f==(self.nvalues-1):\n self._cutoffidx[i]=f\n self._message(\"Warning: (%s) Inflection point not detected. Using all fluorescent values available (%d cycles).\"%(self.samples[i],f)) \n elif f<10:\n self._message(\"Warning: (%s) Early inflection point (cycle %d).\"%(self.samples[i],f))\n else: \n self._cutoffidx[i]=np.minimum(f+2,self.nvalues)\n #self._message(\"(%s) Inflection point found at cycle %d).\"%(self.samples[i],f)) ", "def findCrossing(y,data):\n y=1; data = wValsArray;\n #a for loop itterates over an array's rows. 
transpose it to get the cols.\n armed = True #\n xsPoints = []\n for w,rho in data:\n if rho > y and armed == True:\n xsPoints.append(w)\n armed = False\n if rho < y:\n armed = True\n return xsPoints", "def prob6():\n domain = np.linspace(-5, 5, 200)\n x = sy.symbols('x')\n poly = 2*x**6 - 51*x**4 + 48*x**3 + 312*x**2 - 576*x - 100\n f = sy.lambdify(x, poly)\n _1deriv = sy.diff(poly, x)\n critical_pts = sy.solve(_1deriv, x)\n _2deriv = sy.diff(_1deriv, x)\n f_2deriv = sy.lambdify(x, _2deriv)\n loc_min = []\n loc_max = []\n for x0 in critical_pts:\n if f_2deriv(x0) > 0:\n loc_min.append(x0)\n if f_2deriv(x0) < 0:\n loc_max.append(x0)\n\n plt.ion()\n plt.plot(domain, f(domain))\n plt.plot(loc_min, f(np.array(loc_min)), 'ro', label=\"local minimum\")\n plt.plot(loc_max, f(np.array(loc_max)), 'bo', label=\"local maximum\")\n plt.legend()\n plt.show()\n\n return set(loc_min), set(loc_max)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def resolve_condition(self, yname, lo, hi):\n return [x for x, y in enumerate(self.ycomp(yname)) if y >= lo and y <= hi]", "def _get_breaking_point(x, y):\n # select threshold where curve break\n slope = (y[-1] - y[0]) / len(y)\n y_grad = np.gradient(y)\n m = list(y_grad >= slope)\n j = m.index(False)\n m = m[j:]\n x = x[j:]\n y = y[j:]\n if True in m:\n i = m.index(True)\n else:\n i = -1\n breaking_point = float(x[i])\n\n return breaking_point, x, y", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def get_mode_c_points(self, dist_mask, threshold_min, threshold_max):\n d_max = dist_mask.max()\n # apply the threshold- different for different type of points\n threshold_mask = (dist_mask > (threshold_min * d_max)) & (\n dist_mask <= (threshold_max * d_max)\n )\n # extract the locations of the points that fulfill that threshold condition\n locations_true = numpy.argwhere(threshold_mask == 1)\n # Randomly pick a point from that list\n smooth_point_list = random.sample(locations_true.tolist(), 1)\n # random.sample returns a list (with one element in this case).\n # Hence extract the element\n smooth_point = smooth_point_list[0]\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * 
DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n return smooth_point_corrected", "def identify_flux(xyz: list) -> list:\n flagged_lines = []\n\n for line in xyz:\n *orig,dollar_amount,pct_amount = line\n if abs(dollar_amount) > THRESHOLDS[0] and abs(pct_amount) > THRESHOLDS[1]:\n flagged_lines.append(line)\n\n\n\n\n return flagged_lines", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def flowvec_points(threshold):\n gmm = GMM(n_components=2).fit(PC_matrix)\n cov = gmm.covariances_\n prob_distr = gmm.predict_proba(PC_matrix)\n \n # determine to which of the two gaussians each data point belongs by looking at probability distribution \n gauss_idx = [i for i in range(len(prob_distr)) \n if (prob_distr[i][0] <= threshold and prob_distr[i][1] <= threshold)]\n return gauss_idx", "def test_samples_close_to_inclusion_probability_ppswor(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n for i in range(n):\n s.process(i, math.log(2.0, math.e))\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def get_reward(self, physics):\n current_mask = np.any(self.image < 100, axis=-1).astype(int)\n area = np.sum(current_mask * self.mask)\n reward = area / np.sum(self.mask)\n\n return reward", "def coverage(self):\r\n return 0, 1", "def precondition(amp):\n n = len(amp)\n mean = np.mean(amp[:n/5])\n return -(amp-mean)", "def expected_return(self, n_step):\r\n value = 0\r\n n_experiences = 50\r\n for i in range(n_experiences):\r\n trajectory = self.domain_exploration(n_step)\r\n value += self.compute_j(trajectory)\r\n return value/n_experiences", "def condition_check(self, tic, condition1=100, condition2=10, condition3=10, earned_animation_duration=1,\r\n defense_duration_mean=30,\r\n defense_duration_sd=5):\r\n if self.counters['1'] == condition1: # changed: added provoked negation from fulfilling any condition\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n self.points += 1\r\n self.earned_point_animation_timer = earned_animation_duration\r\n self.counters['1'] = 0\r\n if self.counters['2'] == condition2:\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n self.stole_point = 1\r\n self.counters['2'] = 0\r\n if self.counters['3'] == condition3: # changed\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n block_value = abs(np.random.normal(defense_duration_mean, defense_duration_sd, 1))\r\n self.defense_blocks = np.append(self.defense_blocks, block_value)\r\n self.defense_blocks_data 
= np.append(self.defense_blocks_data, block_value)\r\n self.counters['3'] = 0", "def inPointing(self, pulsar):\n # initialise offset_deg to be a big old number\n # FWHM is in arcmin so always multiply by 60\n offset_deg = 5.\n\n # loop over pointings\n for point in self.pointingslist:\n # do a really basic check first\n\n glterm = (pulsar.gl - point.gl)**2\n gbterm = (pulsar.gb - point.gb)**2\n offset_new = math.sqrt(glterm + gbterm)\n\n # if the beam is close enough, break out of the loop\n if offset_new < offset_deg:\n offset_deg = offset_new\n self.gain = point.gain\n self.tobs = point.tobs\n \n return offset_deg", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! {} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob", "def _collect_points(self, image, point_value=0):\n return zip(*np.where(image == point_value))", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def _get_pck(self, kp_id, threshold):\n if len(self.data[kp_id]) == 0:\n return None\n\n data = np.array(self.data[kp_id])\n pck = np.mean((data <= threshold).astype('float'))\n return pck", "def afindwithin(data):\r\n numfact = len(data[0])-2\r\n withinvec = [0]*numfact\r\n for col in range(1,numfact+1):\r\n rows = pstats.linexand(data,col,pstats.unique(pstats.colex(data,1))[0]) # get 1 level of this factor\r\n if len(pstats.unique(pstats.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor\r\n withinvec[col-1] = 1\r\n return withinvec", "def expected_result(self, other):\r\n return float(1) / (1 + math.pow(10, float(other.elo - self.elo) / DIVIDER))", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def get_point(img, threshold):\n binary = np.zeros_like(img)\n binary[\n (img > threshold)\n ] = 1\n\n nonzero = binary.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n return nonzeroy, nonzerox", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def hitTest( a, b ):\n r = a.radius + b.radius\n x = abs( a.x - b.x )\n y = abs( a.y - b.y )\n if x <= r and y <= r and x*x + y*y <= r*r:\n return 1\n return 0", "def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds", "def get_limit(series, k=3):\n q1 = series.quantile(q=0.25)\n q3 = series.quantile(q=0.75)\n iqr = q3 - q1\n upper = q3 + 3 * iqr\n lower = q1 - 3 * iqr\n return upper, lower", "def coverage(y_true, y_pred):\n m = tf.shape(y_pred)[1] - 
tf.constant(1, dtype=tf.int32)\n n_samples = tf.cast(tf.shape(y_pred)[0], tf.float32)\n n_abstain = tf.reduce_sum(\n tf.where(tf.argmax(y_pred, axis=1, output_type=tf.int32) == m, 1.0, 0.0)\n )\n return tf.constant(1.0) - n_abstain / n_samples", "def find_optimal_threshold(self, hist):\n\n # print(\"number of pixels using sum: \", sum(hist))\n probability = np.array((1/sum(hist))*hist)\n expected_value = probability*np.array(range(256))\n # print(\"probability: \\n\", probability)\n # print(\"expected_value: \\n\", expected_value)\n\n threshold = len(hist)/2\n temp_threshold = 0\n\n while abs(threshold - temp_threshold) > 0.001:\n temp1 = []\n temp2 = []\n print(\"New threshold: \", threshold)\n for i in range(len(hist)):\n if i < threshold:\n temp1.append(expected_value[i])\n else:\n temp2.append(expected_value[i])\n mean1 = sum(temp1)\n print(\"mean1: \\n\", mean1)\n mean2 = sum(temp2)\n print(\"mean2: \\n\", mean2)\n temp_threshold = threshold\n threshold = (mean1+mean2)/2\n print(\"threshold: \", threshold)\n print(\"temp_threshold: \", temp_threshold)\n\n return threshold", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def test_check_single_threshold(self):\n data = np.array(\n [\n [[13.2, 8.0, 13.2], [-46.0, 8.0, -78.4], [-78.4, -86.5, -89.2]],\n [[34, 31.1111, 34.0], [27.5, 31.1111, 8.0], [8.0, -32.5, -46.0]],\n [[54.8, 54.2222, 54.8], [53.5, 54.2222, 49.6], [49.6, 34, -2.8]],\n ],\n dtype=np.float32,\n )\n\n threshold_coord = find_threshold_coordinate(self.cube)\n cube = next(self.cube.slices_over(threshold_coord))\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def get_bounds():\n return [0.00], [1.00]", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def working(self, location):\n # say we can detect a thermal if the updraft is 5% of it's peak...\n R = numpy.linalg.norm(\n (self._x - location) * numpy.array([1.0, 1.0, 0.0]))\n min_distance = -numpy.log(0.05) * self._r\n if R < min_distance:\n return self._w > 0\n else:\n return None", "def test_samples_close_to_inclusion_probability_priority(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n for i in range(n):\n s.process(i, 1.0)\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def pointfind(plat, plon, lat, lon, pdif = 1):\n\t\n\tfff = 10\n\twhile (fff > 1):\n\t\t\n\t\t#conditions for latitude (lat - 2d array of latitudes)\n\t\tc_lat=(lat>(plat-pdif))&(lat<(plat+pdif))\n\t\t#conditions for longiyude (lon - 2d array of longitudes)\n\t\tc_lon=(lon>(plon-pdif))&(lon<(plon+pdif))\n\t\t\n\t\t#combine both conditions together\n\t\tc_all=c_lat&c_lon\n\t\t\n\t\t#values of the points that fulfil conditions\n\t\tplatf = lat[numpy.nonzero(c_all)]\n\t\tplonf = lon[numpy.nonzero(c_all)]\n\t\t\n\t\t\t\t\n\t\t#indeces of the poin that fulfil conditions \n\t\tg = numpy.nonzero(c_all)\n\t\t\n\t\t\n\t\t#check if we have found uniq solution\n\t\tfff = platf.shape[0]\n\t\t# decrease window to reduce amount of 
solutions if we have more than one\n\t\t#print(pdif)\n\t\tpdif = pdif-0.001\n\tprint(\"coordinates of the point that fulfil conditions: \"+str(platf)+\" \"+str(plonf))\n\tprint(\"indeces of the point that fulfil conditions: \"+str(g[0])+\" \"+str(g[1]))\n\t\n\treturn(g, platf, plonf)", "def current_points(self):\n return self.points_on_level_exit * self.has_exited()", "def perfect_acc(abst_setpoint, gameboard):\n correct_fraction = (gameboard.ncell - gameboard.pr_mislabel * gameboard.nnoisy) / gameboard.ncell\n predicted_fraction = 1.0 - abst_setpoint\n return np.minimum(1.0, correct_fraction/predicted_fraction)", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def example2(N, x):\n\n\tX = np.random.randn(N)\n\tI_estm = np.mean([0 if s>=x else 1 for s in X])\n\tprint(\"simulation estimate:\", I_estm)\n\tprint(\"true value: \", norm.cdf(x))", "def find_points(self):\n\n points = [\n (self.inner_radius, 0, \"straight\"),\n (self.inner_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.arc_height / 2, \"circle\"),\n (self.mid_radius, 0, \"circle\"),\n (self.outer_radius, -self.arc_height / 2, \"straight\"),\n (self.outer_radius, -self.height / 2, \"straight\"),\n (self.inner_radius, -self.height / 2, \"straight\")\n ]\n\n self.points = points" ]
[ "0.6175329", "0.60130394", "0.5999688", "0.5822206", "0.5822206", "0.5821196", "0.5762961", "0.5731761", "0.572735", "0.5670634", "0.5668073", "0.5665829", "0.56631714", "0.56522", "0.5645953", "0.5621847", "0.5578821", "0.55761683", "0.5570446", "0.55677325", "0.55677325", "0.55677325", "0.5560083", "0.5552592", "0.5535444", "0.55279195", "0.551391", "0.5505786", "0.5505169", "0.54972243", "0.5482076", "0.5473096", "0.5470257", "0.5468801", "0.5459324", "0.5442482", "0.5440803", "0.5432981", "0.543033", "0.5429065", "0.54260015", "0.5423305", "0.542021", "0.5418202", "0.54151267", "0.54138386", "0.5409498", "0.5407399", "0.5397801", "0.539662", "0.5392939", "0.5390397", "0.5390397", "0.5390397", "0.5385726", "0.5384069", "0.53774446", "0.53730655", "0.53668785", "0.536403", "0.53629273", "0.53626263", "0.53604674", "0.5359247", "0.53589594", "0.53573954", "0.5354075", "0.5351231", "0.5348443", "0.5346316", "0.5342077", "0.532253", "0.5319551", "0.53167886", "0.5313813", "0.531332", "0.53094745", "0.530238", "0.52994937", "0.52972025", "0.5295237", "0.5288343", "0.5288343", "0.52831143", "0.5280581", "0.5277515", "0.52750903", "0.52746886", "0.5268067", "0.5265292", "0.5264897", "0.5264556", "0.52625436", "0.5257356", "0.52547604", "0.5254031", "0.5251845", "0.5241364", "0.52410334", "0.5237981", "0.5234345" ]
0.0
-1
Running functional tests for multiple parameters/fixtures.
def test_sequence(self, output, input_):
    input_ = "\n".join(input_)
    g = Genes(input_)
    s = Sequence(genes=g, ages=g.size)
    s.run()
    self.assertEquals(s.population.get_survivor(Sequence.IMPOSSIBLE), output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tests():", "def run_tests(tests):\n return [test(t) for t in tests]", "def runTests(self):\n \n pass", "def pytest_generate_tests(metafunc):\n for param in ['env', 'browser', 'logging_level', 'env_file', 'name', 'jenkins_url', 'slack', 'output', 'email_retries',\n 'email_search_errors']:\n option_value = getattr(metafunc.config.option, param)\n if param in metafunc.fixturenames:\n metafunc.parametrize(param, [option_value], scope='session')", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def pytest_generate_tests(metafunc):\n if \"retrospective\" in metafunc.fixturenames:\n metafunc.parametrize(\"retrospective\", [False, True])\n if \"test_type\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_type\", [FILES_TEST, STATE_TEST])\n if \"raise_error\" in metafunc.fixturenames:\n metafunc.parametrize(\"raise_error\", [False, True])", "def run_suite(func):\n print 'testing ',func.__name__\n # create a TestSuite object\n suite = poc_simpletest.TestSuite()\n \n # test func on various inputs\n hand = tuple([])\n suite.run_test(func(hand, 6, 1), 5,\"Test #1:\")", "def pytest_can_run_together(item1, item2):", "def test_1():", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def test_3():", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def pytest_generate_tests(metafunc):\n if \"worker_type\" in metafunc.fixturenames:\n test_params = [[\"thread\", 1, 1], [\"thread\", 2, 2]]\n # if the OS is not Windows / OS X and python version > 2.7 then also do the multiprocess workers testing.\n if platform.system() not in [\"Windows\", \"Darwin\"] and sys.version_info >= (\n 2,\n 7,\n ):\n test_params.extend([[\"process\", 1, 1], [\"process\", 2, 2]])\n\n metafunc.parametrize(\n \"worker_type, workers_count, worker_sessions_count\", test_params\n )", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def pytest_generate_tests(metafunc):\n if \"size1\" in metafunc.fixturenames and \"size2\" in metafunc.fixturenames:\n metafunc.parametrize(\n [\"size1\", \"size2\"], itertools.product([1, 4], [2, 8]))\n if \"lines\" in metafunc.fixturenames:\n metafunc.parametrize(\"lines\", [[], [\"line1\"], [\"line1\", \"line2\"]])", "def 
test_basic_execution(self):", "def spec_tests():\n pass", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def test_by_variable():\n pass", "def run_tests(self, cov, functionsToRun): # pragma: nested\n print(\"runed cases\")\n for context in functionsToRun:\n #print(context)\n info = context.split(\".\")\n suite_name =info[0]\n #print(suite_name)\n className = info[1]\n caseName = info[2]\n cov.start()\n suite = import_local_file(suite_name)\n #print(dir(suite))\n try:\n # Call all functions in this module\n for name in dir(suite):\n variable = getattr(suite, name)\n #print(\"variable.__name__\")\n #print(variable.__name__)\n if inspect.isclass(variable) and variable.__name__== className:\n obj = variable()\n \n memberNames = inspect.getmembers(variable,inspect.isfunction)\n \n for member in memberNames:\n if member[0].startswith('test_') and member[0] == caseName:\n \n print(context)\n getattr(obj, member[0])()\n #if inspect.isfunction(variable):\n # variable()\n finally:\n cov.stop()", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def main():\n run_test_all()", "def _run_local_tests(self, *args, **kwargs):\n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: 
{err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))", "def run_tests():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Suite or test name\")\n parser.add_argument('-b','--bin-dir',help=\"Directory where Firebird binaries tools are\")\n parser.add_argument('-d','--db-dir',help=\"Directory to use for test databases\")\n parser.add_argument('--archive',action='store_true',help=\"Save last run results to archive\")\n parser.add_argument('--rerun',action='store_true',help=\"Run only tests that don't PASSed in last run\")\n parser.add_argument('--untested',action='store_true',help=\"Run only tests that were UNTESTED in last run\")\n parser.add_argument('-v','--verbose',action='store_true',help=\"Be more verbose\")\n parser.add_argument('--verbosity',type=int,choices=[0,1,2],default=1,help=\"Set verbosity; --verbosity=2 is the same as -v\")\n parser.add_argument('-q','--quiet',action='store_true',help=\"Be less verbose\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Provides test results also in the standard XUnit XML format\")\n parser.add_argument('-e','--expect',type=str,metavar=\"FILENAME\",help=\"Test results file to be used as expeted outcomes\")\n if rpyc_available:\n parser.add_argument('--remote',action='store_true',help=\"Connect to remote fbtest server\")\n\n parser.add_argument('-u','--update',action='store_true',help=\"Update last run results with re-run results\")\n parser.add_argument('-w','--password',help=\"SYSDBA password\")\n parser.add_argument('-o','--host',help=\"Remote Firebird or fbtest host machine identification\")\n parser.add_argument('-p','--person',help=\"QA person name\")\n parser.add_argument('-a','--arch',help=\"Firebird architecture: SS, CS, SC, EM\")\n parser.add_argument('-s','--sequence',type=int,help=\"Run sequence number for this target\")\n parser.add_argument('-k','--skip',help=\"Suite or test name or name of file with suite/test names to skip\")\n parser.add_argument('-c','--client',help=\"Use specified Firebird client library\")\n parser.set_defaults(rerun=False,untested=False,update=False,server=False,register=False,\n remote=False,host='localhost',password='masterkey',\n sequence=1,arch='SS',person=UNKNOWN)\n\n script_runner.run_tests(parser.parse_args())", 
"def pytest_generate_tests(metafunc):\n\n # test is setup or teardown - parametrize to all scenarios\n if metafunc.function.__name__ in [\"test_setup\", \"test_teardown\"]:\n metafunc.parametrize(\n \"scenario\", Scenario.scenarios.values())\n\n # parameterize test for each scenario it is included in\n else:\n metafunc.parametrize(\n \"scenario\", metafunc.cls._scenarios)", "def task_test(argv):\n run_tests(\"python2\", argv)\n run_tests(\"python3\", argv)", "def test_Utilities__test_1():\n assert test(True, 1, False, 2) == 1\n assert test(False, 1, True, 2) == 2\n assert test(False, 1, False, 2, True, 3) == 3", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! You passed all the game test cases.\")", "def runtest(self):", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' tests failed')\n if failing_tests != []:\n sys.exit(1)", "def run_tests():\n\n parser = argparse.ArgumentParser(description=\"Run tests for the T2K Data Manager.\")\n parser.add_argument(\n \"-w\", \"--write\", action=\"store_true\", help=\"do write tests. Default: read only\"\n )\n parser.add_argument(\n \"-t\",\n \"--tape\",\n action=\"store_true\",\n help=\"do write tape storage tests. Default: disks only\",\n )\n parser.add_argument(\n \"-p\",\n \"--parallel\",\n default=2,\n type=int,\n help=\"specify how many parallel processes to test. 
Defaul: 2\",\n )\n parser.add_argument(\n \"-b\", \"--backend\", default=None, help=\"specify which backend to use\"\n )\n\n args = parser.parse_args()\n if args.backend is not None:\n dm.config.backend = args.backend\n dm.backend = backends.get_backend(dm.config)\n\n run_read_only_tests(tape=args.tape, parallel=args.parallel)\n if args.write:\n run_read_write_tests(tape=args.tape, parallel=args.parallel)\n\n print_(\"All done.\")", "def test_single_test_case():\n pass", "def test_a():\n foo_do(4)\n foo_do(\"hello\")\n bar_do([1,2,3])", "def test_2():", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def run_tests(self):\n raise NotImplementedError", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def test_main():\n # Setup\n # Exercise\n # Verify", "def _run_test_fn(self, artifacts, test_fn):\n for artifact in artifacts:\n self._context.set_any(artifact.type.value, artifact)\n test_fn(self._context)", "def runalltests():\n doctest.testmod()", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def run_tests(params):\n default_test_params = [\n \"./tests\",\n \"-vv\",\n \"--json-report\",\n \"--key-file\",\n DEFAULT_KEY_FILE.name\n ]\n\n with DEBUG_LOG_FILE.open(\"a\") as debug_out:\n with redirect_stdout(debug_out):\n with redirect_stderr(sys.stdout):\n return pytest.main(default_test_params + params)", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def pytest_generate_tests(metafunc):\n if (\"solver\" in metafunc.fixturenames\n and \"coefficients\" in metafunc.fixturenames):\n _parametrize_solver_coefficients(metafunc)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def _validate_tests(self):\n tests_path = Path(__file__).parent / \"testing_functions\"\n self.params.setdefault(\"tests\", [])\n if not isinstance(self.params[\"tests\"], (list, tuple)):\n raise SpecificationError(\n \"Value of key 'tests' must be an iterable of dictionaries\"\n )\n else:\n if any(not isinstance(j, dict) for j in self.params[\"tests\"]):\n raise SpecificationError(\"Every item in 'tests' must be a dictionary.\")\n for el in self.params[\"tests\"]:\n test_script = el.get(\"script\", None)\n if not test_script or not isinstance(test_script, str):\n raise SpecificationError(\n \"'tests' have to have 'script' field and it has to be a str\"\n )\n if (self.params[\"scripts\"] / test_script).is_file():\n el[\"script\"] = self.params[\"scripts\"] / test_script\n elif (tests_path / el[\"script\"]).is_file():\n el[\"script\"] = tests_path / el[\"script\"]\n else:\n raise FileNotFoundError(\n \"Script from test does not exist: {}\".format(test_script)\n )\n # TODO: adding checks for each of the element of tests", "def test(command, options=\"\"):\n\n print(\n \"\"\"\nRunning pytest the test framework\n=================================\n\"\"\"\n )\n for dir_ in TEST_DIRECTORIES:\n test_dir(command, 
options=options, dir_=dir_)\n # command.run(f\"python -m pytest {options} {' '.join(dir_ for dir_ in TEST_DIRECTORIES)}\", echo=True, pty=POSIX)\n\n print(\n \"\"\"\nAll Testing Directories Passed Successfully\n===========================================\n\"\"\"\n )", "def main():\n for filename in sys.argv[1:]:\n test(filename)", "def run_tests(self, test_labels, extra_tests=None, **kwargs):\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n elif self.verbosity == 2:\n argv.append('--verbose')\n elif self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n if self.junit_xml:\n argv.append(f'--junit-xml={self.junit_xml}')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def test_T1():", "def test_T1():", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def test_get_scenarios(self):\n pass", "def test_all_args(self, junit4_hooks, full_args):\n junit4_hooks.parse_args(full_args)\n\n assert junit4_hooks._master_repo_names == MASTER_REPO_NAMES\n assert junit4_hooks._reference_tests_dir == RTD\n assert junit4_hooks._ignore_tests == IGNORE_TESTS\n assert junit4_hooks._hamcrest_path == HAMCREST_PATH\n assert junit4_hooks._junit_path == JUNIT_PATH\n assert junit4_hooks._verbose == VERBOSE\n assert junit4_hooks._very_verbose == VERY_VERBOSE\n assert junit4_hooks._disable_security == DISABLE_SECURITY\n assert junit4_hooks._run_student_tests == RUN_STUDENT_TESTS\n assert junit4_hooks._timeout == TIMEOUT", "def main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)", "def run_tests(event, context):\n try:\n jobId = event['CodePipeline.job']['id']\n user_parameters = json.loads(event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters'])\n runscope_trigger_url = user_parameters['runscopeTriggerUrl']\n runscope_access_token = user_parameters['runscopeAccessToken']\n\n tests = start_tests(runscope_trigger_url)\n aggregate_status = wait_for_tests_to_complete(tests, runscope_access_token)\n if aggregate_status == \"pass\":\n code_pipeline.put_job_success_result(jobId=jobId)\n else:\n code_pipeline.put_job_failure_result(jobId=jobId, failureDetails={\n 'type': 'JobFailed',\n 'message': 'One or more tests failed'\n })\n except:\n code_pipeline.put_job_failure_result(jobId=jobId, failureDetails={\n 'type': 'JobFailed',\n 'message': 'Unhandled exception during Runscope tests execution'\n })", "def test_something():", "def test_5():", "def RunSuite(config, files, extra_flags, errors):\n global ERRORS, CONCURRENCY\n Banner('running %d tests' % (len(files)))\n pool = multiprocessing.Pool(processes=CONCURRENCY)\n # create a list of run arguments to map over\n argslist = [(num, len(files), config, test, extra_flags)\n for num, test in enumerate(files)]\n # let the process pool handle the test assignments, order doesn't matter\n 
pool.map(RunTest, argslist)\n while not ERRORS.empty():\n phase, test = ERRORS.get()\n errors[phase].append(test)", "def help_test_runner(self, ninputs, niter):\n v_val = []\n w_val = []\n for i in range(ninputs):\n v_val += [np.random.rand(100, 10)]\n w_val += [np.random.rand(100, 1)]\n fvals = self.runner.run({'v': v_val[i], 'w': w_val[i]})\n self.assertTrue(len(fvals) == 0)\n self.assertFalse(self.runner.is_finished())\n\n for i in range(niter-ninputs-1):\n self.assertFalse(self.runner.is_finished())\n fvals = self.runner.run()\n self.assertTrue(len(fvals) == 0)\n self.assertFalse(self.runner.is_finished())\n\n for i in range(ninputs):\n self.assertFalse(self.runner.is_finished())\n fvals = self.runner.run()\n self.assertTrue('v' in fvals and 'w' in fvals)\n self.assertTrue(np.allclose(fvals['v'], v_val[i]))\n self.assertTrue(np.allclose(fvals['w'], w_val[i]))\n\n self.assertTrue(self.runner.is_finished())", "def test_4():", "def test_T3():", "def test_T3():", "def fixtures():", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def test_1():\n results = base_tests()\n assert type(results) is list\n assert type(results[0]) is dict\n assert len(results) == 3", "def run_test_suite(*args):\n test_args = list(args) or []\n execute_from_command_line([\"manage.py\", \"test\"] + test_args)", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def test1():\n for test in pkl.load(open(TEST_RESOURCES_DIR / \"regression_vault.pkl\", \"rb\"))[:5]:\n init_dict, rslt = test\n np.testing.assert_array_equal(run_regression_test(init_dict), rslt)", "def test_all_envs(func):\n register_tests(func, [func.__name__ + '_emulator',\n func.__name__ + '_verilator'])", "def test_all_asserts():\n \n test_remove_punctuation()\n test_prepare_text()\n test_string_concatenator()\n test_list_to_string()\n test_end_chat()\n test_check_link()\n test_check_region()\n test_check_area()\n test_check_city()\n test_check_industry()\n test_check_back()\n test_check_alumni_region()\n test_check_alumni_area()\n test_check_alumni_city()\n test_check_alumni_industry()", "def run_tests(args, applog):\n try:\n additional_args = []\n additional_args.extend([\"--pyargs\", \"dent_os_testbed.test.test_suite\", \"--strict-markers\"])\n 
additional_args.append(\"--durations=0\")\n if args.stdout:\n additional_args.append(\"--capture=tee-sys\")\n\n suite_groups = args.suite_groups if args.suite_groups else PYTEST_SUITE_GROUPS.keys()\n for sg_name in suite_groups:\n sg = PYTEST_SUITE_GROUPS[sg_name]\n pytest_args = []\n if not sg:\n continue\n pytest._current_suite = sg_name\n pytest_args.append(\"-m\")\n markers_string = sg[0] + \"\".join([(\" or %s\" % suite) for suite in sg[1:]])\n pytest_args.append(markers_string)\n pytest_args.append(\"--html=%s/report_%s.html\" % (LOGDIR, sg_name))\n pytest_args.append(\"--junitxml=%s/junit_%s.xml\" % (LOGDIR, sg_name))\n pytest_args.append(\"--self-contained-html\")\n if args.suite_tests:\n pytest_args.append(\"-k\")\n pytest_args.append(args.suite_tests)\n input_args = additional_args + pytest_args\n applog.info(\"Triggering pytest with args : %s\" % input_args)\n pytest.main(input_args)\n except Exception as e:\n applog.exception(\"Error running tests\", exc_info=e)\n raise", "def tests():\n api.local('nosetests')", "def test_multiple_commands_at_same_time(self):", "def test_run_all_searches(): # ***Incomplete test\n ##########################\n # Arrange.\n query_file_list = \"query_file_list\"\n\n ##########################\n # Act.\n #x = run_all_searches(query_file_list)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def pytest_generate_tests(metafunc):\n\t\n\tif not metafunc.cls:\n\t\treturn\n\t\n\tinst = metafunc.cls()\n\t\n\tif 'valid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('valid', inst.valid)\n\t\n\tif 'invalid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('invalid', inst.invalid)", "def run_tests():\n argument_parser = ArgumentParser(description=\"Run all tests for {{project name}}\")\n #TODO add some configuration here\n\n settings.configure(**{\n \"DATABASE_ENGINE\" : \"django.db.backends.sqlite3\",\n \"DATABASE_NAME\" : \"sqlite://:memory:\",\n \"ROOT_URLCONF\" : \"tests.urls\",\n \"TEMPLATE_LOADERS\" : (\n \"django.template.loaders.filesystem.load_template_source\",\n \"django.template.loaders.app_directory.load_template_source\",\n ),\n \"TEMPLATE_DIRS\" : (\n path(__file__).dirname() / 'templates',\n ),\n \"INSTALLED_APPS\" : (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n '{{ project_name }},\n ),\n })\n call_command(\"test\")", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def main(*arguments):\n\n args = parse_args(arguments)\n\n if args.test_suite is not None:\n test_suite = report_manager.load_test_suite_conf(args.test_suite)\n for i, test in enumerate(test_suite):\n args = parse_args(test)\n process_args_and_run(args, test_suite_iter=i)\n else:\n process_args_and_run(args)", "def test_generate_all_testing(self):\n pass", "def pytest_generate_tests(metafunc):\n from datastructures.tests._test_trees_data import \\\n ids, \\\n inputs, \\\n expected_list, \\\n expected_items_list, \\\n expected_tree, \\\n expected_items_tree, \\\n expected_len, \\\n expected_valid_BST, \\\n 
shuffled_inputs, \\\n is_equal\n\n if 'get_test_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_list_data',\n list(zip(inputs, expected_list)),\n ids=ids)\n\n if 'get_test_items_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_list_data',\n list(zip(inputs, expected_items_list)),\n ids=ids)\n\n if 'get_test_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_tree_data',\n list(zip(inputs, expected_tree)),\n ids=ids)\n\n if 'get_test_items_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_tree_data',\n list(zip(inputs, expected_items_tree)),\n ids=ids)\n\n if 'get_test_len_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_len_data',\n list(zip(inputs, expected_len)),\n ids=ids)\n\n if 'get_test_valid_BST_glassbox' in metafunc.fixturenames:\n metafunc.parametrize('get_test_valid_BST_glassbox',\n list(zip(inputs, expected_valid_BST)),\n ids=ids)\n\n if 'get_test_eq' in metafunc.fixturenames:\n metafunc.parametrize('get_test_eq',\n list(zip(inputs, shuffled_inputs, is_equal)),\n ids=ids)", "def test_batch(self):\n pass", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def simple_behavior_test(test_files):\n parse_config_file_and_execute_run(test_files, overwrite=True)", "def run(self):\n\n runSuccess = True\n\n for value in self._shouldWork:\n value = value.normalize()\n print('Testing %s for %s' % (value, self._paramPath))\n\n for testName, testFunc in self._chainingTests:\n value, success = testFunc(value)\n if not success:\n runSuccess = False\n print(\"%s ERROR for %s\" % (testName, self._paramPath))\n break\n\n for value in self._shouldBreak:\n value = value.normalize()\n print('Testing invalid value %s for %s' % (value, self._paramPath))\n value, success = self.checkBounds(value)\n if success:\n runSuccess = False\n print(\"ERROR: This test should have failed but it has not\")\n\n return runSuccess", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def main():\n print(\"My test started\")\n\n ten = my_function_1(5, 5)\n twenty = my_function_2(5, 5, 10)\n\n print(\"My test finished\")", "def AddAllParametricTests():\n # Add all _CheckElem() test cases.\n AddParametricTests('AddElem',\n {'linebreak': (True, False),\n 'indent': (0, 1, 2),\n 'convert': (str, lambda s: s[::-1]),\n 'is_present': (True, False),\n 'is_mandatory': (True, False),\n 'is_submsg': (True, False)})\n\n # Add all _Add{Mandatory,Optional}Field tests.\n AddParametricTests('AddField',\n {'is_mandatory': (True, False),\n 'linebreak': (True, False),\n 'indent': (0, 1, 2),\n 'convert': (str, lambda s: s[::-1]),\n 'is_present': (True, False)})\n\n # Add all _Add{Mandatory,Optional}SubMsg tests.\n AddParametricTests('AddSubMsg',\n {'is_mandatory': (True, False),\n 'is_present': (True, False)})\n\n # Add all _CheckManifest() test cases.\n 
AddParametricTests('CheckManifest',\n {'fail_mismatched_block_size': (True, False),\n 'fail_bad_sigs': (True, False),\n 'fail_mismatched_oki_ori': (True, False),\n 'fail_bad_oki': (True, False),\n 'fail_bad_ori': (True, False),\n 'fail_bad_nki': (True, False),\n 'fail_bad_nri': (True, False),\n 'fail_old_kernel_fs_size': (True, False),\n 'fail_old_rootfs_fs_size': (True, False),\n 'fail_new_kernel_fs_size': (True, False),\n 'fail_new_rootfs_fs_size': (True, False)})\n\n # Add all _CheckOperation() test cases.\n AddParametricTests('CheckOperation',\n {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',\n 'MOVE', 'BSDIFF', 'SOURCE_COPY',\n 'SOURCE_BSDIFF', 'PUFFDIFF',\n 'BROTLI_BSDIFF'),\n 'is_last': (True, False),\n 'allow_signature': (True, False),\n 'allow_unhashed': (True, False),\n 'fail_src_extents': (True, False),\n 'fail_dst_extents': (True, False),\n 'fail_mismatched_data_offset_length': (True, False),\n 'fail_missing_dst_extents': (True, False),\n 'fail_src_length': (True, False),\n 'fail_dst_length': (True, False),\n 'fail_data_hash': (True, False),\n 'fail_prev_data_offset': (True, False),\n 'fail_bad_minor_version': (True, False)},\n validate_func=ValidateCheckOperationTest)\n\n # Add all _CheckOperations() test cases.\n AddParametricTests('CheckOperations',\n {'fail_nonexhaustive_full_update': (True, False)})\n\n # Add all _CheckOperations() test cases.\n AddParametricTests('CheckSignatures',\n {'fail_empty_sigs_blob': (True, False),\n 'fail_missing_pseudo_op': (True, False),\n 'fail_mismatched_pseudo_op': (True, False),\n 'fail_sig_missing_fields': (True, False),\n 'fail_unknown_sig_version': (True, False),\n 'fail_incorrect_sig': (True, False)})\n\n # Add all _CheckManifestMinorVersion() test cases.\n AddParametricTests('CheckManifestMinorVersion',\n {'minor_version': (None, 0, 1, 2, 3, 4, 5, 555),\n 'payload_type': (checker._TYPE_FULL,\n checker._TYPE_DELTA)})\n\n # Add all Run() test cases.\n AddParametricTests('Run',\n {'rootfs_part_size_provided': (True, False),\n 'kernel_part_size_provided': (True, False),\n 'fail_wrong_payload_type': (True, False),\n 'fail_invalid_block_size': (True, False),\n 'fail_mismatched_metadata_size': (True, False),\n 'fail_mismatched_block_size': (True, False),\n 'fail_excess_data': (True, False),\n 'fail_rootfs_part_size_exceeded': (True, False),\n 'fail_kernel_part_size_exceeded': (True, False)})", "def do_it(args):\n\n #force = args.force\n #testing = args.testing\n #verbose = args.verbose\n #regions = args.regions\n\n # XXX WORKING HERE" ]
[ "0.6993551", "0.69358134", "0.6821566", "0.68078095", "0.6793133", "0.6716086", "0.66775966", "0.6666555", "0.6655967", "0.66501856", "0.66326547", "0.66166264", "0.66117173", "0.6609923", "0.6597693", "0.65284544", "0.6498083", "0.6488252", "0.64796436", "0.6477362", "0.6452625", "0.6440838", "0.6440685", "0.6430273", "0.64245194", "0.64156896", "0.64073616", "0.640259", "0.64003533", "0.6399709", "0.63988465", "0.6396776", "0.63912815", "0.6390259", "0.63769835", "0.63765734", "0.63722736", "0.6368706", "0.6361833", "0.6351681", "0.63503987", "0.632933", "0.63252634", "0.6317878", "0.6288666", "0.6279352", "0.6269222", "0.6262087", "0.6253137", "0.62236893", "0.6220128", "0.62176234", "0.6210407", "0.62099665", "0.6204194", "0.620183", "0.6179351", "0.6179001", "0.61704534", "0.61704534", "0.61657715", "0.6162895", "0.6153813", "0.6152455", "0.6152296", "0.6151245", "0.61496395", "0.61489654", "0.6138477", "0.61318177", "0.6115535", "0.6115535", "0.6107789", "0.61076397", "0.6100114", "0.6099792", "0.6099695", "0.609918", "0.60983336", "0.6097854", "0.60958326", "0.6087331", "0.60862607", "0.6082245", "0.6081795", "0.6078696", "0.60772204", "0.6075776", "0.6072205", "0.60602766", "0.60594153", "0.60532445", "0.60396475", "0.60382503", "0.6032371", "0.60242647", "0.60232127", "0.60099757", "0.60032475", "0.59999067", "0.5998157" ]
0.0
-1
Returns true if c is a printable character. We do this by checking for ord value above 32 (space), as well as CR (\r), LF (\n) and tab (\t)
def is_printable(c): return ord(c)>=32 or c in ['\r','\n', '\t']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "def is_printable(b):\n return b in e(string.printable)", "def is_p4d_printable(c):\n if ord(c) < 0x20:\n return False\n if ord(c) == 0x7F:\n return False\n return True", "def ascii_printable(s: str) -> bool:\n return frozenset(s).issubset(_ascii_pa)", "def _is_control(char):\n if char == '\\t' or char == '\\n' or char == '\\r':\n return False\n cat = unicodedata.category(char)\n if cat.startswith('C'):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False # pragma: no cover\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True # pragma: no cover\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def isPrintableKey(event_string):\n\n if event_string == \"space\":\n reply = True\n else:\n unicodeString = event_string.decode(\"UTF-8\")\n reply = (len(unicodeString) == 1) \\\n and (unicodeString.isalnum() or unicodeString.isspace()\n or unicodedata.category(unicodeString)[0] in ('P', 'S'))\n debug.println(debug.LEVEL_FINEST,\n \"orca.isPrintableKey: returning: %s\" % reply)\n return reply", "def is_ascii(token):\n\n printable = set(string.printable)\n\n for char in token:\n if char not in printable:\n return False\n\n return True", "def isascii(s):\n return len(s) == len(s.encode())", "def isascii(s):\n return len(s) == len(s.encode())", "def is_ascii_chars(text):\n is_ascii = True\n try:\n text.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n is_ascii = False\n return is_ascii", "def is_string_printable(string_):\n return set(string_) - set(string.printable)", "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True # pragma: no cover\n return False", "def ishex(char: chr) -> bool:\n return char.isdigit() or char in \"abcdef\"", "def _is_whitespace(char):\n if char == ' ' or char == '\\t' or char == '\\n' or char == '\\r':\n return True\n cat = unicodedata.category(char)\n if cat == 'Zs':\n return True\n return False", "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = 
unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False", "def __contains_nonascii_characters(string):\n for c in string:\n if not ord(c) < 128:\n return True\n return False", "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False", "def isChar(ch):\n ret = libxml2mod.xmlIsChar(ch)\n return ret", "def ascii_hexchar(s: str) -> bool:\n return frozenset(s).issubset(_ascii_h)", "def _isascii(string):\n try:\n return len(string) == len(string.encode())\n except UnicodeDecodeError:\n return False\n except UnicodeEncodeError:\n return False", "def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False", "def is_valid_char(src):\n\n return src.isalnum()", "def isAlphanum(c):\r\n return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or\r\n (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\\\' or (c is not None and ord(c) > 126));", "def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)", "def is_character_key(self, p_event):\n p = rffi.cast(RSDL.KeyboardEventPtr, p_event)\n keycode = rffi.getintfield(p.c_keysym, 'c_sym')\n return RSDL.K_BACKSPACE <= keycode <= RSDL.K_z \\\n or RSDL.K_WORLD_0 <= keycode <= RSDL.K_KP_EQUALS \\\n or keycode == RSDL.K_EURO # whoever came up with this being beyond the modifier keys etc...", "def __valid_char(self, char: str) -> bool:\r\n if char.isdigit():\r\n raise ValueError('Characters can\\'t be numbers')\r\n\r\n return char.isalpha() or char.isspace()", "def is_char_token(c: str) -> bool:\n return c in [\"+\", \"-\", \"*\", \"/\", \"(\", \")\"]", "def is_allowed_char(ch):\n\treturn ch.isalnum() or ch in \"#.>+*:$-_!@\"", "def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)", "def is_punct_char(char):\n\treturn char in string.punctuation #1 is punctuation, 0 is not punctuation", "def is_letter(c):\n return 'A' <= c <= 'Z' or 'a' <= c <= 'z'", "def is_num_char(x):\n return ord('0') <= ord(x) <= ord('9')", "def has_invalid_characters(filen=None,text=None):\n if filen is not None:\n with open(filen,'r') as fp:\n for line in fp:\n for c in set(line.replace('\\n','').replace('\\t','')):\n if ord(c) > 127 or ord(c) < 32:\n return True\n else:\n for c in set(text.replace('\\n','').replace('\\t','')):\n if ord(c) > 127 or ord(c) < 32:\n return True\n return False", "def printable(a):\n\treturn \"\".join([\n\t\tchr(c).isprintable() and chr(c) or \"\\\\x{0:02x}\".format(c)\n\t\tfor c in a\n\t])", "def _is_punctuation(char):\n cp = ord(char)\n if cp >= 33 and cp <= 47 or cp >= 58 and cp <= 64 or cp >= 91 and cp <= 96 or cp >= 123 and cp <= 126:\n return True\n cat = unicodedata.category(char)\n if cat.startswith('P'):\n return True\n return False", "def __isHexString(self, text):\n return all(map(lambda c: c in \"0123456789abcdefABCDEF\", text))", "def is_char(self, size=None):\n return False", "def isLetter(c):\n ret = libxml2mod.xmlIsLetter(c)\n return ret", "def _is_chinese_char(cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean 
characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if (\n (cp >= 0x4E00 and cp <= 0x9FFF)\n or (cp >= 0x3400 and cp <= 0x4DBF) #\n or (cp >= 0x20000 and cp <= 0x2A6DF) #\n or (cp >= 0x2A700 and cp <= 0x2B73F) #\n or (cp >= 0x2B740 and cp <= 0x2B81F) #\n or (cp >= 0x2B820 and cp <= 0x2CEAF) #\n or (cp >= 0xF900 and cp <= 0xFAFF)\n or (cp >= 0x2F800 and cp <= 0x2FA1F) #\n ): #\n return True\n\n return False", "def _is_chinese_char(cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((0x4E00 <= cp <= 0x9FFF) or #\n (0x3400 <= cp <= 0x4DBF) or #\n (0x20000 <= cp <= 0x2A6DF) or #\n (0x2A700 <= cp <= 0x2B73F) or #\n (0x2B740 <= cp <= 0x2B81F) or #\n (0x2B820 <= cp <= 0x2CEAF) or\n (0xF900 <= cp <= 0xFAFF) or #\n (0x2F800 <= cp <= 0x2FA1F)): #\n return True\n\n return False", "def readable(self):\n return self._cc[13] == 0", "def keep_chr(char):\n return (unicodedata.category(char).startswith('P') and\n (char != \"#\" and char != \"@\" and char != \"&\"))", "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (\n (cp >= 33 and cp <= 47)\n or (cp >= 58 and cp <= 64)\n or (cp >= 91 and cp <= 96)\n or (cp >= 123 and cp <= 126)\n ):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True # pragma: no cover\n return False", "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((33 <= cp <= 47) or (58 <= cp <= 64) or\n (91 <= cp <= 96) or (123 <= cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "def isUnicodeEmoji(c : str) -> bool:\n return c in UNICODE_EMOJI", "def isemoji(c):\n if type(c) == str:\n c = c.encode('utf-8')\n c = bytes(c)\n return c.decode() in UNICODE_EMOJI", "def has_ascii_name(self):\n return self.unpack_word(0x10) & 1 == 1", "def is_ascii(self, rule_string):\r\n return len(rule_string) == len(rule_string.encode())", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def _is_chinese_char(self, cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((0x4E00 <= cp <= 0x9FFF) or #\n (0x3400 <= cp <= 0x4DBF) or #\n (0x20000 <= cp <= 0x2A6DF) or #\n (0x2A700 <= cp <= 0x2B73F) or #\n (0x2B740 <= cp <= 0x2B81F) or #\n (0x2B820 <= cp <= 0x2CEAF) or\n (0xF900 <= cp <= 0xFAFF) or #\n (0x2F800 <= cp <= 0x2FA1F)): #\n return True\n\n return False", "def _is_chinese_char(self, cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if (\n (cp >= 0x4E00 and cp <= 0x9FFF)\n or (cp >= 0x3400 and cp <= 0x4DBF) #\n or (cp >= 0x20000 and cp <= 0x2A6DF) #\n or (cp >= 0x2A700 and cp <= 0x2B73F) #\n or (cp >= 0x2B740 and cp <= 0x2B81F) #\n or (cp >= 0x2B820 and cp <= 0x2CEAF) #\n or (cp >= 0xF900 and cp <= 0xFAFF)\n or (cp >= 0x2F800 and cp <= 0x2FA1F) #\n ): #\n return True # pragma: no cover\n\n return False", "def contains_only_char(s, char):\n for c in s:\n if c != char:\n return False\n return True", "def is_input_valid(char):\n\n # is there a char at all?\n if char is None:\n return False\n\n # check for embedded 0 byte\n if char == \"\\0\":\n return False\n\n return True", "def isspace(self):\n return isspace(self)", "def ascii_alphanumeric(s: str) -> bool:\n return frozenset(s).issubset(_ascii_an)", "def is_usascii(value):\n try:\n # if value is byte string, it will be decoded first using us-ascii\n # and will generate UnicodeEncodeError, this is fine too\n value.encode('us-ascii')\n except UnicodeError:\n return False\n \n return True", "def isspace(self) -> bool:\n pass", "def _is_chinese_char(self, cp):\n if cp >= 19968 and cp <= 40959 or cp >= 13312 and cp <= 19903 or cp >= 131072 and cp <= 173791 or cp >= 173824 and cp <= 177983 or cp >= 177984 and cp <= 178207 or cp >= 178208 and cp <= 183983 or cp >= 63744 and cp <= 64255 or cp >= 194560 and cp <= 195103:\n return True\n return False", "def ascii_print(string):\n string = ANSI_ESCAPE.sub(\"\", string)\n print(\"\".join(ch for ch in string if ch == \"\\n\" or unicodedata.category(ch)[0] != \"C\"))", "def convert_to_printable(s):\n if is_printable(s):\n return s\n return \"\".join(convert_char(c) for c in s)", "def is_printing(line):\r\n return line.startswith('G1 ') and 'X' in line and 'Y' in line and 'E' in line", "def _write_char_to_printer(self, c):\n\n if c != 13: # Strip carriage returns\n self.timeout_wait()\n self._send_to_printer(c)\n d = self._byte_time\n if (c == '\\n') or (self._column == self._max_column): # If newline or wrap\n if self._prev_byte == '\\n':\n d += ((self._char_height + self._line_spacing) * self._dot_feed_time) # Feed line\n else:\n d += ((self._char_height * self._dot_print_time) +\n (self._line_spacing * self._dot_feed_time)) # Text line\n\n self._column = 0\n c = '\\n' # Treat wrap as newline on next pass\n else:\n self._column = self._column + 1\n\n self.timeout_set(d)\n self._prev_byte = c", "def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1", "def 
is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def is_word_character(ch):\n if (ch >= 'a' and ch <= 'z'): return True\n if (ch >= 'A' and ch <= 'Z'): return True\n if (ch >= '0' and ch <= '9'): return True\n if (ch >= 'À' and ch < 'ˀ'): return True\n if (ch == '-' or ch == '0xAD'): return True # hyphen or soft hyphen\n if (ch >= 'Ά' and ch <= 'ԓ'): return True\n return False", "def is_apostrophe(ch):\n if (ch == '\\'' or ch == '\\u2019' or ch == '\\u02bc'): return True\n return False", "def ascii_numeric(s: str) -> bool:\n return frozenset(s).issubset(_ascii_n)", "def alphanumeric(s: str) -> bool:\n return len(re.findall(r'[^A-Za-z0-9]', s)) == 0", "def is_valid_char(t_char):\r\n eax = 1 # mi preparo il flag \"invalido\" per il carattere\r\n \r\n # se il carattere e' un operatore, un operando o uno spazio\r\n # il carattere e' valido\r\n if is_operator(t_char) == 0:\r\n # e' operatore\r\n eax = 0\r\n \r\n if is_operand(t_char) == 0:\r\n # e' operando\r\n eax = 0\r\n \r\n if ord(t_char) == 32:\r\n # e' uno spazio\r\n eax = 0\r\n\r\n return eax", "def file_is_ascii_text(op):\n if not os.path.isfile(op):\n return False\n fd = open(op, \"rb\")\n while True:\n line = fd.readline()\n if 0 >= len(line):\n fd.close()\n return True\n try:\n line.decode(\"ascii\")\n except UnicodeDecodeError:\n fd.close()\n return False", "def non_secret_char(c):\n return c", "def validator(self, char):\r\n if char == ord(\"q\"):\r\n char = curses.KEY_F10\r\n if curses.ascii.isprint(char):\r\n if chr(char) not in \"0123456789.\":\r\n char = 0\r\n return TextBox.validator(self, char)", "def isalnum(self):\n return isalnum(self)", "def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')", "def isalnum(self) -> bool:\n pass", "def is_asian(char):\r\n\r\n # 0x3000 is ideographic space (i.e. 
double-byte space)\r\n # Anything over is an Asian character\r\n return ord(char) > IDEOGRAPHIC_SPACE", "def is_connective(char):\n return char in [u\"¬\", u\"∧\", u\"∨\", u\"→\", u\"↔\"]", "def validator(self, char):\r\n if curses.ascii.isprint(char):\r\n return char\r\n if char == curses.ascii.TAB:\r\n char = curses.KEY_DOWN\r\n if char in [curses.KEY_DOWN, curses.KEY_UP]:\r\n self.result = char\r\n return curses.ascii.BEL\r\n if char in [10, 13, curses.KEY_ENTER, curses.ascii.BEL]:\r\n self.result = 10\r\n return curses.ascii.BEL\r\n if char in [27, curses.KEY_F10]:\r\n self.result = -1\r\n return curses.ascii.BEL\r\n return char", "def is_char(user_input):\n # Check lenght of input and if equal to zero return True\n if len(user_input) == 0:\n return True\n return False", "def checkChar(self, char):\n return char not in self.guessedChars", "def is_chars(obj: Any) -> bool:\n return isinstance(obj, (str, bytes, bytearray))", "def next_character(self, ascii_code: int) -> None:\n if self.editing:\n allowed_text = [',', '-', '*', ' ']\n if ascii_code == 8:\n self.text = self.text[:-1]\n return True\n elif ascii_code == 10:\n self.editing = False\n return True\n else:\n if 48 <= ascii_code <= 57 or chr(ascii_code) in allowed_text:\n self.text += chr(ascii_code)\n return True\n return False", "def isalpha(self) -> bool:\n pass", "def char_scoring(binary_string,encoding = 'ascii',threshold = 0.90):\r\n # handle unusual input for threshold (can't be greater than 1), less than 0 will simply return true.\r\n if threshold > 1: threshold = 1;\r\n looks_valid = False;\r\n a_print = bytes(string.printable,encoding);\r\n max_ = len(binary_string);\r\n cnt_ = 0; # I'll take the percentage of printable characters\r\n for a in binary_string:\r\n if a in a_print: cnt_+=1;\r\n if cnt_/max_>=threshold:\r\n looks_valid = True;\r\n return looks_valid", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' 
and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <= '\\u2014'): return True # various dashes\n if (is_quote_mark(ch)): return True\n return False", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def __is_quote(cls, char):\n return char in (\"'\", '\"')", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def isatty(self):\n\n return False", "def is_unicode(space, w_obj):\n return space.wrap(True)", "def _hexchar(c):\n if c == '1': return 1\n if c == '2': return 2\n if c == '3': return 3\n if c == '4': return 4\n if c == '5': return 5\n if c == '6': return 6\n if c == '7': return 7\n if c == '8': return 8\n if c == '9': return 9\n if c == 'A' or c == 'a': return 10\n if c == 'B' or c == 'b': return 11\n if c == 'C' or c == 'c': return 12\n if c == 'D' or c == 'd': return 13\n if c == 'E' or c == 'e': return 14\n if c == 'F' or c == 'f': return 15\n return 0", "def isatty(self):\n return False", "def isalpha(self):\n return isalpha(self)", "def has_whitespaces(string):\n if not isinstance(string, STRTYPE):\n print(str(string) + \" (\" + str(type(string)) + \") is not a string!\")\n return False\n has_ws = False\n for char in string:\n has_ws |= char.isspace()\n return has_ws", "def what_in_string(printable_string):\n if SCCS_ID in printable_string:\n content = re.sub(r\"^.*\" + re.escape(SCCS_ID), \"\", printable_string)\n content = re.sub(r'(\"|>|\\n|\\\\).*', \"\", content)\n if parameters[\"No formatting\"]:\n print(content)\n else:\n print(\"\\t\" + content)\n\n return True\n\n return False" ]
[ "0.77798957", "0.7536381", "0.7419245", "0.7221991", "0.718354", "0.7084998", "0.7084998", "0.70768076", "0.70703983", "0.6941782", "0.6763601", "0.66158307", "0.66158307", "0.6564988", "0.6561958", "0.6391701", "0.6390051", "0.6385484", "0.63393587", "0.63295555", "0.6323699", "0.6310147", "0.6301625", "0.63009757", "0.62561953", "0.61797297", "0.6173338", "0.6128911", "0.6028381", "0.6020126", "0.59977275", "0.5899273", "0.588226", "0.5834943", "0.5806811", "0.58050317", "0.5736869", "0.5711323", "0.5705923", "0.566744", "0.56664956", "0.565289", "0.5619184", "0.5591019", "0.5569459", "0.5557143", "0.55517054", "0.5550003", "0.5545634", "0.5544269", "0.5544038", "0.5519799", "0.55105543", "0.55089855", "0.550299", "0.54864043", "0.5480561", "0.5478088", "0.5470824", "0.5468746", "0.5458884", "0.5450353", "0.54495364", "0.54437447", "0.5443323", "0.5421473", "0.54210764", "0.5383293", "0.537644", "0.5352695", "0.5339773", "0.532838", "0.53232676", "0.5309224", "0.5296539", "0.52711695", "0.5237848", "0.52340555", "0.5223828", "0.52207744", "0.51999694", "0.5185147", "0.5176912", "0.5165365", "0.5152024", "0.5150403", "0.5145345", "0.5129875", "0.5127171", "0.51027644", "0.5092522", "0.50914633", "0.508141", "0.50805503", "0.5077134", "0.5075026", "0.50701874", "0.5054746", "0.50533396", "0.5039286" ]
0.89795405
0
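A minimal usage sketch for the is_printable helper given in the record above, assuming it is defined exactly as shown there; the sample bytes in raw below are hypothetical and chosen only to show which characters the "ord >= 32, plus CR, LF and tab" rule keeps.

def is_printable(c):
    # Same rule as the record above: code point 32 or higher, plus CR, LF and tab.
    return ord(c) >= 32 or c in ['\r', '\n', '\t']

raw = 'abc\x07def\x1b[Kghi\n'   # hypothetical sample: contains a BEL byte and a bare ESC byte
clean = ''.join(ch for ch in raw if is_printable(ch))
# clean == 'abcdef[Kghi\n' -- only the BEL and ESC bytes are dropped; '[' and 'K' survive,
# which is why a separate escape-sequence filter (as in the sanitize record below) is still needed.
print(clean)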
Filter control characters out of the string buf, given a list of control codes that represent backspaces, and a regex of escape sequences. backspaces are characters emitted when the user hits backspace. This will probably vary from terminal to terminal, and this list should grow as new terminals are encountered. escape_regex is a Regex filter to capture all escape sequences.
def sanitize(buf, backspaces=['\x08\x1b[K', '\x08 \x08'], escape_regex=re.compile(r'\x1b(\[|\]|\(|\))[;?0-9]*[0-9A-Za-z](.*\x07)?')): # Filter out control characters # First, handle the backspaces. for backspace in backspaces: try: while True: ind = buf.index(backspace) buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):])) except: pass strip_escapes = escape_regex.sub('',buf) # strip non-printable ASCII characters clean = ''.join([x for x in strip_escapes if is_printable(x)]) return clean
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def escape(self, text, escape_chars):\n _bs = \"\\\\\"\n # backslash is always escaped\n text = text.replace(_bs, _bs * 2)\n for _el in escape_chars:\n assert _el != _bs, \"Backslash has been already escaped\"\n text = text.replace(_el, _bs + _el)\n return text", "def escapedSeq(term):\n\tfor char in term:\n\t\tif char in escapeRules.keys():\n\t\t\tyield escapeRules[char]\n\t\telse:\n\t\t\tyield char", "def loop_escaped(val, c):\n if not val:\n val = ''\n val = as_unicode(val)\n rc = re.compile(r'([^%s\\\\]|\\\\.)*' % re.escape(c))\n pos = 0\n while pos < len(val):\n if val[pos] == c:\n pos += 1\n continue\n m = rc.match(val, pos)\n if not m:\n raise Exception('rx bug')\n pos = m.end()\n yield unescape(m.group(0))", "def strip_from_ansi_esc_sequences(text):\n # esc[ + values + control character\n # h, l, p commands are complicated, let's ignore them\n seq_regex = r\"\\x1b\\[[0-9;]*[mKJusDCBAfH]\"\n regex = re.compile(seq_regex)\n start = 0\n response = \"\"\n for match in regex.finditer(text):\n end = match.start()\n response += text[start:end]\n\n start = match.end()\n response += text[start:len(text)]\n return response", "def _get_escape_translation_table(cls) -> List[str]:\n _escape_table = [chr(x) for x in range(128)]\n _escape_table[0] = \"\\\\0\"\n _escape_table[ord(\"\\\\\")] = \"\\\\\\\\\"\n _escape_table[ord(\"\\n\")] = \"\\\\n\"\n _escape_table[ord(\"\\r\")] = \"\\\\r\"\n _escape_table[ord(\"\\032\")] = \"\\\\Z\"\n _escape_table[ord('\"')] = '\\\\\"'\n _escape_table[ord(\"'\")] = \"\\\\'\"\n return _escape_table", "def dummyOutEscapeCharacters(self, text):\n \n return re.sub(\"\\\\\\\\.\", \"\\$\", text)\n \n #escape = False\n #escapedText = text\n \n #for i in range(len(text)):\n #if escape:\n #escapedText = escapedText[:i] + self.DUMMY_CHAR + escapedText[i+1:]\n #escape = False\n #elif text[i] == \"\\\\\":\n #escape = True\n #return escapedText", "def _escaped_text_from_text(text, escapes=\"eol\"):\n #TODO:\n # - Add 'c-string' style.\n # - Add _escaped_html_from_text() with a similar call sig.\n import re\n\n if isinstance(escapes, base_string_type):\n if escapes == \"eol\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\"}\n elif escapes == \"whitespace\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\",\n '\\t': \"\\\\t\", ' ': \".\"}\n elif escapes == \"eol-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\"}\n elif escapes == \"whitespace-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\", '\\t': \"\\\\t\", ' ': '.'}\n else:\n raise ValueError(\"unknown text escape style: %r\" % escapes)\n\n # Sort longer replacements first to allow, e.g. 
'\\r\\n' to beat '\\r' and\n # '\\n'.\n escapes_keys = list(escapes.keys())\n try:\n escapes_keys.sort(key=lambda a: len(a), reverse=True)\n except TypeError:\n # Python 2.3 support: sort() takes no keyword arguments\n escapes_keys.sort(lambda a,b: cmp(len(a), len(b)))\n escapes_keys.reverse()\n def repl(match):\n val = escapes[match.group(0)]\n return val\n escaped = re.sub(\"(%s)\" % '|'.join([re.escape(k) for k in escapes_keys]),\n repl,\n text)\n\n return escaped", "def escape_control_characters(text: str, keep_spacing=True) -> str:\n if not isinstance(text, str):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n return text.translate(trans)", "def _create_char_spinner():\r\n while True:\r\n for c in '|/-\\\\':\r\n yield c", "def _terminal_command_regexes(self):\n patterns = {}\n for intent, keys in self.keywords.get(\"terminal\").items():\n if keys:\n patterns[intent] = re.compile(r'\\b' + r'\\b|\\b'.join(keys) + r'\\b')\n return patterns", "def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)", "def ScanRE(self, exp):\n # Make sure the expression is not empty\n assert type(exp) is str \n assert exp\n \n self.NotedRE = list()\n\n i = 0\n while i < len(exp):\n if exp[i] == ' ':\n i += 1\n elif exp[i] == '\\\\':\n ch = exp[i:i + 2]\n i += 2\n else:\n ch = exp[i]\n i += 1\n \n self.NotedRE.append(rule.CheckCharType(ch))", "def remove_ansi_escape_sequence(self, text):\n\n # By default no string returned\n output = \"\"\n\n # By default no escape sequence found\n esc_found = 0\n\n # Read char by char a string\n for i in text:\n\n # Display char\n # log.info(f\"{str(i).encode('ascii')}\")\n\n # No escape previously found?\n if esc_found == 0:\n\n # No escape sequence currently found\n\n # Escape?\n if i == \"\\x1b\":\n\n # Yes\n log.info(\"Esc!\")\n\n # Escape found\n esc_found = 1\n\n else:\n\n # No\n\n # Then the current char can be saved\n output += i\n\n # Escape previously found?\n elif esc_found == 1:\n\n # Yes\n\n # Then check if this is a CSI sequence\n if i == \"[\":\n\n # Beginning of CSI sequence\n log.info(\"CSI sequence\")\n\n # CSI sequence\n esc_found = 2\n\n else:\n\n # Another Escape sequence\n\n # Keep the escape sequence in the string\n output += \"\\x1b\" + i\n\n # No escape sequence next\n esc_found = 0\n\n else:\n\n # Char between 'a' and 'z' or 'A' and 'Z'?\n if (i >= \"a\" and i <= \"z\") or (i >= \"A\" and i <= \"Z\"):\n\n # Yes\n\n # Then it is the end of CSI escape sequence\n log.info(\"End of escape sequence\")\n\n # No escape sequence next\n esc_found = 0\n\n # Return a string without ANSI escape sequence\n return output", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def _zap_esc_map(sub, _epat = re.compile(r'(\\[\\anrfbtv])')):\n for craw, cmap in [(r'\\n', '\\n'), (r'\\\\', '\\\\'), (r'\\r', '\\r'),\n (r'\\t', '\\t'), (r'\\f', '\\f'), (r'\\a', '\\a'),\n (r'\\b', '\\b'), (r'\\v', '\\v')]:\n if _epat.search(sub) is None:\n return sub\n sub = re.sub(craw, cmap, sub)\n return sub", "def escape(raw_string): \n return ''.join(\n [_caret_escapes_for_unprintables.get(c, c) for c in raw_string])", "def _escape(strings):\n ret = []\n for string in strings:\n if string == '[' or string == ']' or string == \"\\\"\":\n string = '\\\\' + string\n ret.append(string)\n return \"\".join(ret)", "def compile_regex(self, paths):\r\n if isinstance(paths, list):\r\n ret = []\r\n for regex in paths:\r\n 
ret.append(re.compile(regex, re.I))\r\n return ret\r\n else:\r\n return re.compile(paths, re.I)", "def remove_escape_characters(text):\n text_removed_escape = list(map(lambda x: x.replace(\"\\\\\", \"\").replace(\"'\", \"\").strip().lower(), re.split(r\"(?<=\\\\)[a-z]{1}\", repr(text))))\n text_removed_extra_spaces = list(filter(lambda x: x != \"\", text_removed_escape))\n return \" \".join(text_removed_extra_spaces)", "def remove_ansi_escape_sequences(input_string):\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n result = ansi_escape.sub('',input_string)\n return result", "def stripEscapes(s):\r\n result = ''\r\n show = 1\r\n i = 0\r\n L = len(s)\r\n while i < L:\r\n if show == 0 and s[i] in ANSI_TERMINATORS:\r\n show = 1\r\n elif show:\r\n n = s.find(ANSI_ESCAPE_BEGIN, i)\r\n if n == -1:\r\n return result + s[i:]\r\n else:\r\n result = result + s[i:n]\r\n i = n\r\n show = 0\r\n i += 1\r\n return result", "def _escapeSpecialCharacters(text):\n text.replace('\\\\', '\\\\\\\\')\n escape = ['~', '#', '&', '%', '_']\n for c in escape:\n text = text.replace(c, '\\\\' + c )\n return text", "def escape_special_characters_for_regex(expression):\n spec_char_escaper = re.compile(r\"[^a-zA-Z0-9]\", re.IGNORECASE)\n expression = re.sub(spec_char_escaper, r'\\1', expression)\n return expression", "def __create_regex(self):\n self.lexer_regex = \"|\".join(self.tokens)\n logger.debug(f\"Generated tokenizer regex {self.lexer_regex}\")", "def strip_ansi(content):\n return ANSI_ESCAPES_REGEX.sub('', content)", "def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")", "def test_escape(self):\n bad_str = '''`~!@#$%^&*()_+-={}[]|\\\\;:'\",./<>?\\n\\r\\t '''\n self.run_escape_case(bad_str)", "def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)", "def strip_ansi_escape(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return re.sub(r\"\\x1b[^m]*m\", \"\", data)", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def buffer_build_regex(buffer):\n\n\thdata = weechat.hdata_get(\"buffer\")\n\tinput = weechat.hdata_string(hdata, buffer, \"input_buffer\")\n\texact = weechat.hdata_integer(hdata, buffer, \"text_search_exact\")\n\twhere = weechat.hdata_integer(hdata, buffer, \"text_search_where\")\n\tregex = weechat.hdata_integer(hdata, buffer, \"text_search_regex\")\n\n\tif not regex:\n\t\tinput = re.escape(input)\n\n\tif exact:\n\t\tinput = \"(?-i)%s\" % input\n\n\tfilter_regex = None\n\tif where == 1: # message\n\t\tfilter_regex = input\n\telif where == 2: # prefix\n\t\tfilter_regex = \"%s\\\\t\" % input\n\telse: # prefix | message\n\t\tfilter_regex = input # TODO: impossible with current filter regex\n\n\treturn \"!%s\" % filter_regex", "def html_escape(text):\n L=[]\n for c in text:\n L.append(html_escape_table.get(c,c))\n return \"\".join(L)", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. 
do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n if not ( self.literal_block or self.literal or self.mathmode ):\n # the vertical bar: in mathmode |,\\vert or \\mid\n # in textmode \\textbar\n text = text.replace(\"|\", '{\\\\textbar}')\n text = text.replace(\"<\", '{\\\\textless}')\n text = text.replace(\">\", '{\\\\textgreater}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. hat{~} would also work.\n # text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"^\", '{\\\\textasciicircum}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\textasciitilde}')\n # Separate compound characters, e.g. \"--\" to \"-{}-\". (The\n # actual separation is done later; see below.)\n separate_chars = '-'\n if self.literal_block or self.literal:\n # In monospace-font, we also separate \",,\", \"``\" and \"''\"\n # and some other characters which can't occur in\n # non-literal text.\n separate_chars += ',`\\'\"<>'\n # pdflatex does not produce doublequotes for ngerman.\n text = self.babel.double_quotes_in_tt(text)\n if self.font_encoding == 'OT1':\n # We're using OT1 font-encoding and have to replace\n # underscore by underlined blank, because this has\n # correct width.\n text = text.replace('_', '{\\\\underline{ }}')\n # And the tt-backslash doesn't work in OT1, so we use\n # a mirrored slash.\n text = text.replace('\\\\textbackslash', '\\\\reflectbox{/}')\n else:\n text = text.replace('_', '{\\\\_}')\n else:\n text = self.babel.quote_quotes(text)\n text = text.replace(\"_\", '{\\\\_}')\n for char in separate_chars * 2:\n # Do it twice (\"* 2\") becaues otherwise we would replace\n # \"---\" by \"-{}--\".\n text = text.replace(char + char, char + '{}' + char)\n if self.insert_newline or self.literal_block:\n # Insert a blank before the newline, to avoid\n # ! 
LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n if self.literal_block:\n closings = \"}\" * len(self.literal_block_stack)\n openings = \"\".join(self.literal_block_stack)\n else:\n closings = \"\"\n openings = \"\"\n text = text.replace(\"\\n\", \"%s}\\\\\\\\\\n\\\\mbox{%s\" % (closings,openings))\n # lines starting with \"[\" give errors.\n text = text.replace('[', '{[}')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n if self.latex_encoding != 'utf8':\n text = self.unicode_to_latex(text)\n return text", "def escaped(array):\n\n return list(map(re.escape, array))", "def masked_by_quotechar(S, quotechar, escapechar, test_char):\n if test_char == \"\":\n return False\n escape_next = False\n in_quotes = False\n i = 0\n while i < len(S):\n s = S[i]\n if s == quotechar:\n if escape_next:\n i += 1\n continue\n if not in_quotes:\n in_quotes = True\n else:\n if i + 1 < len(S) and S[i + 1] == quotechar:\n i += 1\n else:\n in_quotes = False\n elif s == test_char and not in_quotes:\n return False\n elif s == escapechar:\n escape_next = True\n i += 1\n return True", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False # pragma: no cover\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True # pragma: no cover\n return False", "def readline(self):\n\n\t\tself.history.insert(0, '')\n\t\tself.history_pos = 0\n\n\t\ttry:\n\t\t\tif self._gevent_handle_sigint:\n\t\t\t\timport gevent\n\t\t\t\tself._readline_greenlet = gevent.getcurrent()\n\n\t\t\twhile True:\n\n\t\t\t\tself.refresh()\n\n\t\t\t\t# read input\n\t\t\t\tc = self.read()\n\t\t\t\tif isinstance(c, unicode):\n\t\t\t\t\tc = c.encode(self.encoding or 'utf-8')\n\t\t\t\tif not c:\n\t\t\t\t\traise EOFError()\n\t\t\t\tif c in self.TERMINATORS:\n\t\t\t\t\tbreak\n\t\t\t\tself.esc_buf += c\n\n\t\t\t\t# on partial unicode characters, continue to buffer\n\t\t\t\tesc_buf = self.esc_buf\n\t\t\t\tif self.encoding or PY3:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tesc_buf = self.esc_buf.decode(self.encoding or 'utf-8')\n\t\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\t\tlogging.debug(\"Got partial unicode character {!r}, continuing\".format(self.esc_buf))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t# check for full escape sequence\n\t\t\t\tif esc_buf in ESCAPE_HANDLERS:\n\t\t\t\t\tlogging.debug(\"Got esc handler {!r}\".format(esc_buf))\n\t\t\t\t\tself.head, self.tail = ESCAPE_HANDLERS[esc_buf](self.head, self.tail, self)\n\t\t\t\t\tself.esc_buf = b''\n\t\t\t\t\tcontinue\n\n\t\t\t\t# on partial escape sequences, continue to buffer\n\t\t\t\tif any(sequence.startswith(esc_buf) for sequence in ESCAPE_HANDLERS):\n\t\t\t\t\tlogging.debug(\"Buffer {!r} is prefix of at least one esc handler, continuing\".format(esc_buf))\n\t\t\t\t\tcontinue\n\n\t\t\t\tlogging.debug(\"Buffer {!r} not prefix of any esc handler, stripping and adding\".format(esc_buf))\n\n\t\t\t\tif self.suppress_nonprinting:\n\t\t\t\t\t# filter non-printing chars before we add to main buffer\n\t\t\t\t\t# (also allow >128 for non-ascii chars)\n\t\t\t\t\tesc_buf = type(esc_buf)().join([\n\t\t\t\t\t\tc for c in esc_buf\n\t\t\t\t\t\tif c in self.PRINTABLE or ord(c) > 128\n\t\t\t\t\t])\n\n\t\t\t\t# flush escape buffer\n\t\t\t\tself.head += esc_buf\n\t\t\t\tself.esc_buf = b''\n\n\t\texcept KeyboardInterrupt:\n\t\t\tself.head = ''\n\t\t\tself.tail = ''\n\t\t\t# fall through\n\t\texcept 
EOFError:\n\t\t\tif not (self.head or self.tail): raise\n\t\t\t# fall through\n\t\tfinally:\n\t\t\tif self._gevent_handle_sigint:\n\t\t\t\tself._readline_greenlet = None\n\n\t\tself.history[0] = self.head + self.tail\n\t\tif not self.history[0]: self.history.pop(0)\n\n\t\tret = self.head + self.tail\n\t\tself.head = ''\n\t\tself.tail = ''\n\n\t\tif self.encoding and not isinstance(ret, unicode):\n\t\t\t# Some edge cases (eg. ^C) can result in ret being bytes even when decoding should happen.\n\t\t\t# Our code doesn't care because the implict coercion is safe for empty strings and ascii characters,\n\t\t\t# but we want to avoid unexpected behaviour when returning to the caller.\n\t\t\t# If this raises a UnicodeDecodeError, it indicates that there is a logic bug, as non-ascii characters\n\t\t\t# shouldn't be present if ret isn't already a unicode object.\n\t\t\tret = ret.decode('ascii')\n\n\t\treturn ret", "def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[\n Node]:\n # Control inputs start with \"^\". Skip everything else and strip off the\n # leading caret character\n control_input_names = [n[1:] for n in inputs if n.startswith(\"^\")]\n return [g[name] for name in control_input_names]", "def escape(s, pattern=r'(\\W)'):\n r = re.compile(pattern)\n return r.subn(r'\\\\\\1', s)[0]", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def sub(self, rgx, repl, count=0):\n count = max(count, 0)\n newbuf = [re.sub(rgx, repl, line, count) for line in self.buffer]\n self.buffer = newbuf", "def test_parse_quotes_not_three_vertical_bars(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"did not find 3 '|' characters\")):\n api.parse_quote(\" This is a quote||\", simple_format=False)", "def regex_filter(regex_str, versions):\n regex = re.compile(regex_str)\n return [v for v in versions if regex.search(v)]", "def build_regex(self) -> typing.Pattern:\n self._regex = re.compile(\"|\".join(sorted(self._includes)))\n return self._regex", "def _binary_command_regexes(self):\n patterns = {}\n for intent, keys in self.keywords.get(\"binary\").items():\n if keys:\n patterns[intent] = re.compile(r'\\b' + r'\\b|\\b'.join(keys) + r'\\b')\n return patterns", "def _escape_string(text, _map={}):\n if isinstance(text, str):\n text = text.encode()\n assert isinstance(text, (bytes, bytearray))\n\n if not _map:\n for ch in range(256):\n if ch in _VALID_CHARS:\n _map[ch] = chr(ch)\n else:\n _map[ch] = '\\\\%02x' % ch\n\n buf = [_map[ch] for ch in text]\n return ''.join(buf)", "def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")", "def replace_escaped_characters(data: Text) -> Text:\n return re.sub(r'\\\\(.)', r'\\1', data)", "def CUnescape(text):\n # type: (str) -> bytes\n\n def ReplaceHex(m):\n # Only replace the match if the number of leading back slashes is odd. 
i.e.\n # the slash itself is not escaped.\n if len(m.group(1)) & 1:\n return m.group(1) + 'x0' + m.group(2)\n return m.group(0)\n\n # This is required because the 'string_escape' encoding doesn't\n # allow single-digit hex escapes (like '\\xf').\n result = _CUNESCAPE_HEX.sub(ReplaceHex, text)\n\n return (result.encode('utf-8') # Make it bytes to allow decode.\n .decode('unicode_escape')\n # Make it bytes again to return the proper type.\n .encode('raw_unicode_escape'))", "def escape(text: str) -> str:\n\n def replace(match_obj):\n \"\"\"\n Returns the match text prefixed with backslash\n\n :param re.match match_obj: The match.\n\n :rtype: str\n \"\"\"\n return '\\\\' + match_obj.group(0)\n\n return re.sub(r'[\\\\{}]', replace, text)", "def escape_shell_chars_tmsu(str):\n str = str.replace(\"/\", \"\\\\\")\n str = re.sub(\"(!|\\$|#|&|\\\"|\\'|\\(|\\)|\\||<|>|`|\\\\\\|;| )\", r\"\\\\\\1\", str)\n return str", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _compile_list(self):\n if self.case_insensitive:\n return [(re.compile(pattern, re.I), replacement) for\n pattern, replacement in self.uncompiled]\n else:\n return [(re.compile(pattern), replacement) for\n pattern, replacement in self.uncompiled]", "def html_escape(text):\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def html_escape(text):\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def escape_tex(value):\n # This code, and the code that call this is courtesy of Clemens Kaposi\n # http://flask.pocoo.org/snippets/55/\n\n LATEX_SUBS = (\n (re.compile(r'\\\\'), r'\\\\textbackslash'),\n (re.compile(r'([{}_#%&$])'), r'\\\\\\1'),\n (re.compile(r'~'), r'\\~{}'),\n (re.compile(r'\\^'), r'\\^{}'),\n (re.compile(r'\"'), r\"''\"),\n (re.compile(r'\\.\\.\\.+'), r'\\\\ldots'),\n )\n\n newval = value\n for pattern, replacement in LATEX_SUBS:\n newval = pattern.sub(replacement, newval)\n return newval", "def ReadKeys(self):\n\n reg = re.compile(r\"\\w|\\s\")\n chars = \"\"\n while True:\n key = getch()\n keynum = ord(key)\n\n if keynum == 27: #escape\n self.shouldExit = True\n return \"\"\n\n if keynum == 13: #enter\n stdout.write(\"\\n\")\n break\n\n if keynum == 8: #backspace\n chars = chars[:-1]\n stdout.write(key)\n stdout.write(\" \")\n stdout.write(key)\n continue\n\n if reg.match(key): \n chars += key\n stdout.write(key)\n\n return chars", "def stopwords_regex(self):\n regexes = list()\n for stopword in self.stopwords():\n regexes.append(r'\\b' + stopword + r'\\b')\n return re.compile('|'.join(regexes), re.IGNORECASE)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def clean_regex(regex):\n # copy for return\n ret_regex = regex\n\n # these characters are escaped (all except alternation | and escape \\)\n # see http://www.regular-expressions.info/refquick.html\n escape_chars = '[^$.?*+(){}'\n\n # remove any escape chars\n ret_regex = ret_regex.replace('\\\\', '')\n\n # escape any characters which are used by regex\n # could probably concoct something incomprehensible using re.sub() but\n # prefer to write clear code with this loop\n # note expectation that no characters have 
already been escaped\n for c in escape_chars:\n ret_regex = ret_regex.replace(c, '\\\\' + c)\n\n # remove any double alternations until these don't exist any more\n while True:\n old_regex = ret_regex\n ret_regex = ret_regex.replace('||', '|')\n if old_regex == ret_regex:\n break\n\n # if last char is alternation | remove it because this\n # will cause operational error\n # this can happen as user is typing in global search box\n while len(ret_regex) >= 1 and ret_regex[-1] == '|':\n ret_regex = ret_regex[:-1]\n\n # and back to the caller\n return ret_regex", "def scrub_output(output):\n ansi_escape = re.compile(r'\\x1b[^m]*m')\n return ansi_escape.sub('', output)", "def register_all(self):\n # TODO complete this list\n # register special symbols\n self.register(u'\\n\\n', u' \\\\par', encode=False)\n self.register(u'\\n\\n', u'\\\\par', encode=False)\n self.register(u' ', u'\\\\ ', encode=False)\n self.register(u'\\N{EM SPACE}', u'\\\\quad')\n self.register(u'\\N{THIN SPACE}', u' ', decode=False)\n self.register(u'%', u'\\\\%')\n self.register(u'\\N{EN DASH}', u'--')\n self.register(u'\\N{EN DASH}', u'\\\\textendash')\n self.register(u'\\N{EM DASH}', u'---')\n self.register(u'\\N{EM DASH}', u'\\\\textemdash')\n self.register(u'\\N{REPLACEMENT CHARACTER}', u\"????\", decode=False)\n self.register(u'\\N{LEFT SINGLE QUOTATION MARK}', u'`', decode=False)\n self.register(u'\\N{RIGHT SINGLE QUOTATION MARK}', u\"'\", decode=False)\n self.register(u'\\N{LEFT DOUBLE QUOTATION MARK}', u'``')\n self.register(u'\\N{RIGHT DOUBLE QUOTATION MARK}', u\"''\")\n self.register(u'\\N{DOUBLE LOW-9 QUOTATION MARK}', u\",,\")\n self.register(u'\\N{DOUBLE LOW-9 QUOTATION MARK}', u'\\\\glqq',\n encode=False)\n self.register(u'\\N{LEFT-POINTING DOUBLE ANGLE QUOTATION MARK}',\n u'\\\\guillemotleft')\n self.register(u'\\N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}',\n u'\\\\guillemotright')\n self.register(u'\\N{MODIFIER LETTER PRIME}', u\"'\", decode=False)\n self.register(u'\\N{MODIFIER LETTER DOUBLE PRIME}', u\"''\", decode=False)\n self.register(u'\\N{MODIFIER LETTER TURNED COMMA}', u'`', decode=False)\n self.register(u'\\N{MODIFIER LETTER APOSTROPHE}', u\"'\", decode=False)\n self.register(u'\\N{MODIFIER LETTER REVERSED COMMA}', u'`',\n decode=False)\n self.register(u'\\N{DAGGER}', u'\\\\dag')\n self.register(u'\\N{DOUBLE DAGGER}', u'\\\\ddag')\n\n self.register(u'\\\\', u'\\\\textbackslash', encode=False)\n self.register(u'\\\\', u'\\\\backslash', mode='math', encode=False)\n\n self.register(u'\\N{TILDE OPERATOR}', u'\\\\sim', mode='math')\n self.register(u'\\N{MODIFIER LETTER LOW TILDE}',\n u'\\\\texttildelow', package='textcomp')\n self.register(u'\\N{SMALL TILDE}', u'\\\\~{}')\n self.register(u'~', u'\\\\textasciitilde')\n\n self.register(u'\\N{BULLET}', u'\\\\bullet', mode='math')\n self.register(u'\\N{BULLET}', u'\\\\textbullet', package='textcomp')\n self.register(u'\\N{ASTERISK OPERATOR}', u'\\\\ast', mode='math')\n\n self.register(u'\\N{NUMBER SIGN}', u'\\\\#')\n self.register(u'\\N{LOW LINE}', u'\\\\_')\n self.register(u'\\N{AMPERSAND}', u'\\\\&')\n self.register(u'\\N{NO-BREAK SPACE}', u'~')\n self.register(u'\\N{INVERTED EXCLAMATION MARK}', u'!`')\n self.register(u'\\N{CENT SIGN}', u'\\\\not{c}')\n\n self.register(u'\\N{POUND SIGN}', u'\\\\pounds')\n self.register(u'\\N{POUND SIGN}', u'\\\\textsterling', package='textcomp')\n self.register(u'\\N{YEN SIGN}', u'\\\\yen')\n self.register(u'\\N{YEN SIGN}', u'\\\\textyen', package='textcomp')\n\n self.register(u'\\N{SECTION SIGN}', u'\\\\S')\n 
self.register(u'\\N{DIAERESIS}', u'\\\\\"{}')\n self.register(u'\\N{NOT SIGN}', u'\\\\neg')\n self.register(u'\\N{HYPHEN}', u'-', decode=False)\n self.register(u'\\N{SOFT HYPHEN}', u'\\\\-')\n self.register(u'\\N{MACRON}', u'\\\\={}')\n\n self.register(u'\\N{DEGREE SIGN}', u'^\\\\circ', mode='math')\n self.register(u'\\N{DEGREE SIGN}', u'\\\\textdegree', package='textcomp')\n\n self.register(u'\\N{MINUS SIGN}', u'-', mode='math')\n self.register(u'\\N{PLUS-MINUS SIGN}', u'\\\\pm', mode='math')\n self.register(u'\\N{PLUS-MINUS SIGN}', u'\\\\textpm', package='textcomp')\n\n self.register(u'\\N{SUPERSCRIPT TWO}', u'^2', mode='math')\n self.register(\n u'\\N{SUPERSCRIPT TWO}',\n u'\\\\texttwosuperior',\n package='textcomp')\n\n self.register(u'\\N{SUPERSCRIPT THREE}', u'^3', mode='math')\n self.register(\n u'\\N{SUPERSCRIPT THREE}',\n u'\\\\textthreesuperior',\n package='textcomp')\n\n self.register(u'\\N{ACUTE ACCENT}', u\"\\\\'{}\")\n\n self.register(u'\\N{MICRO SIGN}', u'\\\\mu', mode='math')\n self.register(u'\\N{MICRO SIGN}', u'\\\\micro', package='gensymu')\n\n self.register(u'\\N{PILCROW SIGN}', u'\\\\P')\n\n self.register(u'\\N{MIDDLE DOT}', u'\\\\cdot', mode='math')\n self.register(\n u'\\N{MIDDLE DOT}',\n u'\\\\textperiodcentered',\n package='textcomp')\n\n self.register(u'\\N{CEDILLA}', u'\\\\c{}')\n\n self.register(u'\\N{SUPERSCRIPT ONE}', u'^1', mode='math')\n self.register(\n u'\\N{SUPERSCRIPT ONE}',\n u'\\\\textonesuperior',\n package='textcomp')\n\n self.register(u'\\N{INVERTED QUESTION MARK}', u'?`')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH GRAVE}', u'\\\\`A')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}', u'\\\\^A')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH TILDE}', u'\\\\~A')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH DIAERESIS}', u'\\\\\"A')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', u'\\\\AA')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', u'\\\\r A',\n encode=False)\n self.register(u'\\N{LATIN CAPITAL LETTER AE}', u'\\\\AE')\n self.register(u'\\N{LATIN CAPITAL LETTER C WITH CEDILLA}', u'\\\\c C')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH GRAVE}', u'\\\\`E')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH ACUTE}', u\"\\\\'E\")\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}', u'\\\\^E')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH DIAERESIS}', u'\\\\\"E')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH GRAVE}', u'\\\\`I')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}', u'\\\\^I')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH DIAERESIS}', u'\\\\\"I')\n self.register(u'\\N{LATIN CAPITAL LETTER N WITH TILDE}', u'\\\\~N')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH GRAVE}', u'\\\\`O')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH ACUTE}', u\"\\\\'O\")\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}', u'\\\\^O')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH TILDE}', u'\\\\~O')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH DIAERESIS}', u'\\\\\"O')\n self.register(u'\\N{MULTIPLICATION SIGN}', u'\\\\times', mode='math')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH STROKE}', u'\\\\O')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH GRAVE}', u'\\\\`U')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH ACUTE}', u\"\\\\'U\")\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}', u'\\\\^U')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH DIAERESIS}', u'\\\\\"U')\n self.register(u'\\N{LATIN CAPITAL LETTER Y 
WITH ACUTE}', u\"\\\\'Y\")\n self.register(u'\\N{LATIN SMALL LETTER SHARP S}', u'\\\\ss')\n self.register(u'\\N{LATIN SMALL LETTER A WITH GRAVE}', u'\\\\`a')\n self.register(u'\\N{LATIN SMALL LETTER A WITH ACUTE}', u\"\\\\'a\")\n self.register(u'\\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}', u'\\\\^a')\n self.register(u'\\N{LATIN SMALL LETTER A WITH TILDE}', u'\\\\~a')\n self.register(u'\\N{LATIN SMALL LETTER A WITH DIAERESIS}', u'\\\\\"a')\n self.register(u'\\N{LATIN SMALL LETTER A WITH RING ABOVE}', u'\\\\aa')\n self.register(u'\\N{LATIN SMALL LETTER A WITH RING ABOVE}', u'\\\\r a',\n encode=False)\n self.register(u'\\N{LATIN SMALL LETTER AE}', u'\\\\ae')\n self.register(u'\\N{LATIN SMALL LETTER C WITH CEDILLA}', u'\\\\c c')\n self.register(u'\\N{LATIN SMALL LETTER E WITH GRAVE}', u'\\\\`e')\n self.register(u'\\N{LATIN SMALL LETTER E WITH ACUTE}', u\"\\\\'e\")\n self.register(u'\\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}', u'\\\\^e')\n self.register(u'\\N{LATIN SMALL LETTER E WITH DIAERESIS}', u'\\\\\"e')\n self.register(u'\\N{LATIN SMALL LETTER I WITH GRAVE}', u'\\\\`\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH GRAVE}', u'\\\\`i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH ACUTE}', u\"\\\\'\\\\i\")\n self.register(u'\\N{LATIN SMALL LETTER I WITH ACUTE}', u\"\\\\'i\")\n self.register(u'\\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', u'\\\\^\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', u'\\\\^i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH DIAERESIS}', u'\\\\\"\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH DIAERESIS}', u'\\\\\"i')\n self.register(u'\\N{LATIN SMALL LETTER N WITH TILDE}', u'\\\\~n')\n self.register(u'\\N{LATIN SMALL LETTER O WITH GRAVE}', u'\\\\`o')\n self.register(u'\\N{LATIN SMALL LETTER O WITH ACUTE}', u\"\\\\'o\")\n self.register(u'\\N{LATIN SMALL LETTER O WITH CIRCUMFLEX}', u'\\\\^o')\n self.register(u'\\N{LATIN SMALL LETTER O WITH TILDE}', u'\\\\~o')\n self.register(u'\\N{LATIN SMALL LETTER O WITH DIAERESIS}', u'\\\\\"o')\n self.register(u'\\N{DIVISION SIGN}', u'\\\\div', mode='math')\n self.register(u'\\N{LATIN SMALL LETTER O WITH STROKE}', u'\\\\o')\n self.register(u'\\N{LATIN SMALL LETTER U WITH GRAVE}', u'\\\\`u')\n self.register(u'\\N{LATIN SMALL LETTER U WITH ACUTE}', u\"\\\\'u\")\n self.register(u'\\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}', u'\\\\^u')\n self.register(u'\\N{LATIN SMALL LETTER U WITH DIAERESIS}', u'\\\\\"u')\n self.register(u'\\N{LATIN SMALL LETTER Y WITH ACUTE}', u\"\\\\'y\")\n self.register(u'\\N{LATIN SMALL LETTER Y WITH DIAERESIS}', u'\\\\\"y')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH MACRON}', u'\\\\=A')\n self.register(u'\\N{LATIN SMALL LETTER A WITH MACRON}', u'\\\\=a')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH BREVE}', u'\\\\u A')\n self.register(u'\\N{LATIN SMALL LETTER A WITH BREVE}', u'\\\\u a')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH OGONEK}', u'\\\\k A')\n self.register(u'\\N{LATIN SMALL LETTER A WITH OGONEK}', u'\\\\k a')\n self.register(u'\\N{LATIN CAPITAL LETTER C WITH ACUTE}', u\"\\\\'C\")\n self.register(u'\\N{LATIN SMALL LETTER C WITH ACUTE}', u\"\\\\'c\")\n self.register(u'\\N{LATIN CAPITAL LETTER C WITH CIRCUMFLEX}', u'\\\\^C')\n self.register(u'\\N{LATIN SMALL LETTER C WITH CIRCUMFLEX}', u'\\\\^c')\n self.register(u'\\N{LATIN CAPITAL LETTER C WITH DOT ABOVE}', u'\\\\.C')\n self.register(u'\\N{LATIN SMALL LETTER C WITH DOT ABOVE}', u'\\\\.c')\n self.register(u'\\N{LATIN CAPITAL LETTER C WITH CARON}', u'\\\\v C')\n self.register(u'\\N{LATIN SMALL LETTER C 
WITH CARON}', u'\\\\v c')\n self.register(u'\\N{LATIN CAPITAL LETTER D WITH CARON}', u'\\\\v D')\n self.register(u'\\N{LATIN SMALL LETTER D WITH CARON}', u'\\\\v d')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH MACRON}', u'\\\\=E')\n self.register(u'\\N{LATIN SMALL LETTER E WITH MACRON}', u'\\\\=e')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH BREVE}', u'\\\\u E')\n self.register(u'\\N{LATIN SMALL LETTER E WITH BREVE}', u'\\\\u e')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}', u'\\\\.E')\n self.register(u'\\N{LATIN SMALL LETTER E WITH DOT ABOVE}', u'\\\\.e')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH OGONEK}', u'\\\\k E')\n self.register(u'\\N{LATIN SMALL LETTER E WITH OGONEK}', u'\\\\k e')\n self.register(u'\\N{LATIN CAPITAL LETTER E WITH CARON}', u'\\\\v E')\n self.register(u'\\N{LATIN SMALL LETTER E WITH CARON}', u'\\\\v e')\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH CIRCUMFLEX}', u'\\\\^G')\n self.register(u'\\N{LATIN SMALL LETTER G WITH CIRCUMFLEX}', u'\\\\^g')\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH BREVE}', u'\\\\u G')\n self.register(u'\\N{LATIN SMALL LETTER G WITH BREVE}', u'\\\\u g')\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH DOT ABOVE}', u'\\\\.G')\n self.register(u'\\N{LATIN SMALL LETTER G WITH DOT ABOVE}', u'\\\\.g')\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH CEDILLA}', u'\\\\c G')\n self.register(u'\\N{LATIN SMALL LETTER G WITH CEDILLA}', u'\\\\c g')\n self.register(u'\\N{LATIN CAPITAL LETTER H WITH CIRCUMFLEX}', u'\\\\^H')\n self.register(u'\\N{LATIN SMALL LETTER H WITH CIRCUMFLEX}', u'\\\\^h')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH TILDE}', u'\\\\~I')\n self.register(u'\\N{LATIN SMALL LETTER I WITH TILDE}', u'\\\\~\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH TILDE}', u'\\\\~i')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH MACRON}', u'\\\\=I')\n self.register(u'\\N{LATIN SMALL LETTER I WITH MACRON}', u'\\\\=\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH MACRON}', u'\\\\=i')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH BREVE}', u'\\\\u I')\n self.register(u'\\N{LATIN SMALL LETTER I WITH BREVE}', u'\\\\u\\\\i')\n self.register(u'\\N{LATIN SMALL LETTER I WITH BREVE}', u'\\\\u i')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH OGONEK}', u'\\\\k I')\n self.register(u'\\N{LATIN SMALL LETTER I WITH OGONEK}', u'\\\\k i')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}', u'\\\\.I')\n self.register(u'\\N{LATIN SMALL LETTER DOTLESS I}', u'\\\\i')\n self.register(u'\\N{LATIN CAPITAL LIGATURE IJ}', u'IJ', decode=False)\n self.register(u'\\N{LATIN SMALL LIGATURE IJ}', u'ij', decode=False)\n self.register(u'\\N{LATIN CAPITAL LETTER J WITH CIRCUMFLEX}', u'\\\\^J')\n self.register(u'\\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', u'\\\\^\\\\j')\n self.register(u'\\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', u'\\\\^j')\n self.register(u'\\N{LATIN CAPITAL LETTER K WITH CEDILLA}', u'\\\\c K')\n self.register(u'\\N{LATIN SMALL LETTER K WITH CEDILLA}', u'\\\\c k')\n self.register(u'\\N{LATIN CAPITAL LETTER L WITH ACUTE}', u\"\\\\'L\")\n self.register(u'\\N{LATIN SMALL LETTER L WITH ACUTE}', u\"\\\\'l\")\n self.register(u'\\N{LATIN CAPITAL LETTER L WITH CEDILLA}', u'\\\\c L')\n self.register(u'\\N{LATIN SMALL LETTER L WITH CEDILLA}', u'\\\\c l')\n self.register(u'\\N{LATIN CAPITAL LETTER L WITH CARON}', u'\\\\v L')\n self.register(u'\\N{LATIN SMALL LETTER L WITH CARON}', u'\\\\v l')\n self.register(u'\\N{LATIN CAPITAL LETTER L WITH STROKE}', u'\\\\L')\n self.register(u'\\N{LATIN 
SMALL LETTER L WITH STROKE}', u'\\\\l')\n self.register(u'\\N{LATIN CAPITAL LETTER N WITH ACUTE}', u\"\\\\'N\")\n self.register(u'\\N{LATIN SMALL LETTER N WITH ACUTE}', u\"\\\\'n\")\n self.register(u'\\N{LATIN CAPITAL LETTER N WITH CEDILLA}', u'\\\\c N')\n self.register(u'\\N{LATIN SMALL LETTER N WITH CEDILLA}', u'\\\\c n')\n self.register(u'\\N{LATIN CAPITAL LETTER N WITH CARON}', u'\\\\v N')\n self.register(u'\\N{LATIN SMALL LETTER N WITH CARON}', u'\\\\v n')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH MACRON}', u'\\\\=O')\n self.register(u'\\N{LATIN SMALL LETTER O WITH MACRON}', u'\\\\=o')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH BREVE}', u'\\\\u O')\n self.register(u'\\N{LATIN SMALL LETTER O WITH BREVE}', u'\\\\u o')\n self.register(\n u'\\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}',\n u'\\\\H O')\n self.register(u'\\N{LATIN SMALL LETTER O WITH DOUBLE ACUTE}', u'\\\\H o')\n self.register(u'\\N{LATIN CAPITAL LIGATURE OE}', u'\\\\OE')\n self.register(u'\\N{LATIN SMALL LIGATURE OE}', u'\\\\oe')\n self.register(u'\\N{LATIN CAPITAL LETTER R WITH ACUTE}', u\"\\\\'R\")\n self.register(u'\\N{LATIN SMALL LETTER R WITH ACUTE}', u\"\\\\'r\")\n self.register(u'\\N{LATIN CAPITAL LETTER R WITH CEDILLA}', u'\\\\c R')\n self.register(u'\\N{LATIN SMALL LETTER R WITH CEDILLA}', u'\\\\c r')\n self.register(u'\\N{LATIN CAPITAL LETTER R WITH CARON}', u'\\\\v R')\n self.register(u'\\N{LATIN SMALL LETTER R WITH CARON}', u'\\\\v r')\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH ACUTE}', u\"\\\\'S\")\n self.register(u'\\N{LATIN SMALL LETTER S WITH ACUTE}', u\"\\\\'s\")\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH CIRCUMFLEX}', u'\\\\^S')\n self.register(u'\\N{LATIN SMALL LETTER S WITH CIRCUMFLEX}', u'\\\\^s')\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH CEDILLA}', u'\\\\c S')\n self.register(u'\\N{LATIN SMALL LETTER S WITH CEDILLA}', u'\\\\c s')\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH CARON}', u'\\\\v S')\n self.register(u'\\N{LATIN SMALL LETTER S WITH CARON}', u'\\\\v s')\n self.register(u'\\N{LATIN CAPITAL LETTER T WITH CEDILLA}', u'\\\\c T')\n self.register(u'\\N{LATIN SMALL LETTER T WITH CEDILLA}', u'\\\\c t')\n self.register(u'\\N{LATIN CAPITAL LETTER T WITH CARON}', u'\\\\v T')\n self.register(u'\\N{LATIN SMALL LETTER T WITH CARON}', u'\\\\v t')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH TILDE}', u'\\\\~U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH TILDE}', u'\\\\~u')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH MACRON}', u'\\\\=U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH MACRON}', u'\\\\=u')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH BREVE}', u'\\\\u U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH BREVE}', u'\\\\u u')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH RING ABOVE}', u'\\\\r U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH RING ABOVE}', u'\\\\r u')\n self.register(\n u'\\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}',\n u'\\\\H U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH DOUBLE ACUTE}', u'\\\\H u')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH OGONEK}', u'\\\\k U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH OGONEK}', u'\\\\k u')\n self.register(u'\\N{LATIN CAPITAL LETTER W WITH CIRCUMFLEX}', u'\\\\^W')\n self.register(u'\\N{LATIN SMALL LETTER W WITH CIRCUMFLEX}', u'\\\\^w')\n self.register(u'\\N{LATIN CAPITAL LETTER Y WITH CIRCUMFLEX}', u'\\\\^Y')\n self.register(u'\\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}', u'\\\\^y')\n self.register(u'\\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}', u'\\\\\"Y')\n 
self.register(u'\\N{LATIN CAPITAL LETTER Z WITH ACUTE}', u\"\\\\'Z\")\n self.register(u'\\N{LATIN SMALL LETTER Z WITH ACUTE}', u\"\\\\'z\")\n self.register(u'\\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}', u'\\\\.Z')\n self.register(u'\\N{LATIN SMALL LETTER Z WITH DOT ABOVE}', u'\\\\.z')\n self.register(u'\\N{LATIN CAPITAL LETTER Z WITH CARON}', u'\\\\v Z')\n self.register(u'\\N{LATIN SMALL LETTER Z WITH CARON}', u'\\\\v z')\n self.register(u'\\N{LATIN CAPITAL LETTER DZ WITH CARON}', u'D\\\\v Z')\n self.register(\n u'\\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON}',\n u'D\\\\v z')\n self.register(u'\\N{LATIN SMALL LETTER DZ WITH CARON}', u'd\\\\v z')\n self.register(u'\\N{LATIN CAPITAL LETTER LJ}', u'LJ', decode=False)\n self.register(\n u'\\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}',\n u'Lj',\n decode=False)\n self.register(u'\\N{LATIN SMALL LETTER LJ}', u'lj', decode=False)\n self.register(u'\\N{LATIN CAPITAL LETTER NJ}', u'NJ', decode=False)\n self.register(\n u'\\N{LATIN CAPITAL LETTER N WITH SMALL LETTER J}',\n u'Nj',\n decode=False)\n self.register(u'\\N{LATIN SMALL LETTER NJ}', u'nj', decode=False)\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH CARON}', u'\\\\v A')\n self.register(u'\\N{LATIN SMALL LETTER A WITH CARON}', u'\\\\v a')\n self.register(u'\\N{LATIN CAPITAL LETTER I WITH CARON}', u'\\\\v I')\n self.register(u'\\N{LATIN SMALL LETTER I WITH CARON}', u'\\\\v\\\\i')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH CARON}', u'\\\\v O')\n self.register(u'\\N{LATIN SMALL LETTER O WITH CARON}', u'\\\\v o')\n self.register(u'\\N{LATIN CAPITAL LETTER U WITH CARON}', u'\\\\v U')\n self.register(u'\\N{LATIN SMALL LETTER U WITH CARON}', u'\\\\v u')\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH CARON}', u'\\\\v G')\n self.register(u'\\N{LATIN SMALL LETTER G WITH CARON}', u'\\\\v g')\n self.register(u'\\N{LATIN CAPITAL LETTER K WITH CARON}', u'\\\\v K')\n self.register(u'\\N{LATIN SMALL LETTER K WITH CARON}', u'\\\\v k')\n self.register(u'\\N{LATIN CAPITAL LETTER O WITH OGONEK}', u'\\\\k O')\n self.register(u'\\N{LATIN SMALL LETTER O WITH OGONEK}', u'\\\\k o')\n self.register(u'\\N{LATIN SMALL LETTER J WITH CARON}', u'\\\\v\\\\j')\n self.register(u'\\N{LATIN CAPITAL LETTER DZ}', u'DZ', decode=False)\n self.register(\n u'\\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z}',\n u'Dz',\n decode=False)\n self.register(u'\\N{LATIN SMALL LETTER DZ}', u'dz', decode=False)\n self.register(u'\\N{LATIN CAPITAL LETTER G WITH ACUTE}', u\"\\\\'G\")\n self.register(u'\\N{LATIN SMALL LETTER G WITH ACUTE}', u\"\\\\'g\")\n self.register(u'\\N{LATIN CAPITAL LETTER AE WITH ACUTE}', u\"\\\\'\\\\AE\")\n self.register(u'\\N{LATIN SMALL LETTER AE WITH ACUTE}', u\"\\\\'\\\\ae\")\n self.register(\n u'\\N{LATIN CAPITAL LETTER O WITH STROKE AND ACUTE}',\n u\"\\\\'\\\\O\")\n self.register(\n u'\\N{LATIN SMALL LETTER O WITH STROKE AND ACUTE}',\n u\"\\\\'\\\\o\")\n self.register(u'\\N{LATIN CAPITAL LETTER ETH}', u'\\\\DH')\n self.register(u'\\N{LATIN SMALL LETTER ETH}', u'\\\\dh')\n self.register(u'\\N{LATIN CAPITAL LETTER THORN}', u'\\\\TH')\n self.register(u'\\N{LATIN SMALL LETTER THORN}', u'\\\\th')\n self.register(u'\\N{LATIN CAPITAL LETTER D WITH STROKE}', u'\\\\DJ')\n self.register(u'\\N{LATIN SMALL LETTER D WITH STROKE}', u'\\\\dj')\n self.register(u'\\N{LATIN CAPITAL LETTER D WITH DOT BELOW}', u'\\\\d D')\n self.register(u'\\N{LATIN SMALL LETTER D WITH DOT BELOW}', u'\\\\d d')\n self.register(u'\\N{LATIN CAPITAL LETTER L WITH DOT BELOW}', u'\\\\d L')\n self.register(u'\\N{LATIN SMALL LETTER L 
WITH DOT BELOW}', u'\\\\d l')\n self.register(u'\\N{LATIN CAPITAL LETTER M WITH DOT BELOW}', u'\\\\d M')\n self.register(u'\\N{LATIN SMALL LETTER M WITH DOT BELOW}', u'\\\\d m')\n self.register(u'\\N{LATIN CAPITAL LETTER N WITH DOT BELOW}', u'\\\\d N')\n self.register(u'\\N{LATIN SMALL LETTER N WITH DOT BELOW}', u'\\\\d n')\n self.register(u'\\N{LATIN CAPITAL LETTER R WITH DOT BELOW}', u'\\\\d R')\n self.register(u'\\N{LATIN SMALL LETTER R WITH DOT BELOW}', u'\\\\d r')\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH DOT BELOW}', u'\\\\d S')\n self.register(u'\\N{LATIN SMALL LETTER S WITH DOT BELOW}', u'\\\\d s')\n self.register(u'\\N{LATIN CAPITAL LETTER T WITH DOT BELOW}', u'\\\\d T')\n self.register(u'\\N{LATIN SMALL LETTER T WITH DOT BELOW}', u'\\\\d t')\n self.register(u'\\N{LATIN CAPITAL LETTER S WITH COMMA BELOW}',\n u'\\\\textcommabelow S')\n self.register(u'\\N{LATIN SMALL LETTER S WITH COMMA BELOW}',\n u'\\\\textcommabelow s')\n self.register(u'\\N{LATIN CAPITAL LETTER T WITH COMMA BELOW}',\n u'\\\\textcommabelow T')\n self.register(u'\\N{LATIN SMALL LETTER T WITH COMMA BELOW}',\n u'\\\\textcommabelow t')\n self.register(u'\\N{PARTIAL DIFFERENTIAL}', u'\\\\partial', mode='math')\n self.register(u'\\N{N-ARY PRODUCT}', u'\\\\prod', mode='math')\n self.register(u'\\N{N-ARY SUMMATION}', u'\\\\sum', mode='math')\n self.register(u'\\N{SQUARE ROOT}', u'\\\\surd', mode='math')\n self.register(u'\\N{INFINITY}', u'\\\\infty', mode='math')\n self.register(u'\\N{INTEGRAL}', u'\\\\int', mode='math')\n self.register(u'\\N{INTERSECTION}', u'\\\\cap', mode='math')\n self.register(u'\\N{UNION}', u'\\\\cup', mode='math')\n self.register(u'\\N{RIGHTWARDS ARROW}', u'\\\\rightarrow', mode='math')\n self.register(\n u'\\N{RIGHTWARDS DOUBLE ARROW}',\n u'\\\\Rightarrow',\n mode='math')\n self.register(u'\\N{LEFTWARDS ARROW}', u'\\\\leftarrow', mode='math')\n self.register(\n u'\\N{LEFTWARDS DOUBLE ARROW}',\n u'\\\\Leftarrow',\n mode='math')\n self.register(u'\\N{LOGICAL OR}', u'\\\\vee', mode='math')\n self.register(u'\\N{LOGICAL AND}', u'\\\\wedge', mode='math')\n self.register(u'\\N{ALMOST EQUAL TO}', u'\\\\approx', mode='math')\n self.register(u'\\N{NOT EQUAL TO}', u'\\\\neq', mode='math')\n self.register(u'\\N{LESS-THAN OR EQUAL TO}', u'\\\\leq', mode='math')\n self.register(u'\\N{GREATER-THAN OR EQUAL TO}', u'\\\\geq', mode='math')\n self.register(u'\\N{MODIFIER LETTER CIRCUMFLEX ACCENT}', u'\\\\^{}')\n self.register(u'\\N{CARON}', u'\\\\v{}')\n self.register(u'\\N{BREVE}', u'\\\\u{}')\n self.register(u'\\N{DOT ABOVE}', u'\\\\.{}')\n self.register(u'\\N{RING ABOVE}', u'\\\\r{}')\n self.register(u'\\N{OGONEK}', u'\\\\k{}')\n self.register(u'\\N{DOUBLE ACUTE ACCENT}', u'\\\\H{}')\n self.register(u'\\N{LATIN SMALL LIGATURE FI}', u'fi', decode=False)\n self.register(u'\\N{LATIN SMALL LIGATURE FL}', u'fl', decode=False)\n self.register(u'\\N{LATIN SMALL LIGATURE FF}', u'ff', decode=False)\n\n self.register(u'\\N{GREEK SMALL LETTER ALPHA}', u'\\\\alpha', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER BETA}', u'\\\\beta', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER GAMMA}', u'\\\\gamma', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER DELTA}', u'\\\\delta', mode='math')\n self.register(\n u'\\N{GREEK SMALL LETTER EPSILON}',\n u'\\\\epsilon',\n mode='math')\n self.register(u'\\N{GREEK SMALL LETTER ZETA}', u'\\\\zeta', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER ETA}', u'\\\\eta', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER THETA}', u'\\\\theta', mode='math')\n 
self.register(u'\\N{GREEK SMALL LETTER THETA}', u'\\\\texttheta',\n package='textgreek', encode=False)\n self.register(u'\\N{GREEK SMALL LETTER IOTA}', u'\\\\iota', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER KAPPA}', u'\\\\kappa', mode='math')\n self.register(\n u'\\N{GREEK SMALL LETTER LAMDA}',\n u'\\\\lambda',\n mode='math') # LAMDA not LAMBDA\n self.register(u'\\N{GREEK SMALL LETTER MU}', u'\\\\mu', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER NU}', u'\\\\nu', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER XI}', u'\\\\xi', mode='math')\n self.register(\n u'\\N{GREEK SMALL LETTER OMICRON}',\n u'\\\\omicron',\n mode='math')\n self.register(u'\\N{GREEK SMALL LETTER PI}', u'\\\\pi', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER RHO}', u'\\\\rho', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER SIGMA}', u'\\\\sigma', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER TAU}', u'\\\\tau', mode='math')\n self.register(\n u'\\N{GREEK SMALL LETTER UPSILON}',\n u'\\\\upsilon',\n mode='math')\n self.register(u'\\N{GREEK SMALL LETTER PHI}', u'\\\\phi', mode='math')\n self.register(u'\\N{GREEK PHI SYMBOL}', u'\\\\varphi', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER CHI}', u'\\\\chi', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER PSI}', u'\\\\psi', mode='math')\n self.register(u'\\N{GREEK SMALL LETTER OMEGA}', u'\\\\omega', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER ALPHA}',\n u'\\\\Alpha',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER BETA}', u'\\\\Beta', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER GAMMA}',\n u'\\\\Gamma',\n mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER DELTA}',\n u'\\\\Delta',\n mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER EPSILON}',\n u'\\\\Epsilon',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER ZETA}', u'\\\\Zeta', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER ETA}', u'\\\\Eta', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER THETA}',\n u'\\\\Theta',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER IOTA}', u'\\\\Iota', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER KAPPA}',\n u'\\\\Kappa',\n mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER LAMDA}',\n u'\\\\Lambda',\n mode='math') # LAMDA not LAMBDA\n self.register(u'\\N{GREEK CAPITAL LETTER MU}', u'\\\\Mu', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER NU}', u'\\\\Nu', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER XI}', u'\\\\Xi', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER OMICRON}',\n u'\\\\Omicron',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER PI}', u'\\\\Pi', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER RHO}', u'\\\\Rho', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER SIGMA}',\n u'\\\\Sigma',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER TAU}', u'\\\\Tau', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER UPSILON}',\n u'\\\\Upsilon',\n mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER PHI}', u'\\\\Phi', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER CHI}', u'\\\\Chi', mode='math')\n self.register(u'\\N{GREEK CAPITAL LETTER PSI}', u'\\\\Psi', mode='math')\n self.register(\n u'\\N{GREEK CAPITAL LETTER OMEGA}',\n u'\\\\Omega',\n mode='math')\n self.register(u'\\N{COPYRIGHT SIGN}', u'\\\\copyright')\n self.register(u'\\N{COPYRIGHT SIGN}', u'\\\\textcopyright')\n self.register(u'\\N{LATIN CAPITAL LETTER A WITH ACUTE}', u\"\\\\'A\")\n 
self.register(u'\\N{LATIN CAPITAL LETTER I WITH ACUTE}', u\"\\\\'I\")\n self.register(u'\\N{HORIZONTAL ELLIPSIS}', u'\\\\ldots')\n self.register(u'\\N{TRADE MARK SIGN}', u'^{TM}', mode='math')\n self.register(\n u'\\N{TRADE MARK SIGN}',\n u'\\\\texttrademark',\n package='textcomp')\n self.register(\n u'\\N{REGISTERED SIGN}',\n u'\\\\textregistered',\n package='textcomp')\n # \\=O and \\=o will be translated into Ō and ō before we can\n # match the full latex string... so decoding disabled for now\n self.register(u'Ǭ', text_type(r'\\textogonekcentered{\\=O}'),\n decode=False)\n self.register(u'ǭ', text_type(r'\\textogonekcentered{\\=o}'),\n decode=False)\n self.register(u'ℕ', text_type(r'\\mathbb{N}'), mode='math')\n self.register(u'ℕ', text_type(r'\\mathbb N'), mode='math', decode=False)\n self.register(u'ℤ', text_type(r'\\mathbb{Z}'), mode='math')\n self.register(u'ℤ', text_type(r'\\mathbb Z'), mode='math', decode=False)\n self.register(u'ℚ', text_type(r'\\mathbb{Q}'), mode='math')\n self.register(u'ℚ', text_type(r'\\mathbb Q'), mode='math', decode=False)\n self.register(u'ℝ', text_type(r'\\mathbb{R}'), mode='math')\n self.register(u'ℝ', text_type(r'\\mathbb R'), mode='math', decode=False)\n self.register(u'ℂ', text_type(r'\\mathbb{C}'), mode='math')\n self.register(u'ℂ', text_type(r'\\mathbb C'), mode='math', decode=False)", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for text in text_list]", "def escape_chars(a_string):\n return a_string.translate(str.maketrans({\n \"\\n\": r\"\\\\n\",\n }))", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def split_escaped(string, separator):\n\n result = []\n current = ''\n escaped = False\n for char in string:\n if not escaped:\n if char == '\\\\':\n escaped = True\n continue\n elif char == separator:\n result.append(current)\n current = ''\n continue\n escaped = False\n current += char\n result.append(current)\n return result", "def html_escape(text):\r\n\treturn \"\".join(html_escape_table.get(c,c) for c in text)", "def sendkey_escape(string):\r\n return re.sub(r'([+^%~{}\\[\\]()])', r'{\\1}', string)", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n # then all that needs math mode\n text = text.replace(\"<\", '{$<$}')\n text = text.replace(\">\", '{$>$}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n text = text.replace(\"_\", '{\\\\_}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. 
hat{~} would also work.\n text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\~{}}')\n if self.insert_newline:\n # HACK: insert a blank before the newline, to avoid \n # ! LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n text = text.replace(\"\\n\", '}\\\\\\\\\\n\\\\mbox{')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n # unicode !!! \n text = text.replace(u'\\u2020', '{$\\\\dagger$}')\n return text", "def regex_compiled():\n return re.compile(SBE19DataParticle.regex())", "def tok_by_reg(pattern, list_of_toks, concordancing = False, **kwargs):\n import re\n comped = compiler(pattern)\n if comped == 'Bad query':\n return 'Bad query'\n if not concordancing:\n matches = [m for m in list_of_toks if re.search(comped, m)]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if re.search(comped, token):\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n match.append(re.search(comped, token).group(0))\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches", "def _is_control(char):\n if char == '\\t' or char == '\\n' or char == '\\r':\n return False\n cat = unicodedata.category(char)\n if cat.startswith('C'):\n return True\n return False", "def add_command_regex(self, pattern):\n # Make regex match consistent with wildcards, i.e. 
full string match\n if not pattern.endswith('$'):\n pattern += '$'\n self._command_regexs.append(re.compile(pattern))", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def needs_escape(self, string, target_char, quote_count=1):\n\n skip = False\n count = 0\n needs_escape = False\n for c in string:\n if skip:\n skip = False\n continue\n if c == '\\\\':\n skip = True\n elif c == target_char:\n count += 1\n if count == quote_count:\n needs_escape = True\n break\n else:\n count = 0\n return needs_escape", "def _internal_scan(self, text: str)->list:\n start_index = -1\n # end_index = -1\n\n current_regex = None\n tokens = []\n # print(\"entered\", text)\n i = -1\n while i - 1 < len(text):\n # print(i, char, \"'{}'\".format(text[start_index:i + 1]), current_regex, tokens)\n i += 1\n if i >= len(text):\n break\n char = text[i]\n if start_index == -1:\n continue_flag = False\n for regex in self._regexes:\n result = regex.check(text[i: i + regex.min_lookahead])\n if result:\n start_index = i\n i += regex.min_lookahead - 1\n # print('found', \"'\"+text[start_index:i + 1]+\"'\")\n current_regex = regex\n continue_flag = True\n break\n if not continue_flag:\n tokens.append(UndefinedToken(char))\n else:\n continue_flag = False\n # print('check', \"'\" + text[start_index:i+1] + \"'\", tokens)\n if current_regex.check(text[start_index:i + 1]):\n continue_flag = True\n else:\n for regex in self._regexes:\n if regex.check(text[start_index:i + 1]) and regex != current_regex:\n continue_flag = True\n current_regex = regex\n\n if continue_flag:\n break\n if not continue_flag:\n tokens.append(Token(current_regex, text[start_index:i]))\n # start_index = -1\n # current_regex = None\n # print(tokens, \"'{}'\".format(text[start_index:i]))\n tokens2 = self._internal_scan(text[i:])\n if tokens2:\n tokens += self._backtrack(tokens2)\n return tokens\n if start_index != -1 and current_regex:\n tokens.append(Token(current_regex, text[start_index:]))\n # tokens = self._backtrack(tokens)\n\n return tokens", "def _regex_from_encoded_pattern(s):\r\n if s.startswith('/') and s.rfind('/') != 0:\r\n # Parse it: /PATTERN/FLAGS\r\n idx = s.rfind('/')\r\n pattern, flags_str = s[1:idx], s[idx+1:]\r\n flag_from_char = {\r\n \"i\": re.IGNORECASE,\r\n \"l\": re.LOCALE,\r\n \"s\": re.DOTALL,\r\n \"m\": re.MULTILINE,\r\n \"u\": re.UNICODE,\r\n }\r\n flags = 0\r\n for char in flags_str:\r\n try:\r\n flags |= flag_from_char[char]\r\n except KeyError:\r\n raise ValueError(\"unsupported regex flag: '%s' in '%s' \"\r\n \"(must be one of '%s')\"\r\n % (char, s, ''.join(list(flag_from_char.keys()))))\r\n return re.compile(s[1:idx], flags)\r\n else: # not an encoded regex\r\n return re.compile(re.escape(s))", "def control_bus_tokens(self):\n if self.lliagraph:\n return self.lliagraph.control_bus_tokens.items()\n else:\n return []", "def strip_ansi(text):\n return ANSI_ESCAPE_RE.sub('', text)", "def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex", "def strip_raw_codes(self, string):\n return self.ansi_regex.sub(\"\", string)", "def CEscape(text, as_utf8):\n # type: (...) 
-> str\n # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not\n # satisfy our needs; they encodes unprintable characters using two-digit hex\n # escapes whereas our C++ unescaping function allows hex escapes to be any\n # length. So, \"\\0011\".encode('string_escape') ends up being \"\\\\x011\", which\n # will be decoded in C++ as a single-character string with char code 0x11.\n text_is_unicode = isinstance(text, str)\n if as_utf8 and text_is_unicode:\n # We're already unicode, no processing beyond control char escapes.\n return text.translate(_cescape_chr_to_symbol_map)\n ord_ = ord if text_is_unicode else lambda x: x # bytes iterate as ints.\n if as_utf8:\n return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text)\n return ''.join(_cescape_byte_to_str[ord_(c)] for c in text)", "def list_escape(s):\n return re.sub(r'[\\\\,]', _escape_char, s)", "def compile_regexes(bugs):\n for bug in bugs:\n try:\n bug['regex'] = re.compile(bug['pattern'])\n except Exception as exc:\n print \"regex error: \", bug['pattern'], bug['id']\n bug['regex'] = None\n return bugs", "def test_term_chars_default(self, instrument):\n assert instrument.term_chars == b'\\r'", "def py_scanstring(s, end, encoding=None, strict=True,\n _b=BACKSLASH, _m=STRINGCHUNK.match):\n if encoding is None:\n encoding = DEFAULT_ENCODING\n chunks = []\n _append = chunks.append\n begin = end - 1\n while 1:\n chunk = _m(s, end)\n if chunk is None:\n raise ValueError(\n errmsg(\"Unterminated string starting at\", s, begin))\n end = chunk.end()\n content, terminator = chunk.groups()\n # Content is contains zero or more unescaped string characters\n if content:\n if not isinstance(content, unicode):\n content = unicode(content, encoding)\n _append(content)\n # Terminator is the end of string, a literal control character,\n # or a backslash denoting that an escape sequence follows\n if terminator == '\"':\n break\n elif terminator != '\\\\':\n if strict:\n msg = \"Invalid control character %r at\" % (terminator,)\n #msg = \"Invalid control character {0!r} at\".format(terminator)\n raise ValueError(errmsg(msg, s, end))\n else:\n _append(terminator)\n continue\n try:\n esc = s[end]\n except IndexError:\n raise ValueError(\n errmsg(\"Unterminated string starting at\", s, begin))\n # If not a unicode escape sequence, must be in the lookup table\n if esc != 'u':\n try:\n char = _b[esc]\n except KeyError:\n msg = \"Invalid \\\\escape: \" + repr(esc)\n raise ValueError(errmsg(msg, s, end))\n end += 1\n else:\n # Unicode escape sequence\n uni = _decode_uXXXX(s, end)\n end += 5\n # Check for surrogate pair on UCS-4 systems\n if sys.maxunicode > 65535 and \\\n 0xd800 <= uni <= 0xdbff and s[end:end + 2] == '\\\\u':\n uni2 = _decode_uXXXX(s, end + 1)\n if 0xdc00 <= uni2 <= 0xdfff:\n uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))\n end += 6\n char = unichr(uni)\n # Append the unescaped character\n _append(char)\n return u''.join(chunks), end", "def gen_chars(self, lines_str_list):\n char_index_counter = 0\n chars = VGroup()\n for line_no in range(lines_str_list.__len__()):\n chars.add(VGroup())\n chars[line_no].add(\n *self.lines_text.chars[\n char_index_counter : char_index_counter\n + lines_str_list[line_no].__len__()\n + 1\n ]\n )\n char_index_counter += lines_str_list[line_no].__len__() + 1\n return chars", "def compile_commands(commands):\n return [ord(char) for char in ''.join([c + '\\n' for c in commands])]", "def forbidden_latex_chars():\n\n tex_char = ['\\\\', '{', '}', '&', '[', ']', '^', '~']\n chars = ', 
'.join(['\"{char}\"'.format(char=char) for char in tex_char])\n message = _(u\"Următoarele caractere sunt interzise și trebuie scoase : {chars}.\".format(chars=chars))\n return tex_char, message", "def handle_special_symbols(text: str\n ) -> str:\n valid_special_symbols = {' ', '_'}\n\n def criteria(c: str\n ) -> str:\n return c if c.isalnum() or c in valid_special_symbols else ' '\n\n return ''.join(criteria(c) for c in list(text))", "def _compileRegex(self):\n\t\tself.__punctuationRegex = re.compile(\"|\".join(self._punctuation))\n\t\tself.__apostropheRegex = re.compile('(?<=[a-zA-Z])('+\n\t\t\t\t\t\t\t\t\t\"|\".join(self._apostrophe)+\n\t\t\t\t\t\t\t\t\t')(?=[a-zA-Z])')\n\t\tself.__articlesRegex = re.compile('(?i)^('+\"|\".join(self._articles)+')\\s')", "def sh_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n escaped = []\n for char in str(val):\n if char in \"$#\\\"\":\n char = \"\\\\\" + char\n elif ord(char) < 32 or ord(char) > 126:\n char = \"\\\\%03o\" % ord(char)\n escaped.append(char)\n return ''.join(escaped)", "def regex_compiled():\n return re.compile(SBE19HardwareParticle.regex(), re.DOTALL)", "def _surround_ansi_escapes(prompt, start=\"\\x01\", end=\"\\x02\"):\n # Windows terminals don't use ANSI escape codes and Windows readline isn't based on GNU Readline\n if sys.platform == \"win32\":\n return prompt\n\n escaped = False\n result = \"\"\n\n for c in prompt:\n if c == \"\\x1b\" and not escaped:\n result += start + c\n escaped = True\n elif c.isalpha() and escaped:\n result += c + end\n escaped = False\n else:\n result += c\n\n return result", "def escape_string(text):\n return escape(text)", "def _compile_regexes(tokdict):\r\n for key, value in tokdict.items():\r\n tokdict[key] = re.compile('^(?:%s)$' % value, re.I).match\r\n return tokdict", "def _escape_backticks(text: str, escape_with='\\u200b'):\r\n return text.replace('`', '`'+escape_with)", "def convertPatterns(path, sign):\r\n filters = []\r\n f = open(path, \"r\")\r\n while 1:\r\n pattern = f.readline()\r\n if not pattern:\r\n break\r\n if pattern[-1] == \"\\n\":\r\n pattern = pattern[:-1]\r\n\r\n if re.match(\"[\\t ]*$\", pattern):\r\n continue\r\n if pattern[0] == \"#\":\r\n continue\r\n filters = filters + [convertPattern(pattern, sign)]\r\n f.close()\r\n return filters" ]
[ "0.49353287", "0.49150157", "0.49105355", "0.46912605", "0.4635989", "0.4623271", "0.4604767", "0.4595183", "0.45459825", "0.4540293", "0.45323464", "0.450759", "0.44997773", "0.44368735", "0.44090384", "0.44047463", "0.43948525", "0.43837532", "0.43768844", "0.43508714", "0.43080154", "0.43022498", "0.4266281", "0.42568347", "0.42557907", "0.4253599", "0.42483586", "0.42445487", "0.42350715", "0.4199649", "0.4196392", "0.4167491", "0.41602823", "0.41568127", "0.40995", "0.40974364", "0.40923044", "0.40795586", "0.40737838", "0.40681395", "0.40681395", "0.40676552", "0.40668353", "0.40667117", "0.40658107", "0.4058287", "0.40319234", "0.40286258", "0.4026335", "0.40139738", "0.40064788", "0.40053755", "0.40043938", "0.3975925", "0.39640284", "0.39640284", "0.39588362", "0.3958248", "0.39414597", "0.39372364", "0.3936229", "0.39296553", "0.39225236", "0.3920039", "0.39133996", "0.39108875", "0.3908277", "0.38996428", "0.38975832", "0.38967186", "0.3884715", "0.3884273", "0.38806018", "0.38766742", "0.38747466", "0.3845616", "0.38452664", "0.3842026", "0.38251585", "0.3804141", "0.38023755", "0.38010535", "0.37944812", "0.37933987", "0.378523", "0.3781716", "0.37769732", "0.3774877", "0.37733683", "0.3770777", "0.37627777", "0.37563387", "0.37463012", "0.37441155", "0.37400606", "0.37354958", "0.37310767", "0.37303326", "0.3729759", "0.37099043" ]
0.6139948
0
Signal handler that gets installed
def signal_handler(self,sig,data): self.resize_child_window()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_signal_handler():\n signal.signal(signal.SIGUSR1, sig_handler)\n signal.signal(signal.SIGTERM, term_handler)\n #logger.warning(\"Signal handler installed.\")", "def signal(self, args):\n pass", "def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()", "def _signal_handler(*args):\n self._user_exit = True", "def init_signal_handler():\n os.environ['SIGNAL_RECEIVED'] = 'False'\n os.environ['MAIN_PID'] = str(os.getpid())\n\n signal.signal(signal.SIGUSR1, signalHandler)\n signal.signal(signal.SIGTERM, SIGTERMHandler)\n print(\"Signal handler installed.\", flush=True)", "def signal(self):\n pass", "def register_signal_handler(self):\n signal.signal(signal.SIGINT, self.quit_gracefully)\n signal.signal(signal.SIGTERM, self.quit_gracefully)\n return", "def UpdateSignals():\n try:\n import signal\n except ImportError:\n Log('UpdateSignals: Warning: signal module unavailable -- '\n 'not installing signal handlers.')\n return\n # Twisted installs a SIGTERM signal handler which tries to shut the system\n # down. Use our own handler instead.\n Log('UpdateSignals: installed new SIGTERM handler')\n signal.signal(signal.SIGTERM, SigTerm)", "def _set_signal_handler(self) -> None:\r\n loop = asyncio.get_running_loop()\r\n # get interupt signals supported by user's OS.\r\n signals = [getattr(signal, s) for s in (\r\n 'SIGBREAK', 'SIGINT', 'SIGTERM', 'SIGHUP') if hasattr(signal, s)]\r\n for s in signals:\r\n try:\r\n loop.add_signal_handler(\r\n s, lambda s=s: asyncio.create_task(self.shutdown(s)))\r\n except NotImplementedError:\r\n pass", "def signal(sig, action): # real signature unknown; restored from __doc__\n pass", "def setup_signal_handlers(self):\n signal.signal(signal.SIGUSR1, self.handle_logging_signal)\n signal.signal(signal.SIGUSR2, self.handle_logging_signal)", "def setup_signal_handlers():\n # type: () -> None\n for signum in [signal.SIGINT, signal.SIGTERM]:\n signal.signal(signum, log_and_exit_handler)\n\n signal.signal(signal.SIGUSR1, dump_thread_handler)", "def signal_handler(self, signum):\n raise Exception(\"Caught signal {0}\".format(signum))", "def signalReceived (self, signal): \n raise NotImplementedError(\"Lack of signalReceived method\")", "def handler(signum, frame):\n m.signal()", "def signal(self):\n self.mainloop().signal()", "def hook_signals(self):\n signal.signal(signal.SIGTERM, self.quit)\n signal.signal(signal.SIGQUIT, self.quit)\n signal.signal(signal.SIGHUP, self.reload)", "def sigint_handler(signal, frame):\n rclpy.shutdown()\n if prev_sigint_handler is not None:\n prev_sigint_handler(signal)", "def catchall_signal_handler(*args, **kwargs): \n print(\"Caught signal (in catchall handler) \" + kwargs['dbus_interface'] + \".\" + kwargs['member'])\n for arg in args:\n print(\" \" + str(arg))", "def signal_oi(self):\n pass", "def _signal_handler(signum, frame):\n res_mgr()\n sys.exit(0)", "def _set_signal_handlers():\n\n def _handler(_signal, _frame):\n raise KeyboardInterrupt\n\n signal.signal(signal.SIGINT, _handler)\n signal.signal(signal.SIGTERM, _handler)", "def signal_handler(*args):\n if station:\n station.shutdown()", "def setup(self):\n\t\tif self.hasSignalModule and not self.signalsRegistered:\n\t\t\t# Jython does not support all signals, so we only use\n\t\t\t# the available ones\n\t\t\tsignals = ['SIGINT', 'SIGHUP', 'SIGABRT', 'SIGQUIT', 'SIGTERM']\n\t\t\timport signal\n\t\t\tfor sig in signals:\n\t\t\t\ttry:\n\t\t\t\t\tsignal.signal(getattr(signal, sig), self._shutdown)\n\t\t\t\t\tself.signalsRegistered.append(sig)\n\t\t\t\texcept 
Exception as e:\n\t\t\t\t\tLogger.Err(\"[!] monitoring.Signals._registerSignals:%s %s\\n\" % (sig, e))", "def signal_handler(signal, frame):\n print()\n endProgram(0)", "def set_signal(self):\n eprint(\"Signal caught, ending log...\")\n self.log_sig = True", "def handler(signum, frame):\n logging.warning(\"Got a {} signal. Doing nothing\".format(signum))", "def setReCreateTreeSignalHandler():\n signal.signal(signal.SIGUSR2,reCreateTreeSignalHandler)", "def send_signal(self, sig):\r\n sig = { 0x01 : \"HUP\",\r\n 0x02 : \"INT\",\r\n 0x03 : \"NEWNYM\",\r\n 0x0A : \"USR1\",\r\n 0x0C : \"USR2\",\r\n 0x0F : \"TERM\" }.get(sig,sig)\r\n self.sendAndRecv(\"SIGNAL %s\\r\\n\"%sig)", "def add_signal_handler(self, signal: str, handler: SignalType) -> None:\n self.signals[signal].append(handler)", "def _on_event(self, event) -> None:\n self.signal.emit(event)", "def signal_handler(sig, frame):\n raise ExitException()", "def signal_handler(self, signum, frame):\n if signum == signal.SIGINT:\n self.terminate = True\n elif signum == signal.SIGALRM:\n self.button_handler(self.BUTTON_PIN)", "def setup_signals(self):\n if os.name == 'nt':\n return\n\n def shutdown_handler(signo):\n log.info('Shutting down on signal %d' % signo)\n self.shutdown_event.set()\n\n loop = asyncio.get_event_loop()\n for sig in [signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(sig, shutdown_handler, sig)", "def sigHandler(sig, frame):\n # pylint: disable=unused-argument\n shutdown()\n sys.exit(0)", "def handler(signum, frame):\n print(\"Signal handler called with signal %i\" % signum)\n sys.exit(-1)", "def test_getsignal(self):\n from signal import getsignal, signal, SIGINT, SIG_DFL, SIG_IGN\n\n def handler(*a):\n pass\n\n try:\n assert getsignal(SIGINT) == SIG_DFL\n signal(SIGINT, SIG_DFL)\n assert getsignal(SIGINT) == SIG_DFL\n signal(SIGINT, SIG_IGN)\n assert getsignal(SIGINT) == SIG_IGN\n signal(SIGINT, handler)\n assert getsignal(SIGINT) is handler\n finally:\n signal(SIGINT, SIG_DFL)", "def sighandler(self) -> Optional[daemoniker.SignalHandler1]:\n if not self.pid_path.exists():\n return None\n\n def _quit(*args, **kw):\n from meerschaum.__main__ import _exit\n _exit()\n daemoniker = attempt_import('daemoniker')\n if '_sighandler' not in self.__dict__:\n self._sighandler = daemoniker.SignalHandler1(\n str(self.pid_path),\n sigint = _quit,\n sigterm = _quit,\n sigabrt = _quit,\n )\n return self._sighandler", "def signal_handler(self, signum, frame):\n self._running = False", "def sigtrace_handler(sig,ign):\n global SIGNALS\n print(\"received SIG%s: %s\"%(SIGNALS[sig],process_infos(\"???\")),file=sys.stderr)\n if sig == 2:\n # Python has a special handler for SIGINT that generates\n # a KeyboardInterrupt exception\n signal.signal(sig,signal.default_int_handler)\n elif sig == signal.SIGCONT:\n # When the process restarts after being stopped we re-install\n # tracing handler on Ctrl-Z and TTIN/TTOUT signals so it is\n # possible to play with job control\n signal.signal(signal.SIGTSTP,sigtrace_handler)\n signal.signal(signal.SIGTTOU,sigtrace_handler)\n signal.signal(signal.SIGTTIN,sigtrace_handler)\n else:\n # Once a signal has been received we reinstall the default\n # handler before self-resending the signal\n signal.signal(sig,signal.SIG_DFL)\n # All signal received but SIGCONT are self-resent after being received\n if sig != signal.SIGCONT:\n os.kill(os.getpid(),sig)", "def handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)", "def __signalHandler(self, signalNumber, frame):\n self._loop 
= False", "def run(self):\n logger.debug(\"Installing SIGIO signal handler ..\")\n signal.signal(signal.SIGIO, self.signal_handler)\n timer = Timer()\n for seconds in itertools.count():\n logger.debug(\"Waiting for SIGIO signal (%s) ..\", timer)\n time.sleep(seconds)", "def signal_handler(sig_num, frame):\n global exit_flag\n logger.warn('Signal Recieved: {}'.format(str(sig_num)))\n if sig_num:\n exit_flag = True", "def sigint_handler(*args):\n Qt.QApplication.quit()", "def signal_handler(signum, frame):\n main.CLOSE = True", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def signal_handler(signum, frame):\n sys.exit(0)", "def signal_handler(self, signal_number, frame):\n sys.exit(0)", "def send_signal(self, sig):\n os.kill(self.pid, sig)", "def trigger_signal(self, signal: str) -> None:\n logger.debug(\"Triggered Signal %s\", signal)\n for handler in self.signals[signal]:\n if not iscoroutinefunction(handler):\n handler(self, signal)", "def sighandler(signum, frame):\n global _terminate\n global _interruptcnt\n print >> FileKeyUtils.WMSlog, 'sighandler> ', signum\n ++_interruptcnt \n if signum in(signal.SIGABRT, signal.SIGINT, signal.SIGTERM):\n print >> FileKeyUtils.WMSlog, 'sighandler> terminate pid: ', os.getpid(), signum\n _terminate = True\n elif signum in(signal.SIGHUP, signal.SIGTSTP):\n print >> FileKeyUtils.WMSlog, 'sighandler> suspend/stop/pause pid: ', os.getpid(), signum\n signal.pause()\n else:\n print >> FileKeyUtils.WMSlog, 'sighandler> resume/continue pid: ', os.getpid(), signum\n _terminate = False", "def ready(self):\n import main.signals # noqa", "def requeueHandler(self, signum, frame):\n args = self.args\n print('Signal received', signum, time.time(), flush=True)\n self.SIGNAL_RECEIVED = True\n\n if os.path.isfile(self.HALT_filename):\n print('Job is done, exiting', flush=True)\n exit(0)", "def set_signal_handlers(cls, signals):\n for sig in signals:\n try:\n original_handler = signal.getsignal(sig)\n if original_handler == cls.signal_handler:\n continue\n signal.signal(sig, cls.signal_handler)\n cls.__signal_handlers[sig] = original_handler\n except Exception as e:\n pass", "def signal_handler(sig, frame):\n sys.exit(0)", "def test_allows_signal_handler_override(self):\n self._signals_notified = False\n b1 = Block()\n self.configure_block(b1, {})\n\n self.assertFalse(self._signals_notified)\n b1.notify_signals([Signal()])\n self.assertTrue(self._signals_notified)", "def signal_handler(aggregator, liveness_probe, signal):\n\n if signal == signal.SIGTERM:\n liveness_probe.close()\n\n # Wait aggregator handle with all the received events before close it\n while (\n any([not x.channel.empty() for x in aggregator.subscribers])\n or RequestTimeLogger.current_requests_count > 0\n ):\n print(\"There are events to handle yet.\")\n\n aggregator.stop()\n exit(0)", "def _sigint(self, signal, frame):\n self.disconnect = True\n if self.cardinal:\n self.cardinal.quit('Received SIGINT.')", "def signal_handler(signal_number, stack_frame):\n if signal_number in [signal.SIGTERM, signal.SIGINT]:\n terminate_surveillance()", "def process_signal(self, src, tag, value):\n pass", "def send_signal(self, signal):\n self.kill()", "def sigint_handler(*args):\n sys.stderr.write('\\r')\n QtGui.QApplication.quit()", "def signal_handler(cls, signum, frame):\n global _iom_instance\n\n # Find name of signal\n signame = str(signum)\n for key in signal.__dict__.keys():\n if key.startswith(\"SIG\") and getattr(signal, key) == signum:\n signame = key\n break\n\n 
try:\n logger = _iom_instance._IOManager__logger\n logger.warning(\"Caught signal %s. Terminating IOManager\" % signame)\n except:\n print \"Caught signal %s. Terminating IOManager\" % signame\n\n original_handler = None\n if signal in cls.__signal_handlers:\n original_handler = cls.__signal_handlers[signal]\n\n clear_IOM()\n\n if original_handler:\n original_handler(signal, frame)\n else:\n sys.exit(1)", "def register_handler(self, handler):\r\n self.handler = handler", "def sig_handler(sig, frame):\n\n # Close down any MC sockets that were opened for listening.\"\n if mc_sock is not None:\n mc_sock.close()\n print(\"Closed the MC Listening Socket\")\n\n print(\"User signals: 'The End'\")\n exit(0)", "def _sigterm_handler(signum: int, _frame: FrameType) -> None:\n sys.exit('from sigterm handler')", "def caught_signal(\n self, signals: int, max_signals: int, executor: \"TaskGraphExecutor\"\n ) -> None:", "def update_signals(self, event):\r\n\r\n if event.type == 'SIGNAL':\r\n order_event = self.generate_naive_order(event)\r\n self.events.put(order_event)", "def cli():\n signal.signal(signal.SIGINT, signal_handler)\n pass", "def register(self):\n REGISTERED_SIGNALS.setdefault(self.path, []).append(self)", "def _install_signal_handlers(workers_socket, manager_socket):\n\n def sighup_handler(signal, frame):\n logger.info(\"hangup signal (SIGHUP) received; reloading configuration\")\n workers_socket.close()\n manager_socket.close()\n main()\n\n signal.signal(signal.SIGHUP, sighup_handler)\n\n def cleanup():\n workers_socket.close()\n manager_socket.close()\n context.destroy()\n\n def sigint_handler(signal, frame):\n logger.info(\"interrupt signal (SIGINT or Ctrl-C) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGINT, sigint_handler)\n\n def sigterm_handler(signal, frame):\n logger.info(\"termination signal (SIGTERM) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGTERM, sigterm_handler)", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, 
*data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect\n pass", "def signal_handler(self, signal, frame):\r\n print 'You pressed Ctrl+C!'\r\n sys.exit(0)", "def install_signal_handlers(self):\n log = logging.getLogger('mailman.runner')\n # Set up our signal handlers. Also set up a SIGALRM handler to\n # refresh the lock once per day. The lock lifetime is 1 day + 6 hours\n # so this should be plenty.\n def sigalrm_handler(signum, frame): # noqa: E306\n self._lock.refresh()\n signal.alarm(SECONDS_IN_A_DAY)\n signal.signal(signal.SIGALRM, sigalrm_handler)\n signal.alarm(SECONDS_IN_A_DAY)\n # SIGHUP tells the runners to close and reopen their log files.\n def sighup_handler(signum, frame): # noqa: E306\n reopen()\n for pid in self._kids:\n os.kill(pid, signal.SIGHUP)\n log.info('Master watcher caught SIGHUP. Re-opening log files.')\n signal.signal(signal.SIGHUP, sighup_handler)\n # SIGUSR1 is used by 'mailman restart'.\n def sigusr1_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGUSR1)\n log.info('Master watcher caught SIGUSR1. Exiting.')\n signal.signal(signal.SIGUSR1, sigusr1_handler)\n # SIGTERM is what init will kill this process with when changing run\n # levels. It's also the signal 'mailman stop' uses.\n def sigterm_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGTERM)\n log.info('Master watcher caught SIGTERM. Exiting.')\n signal.signal(signal.SIGTERM, sigterm_handler)\n # SIGINT is what control-C gives.\n def sigint_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGINT)\n log.info('Master watcher caught SIGINT. 
Restarting.')\n signal.signal(signal.SIGINT, sigint_handler)", "def signalSetup(self):\n self.ui.b_info.clicked.connect(self.showInfo)\n self.ui.b_save.clicked.connect(self.openSave)\n self.ui.b_vid.clicked.connect(self.openVideo)\n self.ui.b_run.clicked.connect(self.startRun)\n self.ui.b_colour.clicked.connect(self.pickColour)\n self.ui.b_ground_truth.clicked.connect(self.openGroundTruth)\n\n self.ui.t_fps.textChanged.connect(self.changeFps)\n self.ui.t_low.editingFinished.connect(self.changeLow)\n self.ui.t_high.editingFinished.connect(self.changeHigh)\n self.ui.c_error_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_speed_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_crash_plot.stateChanged.connect(self.checkFiles)\n self.ui.combo_superpixel.currentIndexChanged.connect(\n self.changeSuperPixelMethod\n )\n self.ui.c_optimize.stateChanged.connect(self.checkFiles)\n self.ui.c_draw.stateChanged.connect(self.checkFiles)\n self.ui.c_velocity.stateChanged.connect(self.checkFiles)\n self.ui.c_object_detection.stateChanged.connect(self.checkFiles)", "async def session_handler(self, signal: str) -> None:\n if signal == SIGNAL_DATA:\n self.event_handler(self.websocket.data) # type: ignore\n\n elif signal == SIGNAL_CONNECTION_STATE:\n if self.async_connection_status_callback:\n self.async_connection_status_callback(self.websocket.state == \"running\") # type: ignore", "def register_exit_signals(self):\n signal.signal(signal.SIGINT, self._exit_gracefully)\n signal.signal(signal.SIGTERM, self._exit_gracefully)\n # So that we ignore the debug dump signal, making it easier to send\n signal.signal(signal.SIGUSR2, signal.SIG_IGN)", "def signal_handler(signal, frame):\n sys.exit(0)", "def signal_handler(signal, frame):\n sys.exit(0)", "def signal_handler(signal, frame):\n sys.exit(0)", "def on_notify(self, name):\r\n pass", "def signals():\n sigdict = {'SIGABRT':'should cause (graceful) daemon exit', 'SIGCONT':'should cause daemon resume/continue after SIGHUP',\n 'SIGHUP':'should suspend/pause daemon', 'SIGINT':'should cause (graceful) daemon exit',\n 'SIGTERM':'should cause (graceful) daemon exit'}\n signal.signal(signal.SIGABRT, sighandler) \n signal.signal(signal.SIGCONT, sighandler)\n signal.signal(signal.SIGHUP, sighandler)\n signal.signal(signal.SIGINT, sighandler)\n signal.signal(signal.SIGTERM, sighandler)\n# signal.signal(signal.SIGTSTP, sighandler) ignore this for now to allow shell jobs\n# print >> FileKeyUtils.WMSlog, sigdict\n return sigdict", "def signal(self, signal_id):\n # if signal id is registered runs all its callbacks\n if signal_id in self.registered_callbacks:\n for callback in self.registered_callbacks[signal_id]:\n callback()", "def signal_handler(signal_received, frame):\n\n sys.stdout.write('\\n')\n sys.exit(0)" ]
[ "0.81946003", "0.7705526", "0.76633954", "0.75827134", "0.74916065", "0.742279", "0.71166563", "0.71097434", "0.70967144", "0.70619386", "0.70392495", "0.6911852", "0.69012845", "0.6881658", "0.6872002", "0.6856131", "0.6803024", "0.6799461", "0.6750118", "0.6702888", "0.6686504", "0.66847664", "0.66683626", "0.6637499", "0.6559416", "0.6524026", "0.6514206", "0.64792943", "0.64627826", "0.64210385", "0.6410354", "0.6403864", "0.6398275", "0.6395402", "0.6391171", "0.6376159", "0.6375366", "0.63544875", "0.63199466", "0.6289813", "0.6288516", "0.6279386", "0.62706167", "0.6267061", "0.6237986", "0.6221631", "0.6219087", "0.620591", "0.6173345", "0.6161078", "0.6156394", "0.6154711", "0.6151318", "0.6143084", "0.6117416", "0.6116114", "0.6109439", "0.61089", "0.60959476", "0.60884094", "0.60864556", "0.60734826", "0.6062675", "0.6062532", "0.60569316", "0.60530996", "0.60517955", "0.6047968", "0.60409635", "0.6037372", "0.60071886", "0.600092", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.5991486", "0.59867245", "0.5967072", "0.5959257", "0.5955916", "0.59397227", "0.5932124", "0.5932124", "0.5932124", "0.59228766", "0.5922446", "0.5911824", "0.5897668" ]
0.0
-1
Tells the child process to resize its window
def resize_child_window(self):
    s = struct.pack('HHHH', 0, 0, 0, 0)
    x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
    fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)", "def signal_handler(self,sig,data):\n self.resize_child_window()", "def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def resize_to(self, width, height):\n\n self.driver.resize_window_to(self.handle, width, height)", "def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale", "def setWindowSize(self, width, height, windowHandle='current'):\n cmdId = self.executeCommand(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), \n \"windowHandle\": windowHandle})\n return cmdId", "def setWindowSize(width,height):\n dislin.winsiz(width,height)", "def resize(self, yx=None):\n if yx == None:\n yx = self.screen.getmaxyx()\n self.screen.clear()\n curses.resizeterm(yx[0], yx[1])\n self.setup_windows(resize = True)\n self.screen.refresh()", "def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def setwinsize(self, rows, cols):", "def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))", "def resizeEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def resize(self, width, height):\n\n\t\tself._window.resize(width, height)", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def ev_windowresized(self, event: WindowResized) -> None:", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n 
self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def __window_resizeBy(self, xDelta, yDelta):\n pass", "def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)", "def resize(self):\r\n del self.win\r\n self.__create_win()", "def resize(self, win, width:int, height:int):\r\n\r\n\t\tglViewport(0, 0, width, height)", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)", "def set_resolution(self, width, height):\n self.driver.set_window_size(width, height, self.driver.window_handles[0])", "def size_with_window(self, size_with_window):\n\n self.container['size_with_window'] = size_with_window", "def resizeEvent(self, event):\n super().resizeEvent(event)\n self.resized.emit()", "def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)", "def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height", "def handleResize(self):\n pass", "def on_resize(self, *args):\n\n self.page_current.width = terminal.width # Give page new terminal width\n self.render_buffer = []\n\n self.render() # Re-render buffer", "def resizeEvent(self, event):\n self.updateViewer()", "def SetWindowSize(self, size):\n self.WINDOW_SIZE = size", "def maximize(self):\n lib.SDL_MaximizeWindow(self._ptr)", "def setKnownConsoleSize(self, width, height):\n # Local import to avoid win32 issues.\n import tty\n class FakeFcntl(object):\n def ioctl(self, fd, opt, mutate):\n if opt != tty.TIOCGWINSZ:\n self.fail(\"Only window-size queries supported.\")\n return struct.pack(\"4H\", height, width, 0, 0)\n self.patch(cftp, \"fcntl\", FakeFcntl())", "def ev_windowresized(self, event: tcod.event.WindowResized) -> T | None:", "def getwinsize(self):", "def on_resize(event):\n gloo.set_viewport(0, 0, *event.physical_size)", "def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)", "def resize (self):\n return self._arrange_displays()", "def send_size_event() -> None:\n terminal_size = self._get_terminal_size()\n width, height = terminal_size\n textual_size = Size(width, height)\n event = events.Resize(textual_size, textual_size)\n asyncio.run_coroutine_threadsafe(\n self._app._post_message(event),\n loop=loop,\n )", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def ev_windowmaximized(self, event: WindowEvent) -> None:", "def maximize_option():\n Width=MaxWidth\n Height=MaxHeight - WinTitle -WinBorder\n PosX=LeftPadding\n PosY=TopPadding\n move_active(PosX,PosY,Width,Height)\n raise_window(\":ACTIVE:\")", "def exec_resize(self, exec_id, height=None, width=None):\n\n if isinstance(exec_id, dict):\n exec_id = exec_id.get('Id')\n\n params = {'h': height, 'w': width}\n url = self._url(\"/exec/{0}/resize\", exec_id)\n res = self._post(url, params=params)\n self._raise_for_status(res)", "def updatesize(frame):\n winwid, winhgt = frame.winfo_width(), frame.winfo_height()\n scrwid, scrhgt = frame.winfo_screenwidth(), frame.winfo_screenheight()\n newx, newy = math.floor(scrwid * 0.99) - winwid, math.floor(scrhgt * 0.01)\n 
frame.master.geometry(\"{}x{}+{}+{}\".format(winwid, winhgt, newx, newy))", "def resize(self, dims):\n width, height = dims[:2]\n self.logger.debug(\"renderer reconfigured to %dx%d\" % (\n width, height))\n\n # create cairo surface the size of the window\n #surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\n depth = len(self.rgb_order)\n self.surface_arr = np.zeros((height, width, depth), dtype=np.uint8)\n\n stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,\n width)\n surface = cairo.ImageSurface.create_for_data(self.surface_arr,\n cairo.FORMAT_ARGB32,\n width, height, stride)\n self.surface = surface\n\n # fill surface with background color;\n # this reduces unwanted garbage in the resizing window\n cr = cairo.Context(self.surface)\n\n # fill surface with background color\n cr.rectangle(0, 0, width, height)\n r, g, b = self.viewer.get_bg()\n cr.set_source_rgba(r, g, b)\n cr.fill()\n\n super(CanvasRenderer, self).resize(dims)", "def SizeWindows(self):\n self._SizeWindows()", "def resize(self, event=None):\n #self.render()\n self.__resize_background(event)\n #self.__delete_background()\n #self.__create_background(self._imfname)\n for sym in self.itersymbols():\n sym.sym.resize(event)", "def handle_scaling():\n os_type = platform.system()\n if os_type == \"Windows\":\n from ctypes import windll\n windll.user32.SetProcessDPIAware()", "def on_resize_parentx(self,event):\n ##print(\"parent event size=\"+str(event.width)+\" X \"+str(event.height))\n self.canvas_width = event.width\n self.canvas.get_tk_widget().config(width=self.canvas_width)\n self.show_image()", "def maximizeWindow(self, windowHandle='current'):\n cmdId = self.executeCommand(Command.MAXIMIZE_WINDOW, {\"windowHandle\": windowHandle})\n return cmdId", "def on_resize_parent(self,event):\n #print(\"parent event size=\"+str(event.width)+\" X \"+str(event.height))\n self.canvas_width = event.width\n self.canvas_height = event.height\n self.canvas.get_tk_widget().config(width=self.canvas_width, height=self.canvas_height)\n self.show_image()", "def resizeEvent(self, event):\n self.autosize()\n super().resizeEvent(event)", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def on_user_resize(self, event):\n self.resize_scaled(drag_rootx=event.x_root + self._mouse_drag_offset)", "def __ev_resize(self, event):\n\n new_size = event.dict['size']\n surface_size = self.__screen.get_size()\n old_center = self.__screen.get_rect().center\n if new_size != surface_size:\n self.__screen = pygame.display.set_mode(new_size,\n self.__screen.get_flags(),\n self.__screen.get_bitsize())\n self.init(offset=vect_diff(self.__screen.get_rect().center,\n old_center))\n self.__screen_width, self.__screen_height = self.__screen.get_size()", "def resize(self, width: int, height: int):\n pass", "def on_resize(width, height):\n\tglViewport(0, 0, width, height)\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tgluPerspective(70, 1.0*width/height, 0.1, 1000.0)\n\tglMatrixMode(GL_MODELVIEW)\n\tglLoadIdentity()", "def do_relayout(self):\n # This method is called whenever a relayout is requested. By\n # default, this is when the layout children change. 
In that case\n # we just need to update the min and max sizes. We are a top\n # level window, so no one really cares about our size hint. \n self.update_minimum_size()\n self.update_maximum_size()", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def resize(self, size):\n self.widget.resize(*size)", "def setWindowSize(self, value):\n return self._set(windowSize=value)", "def on_resize(width, height):\n\tglViewport(0, 0, width, height)\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tgluPerspective(40, 1.0*width/height, 0.1, 1000.0)\n\tglMatrixMode(GL_MODELVIEW)\n\tglLoadIdentity()", "def resizeEvent(self, event):\r\n QDialog.resizeEvent(self, event)\r\n self.emit(SIGNAL(\"size_change(QSize)\"), self.size())", "def maximize(self):\n\n self.driver.maximize_window(self.handle)", "def onSize(self,event=None):\n if self.app.DEBUG:\n print 'Event: Parent: %s.onSize'%self.__class__\n if self.redraw:self.redraw()", "def ev_windowminimized(self, event: WindowEvent) -> None:", "def set_size(self, width, height):\n cairo.cairo_xcb_surface_set_size(self._pointer, width, height)\n self._check_status()", "def OnSize(self, event):\r\n\r\n if self._owner_mgr and self._send_size:\r\n self._owner_mgr.OnFloatingPaneResized(self._pane_window, event.GetSize())", "def Resizable(self, resizable=True):\r\n \r\n return self.SetFlag(self.optionResizable, resizable)", "def set_window_rect(self, value: bool):\n self._caps['setWindowRect'] = value", "def update_dimensions(self):\r\n # stores the old screen height for cleaning the screen\r\n old_w_height = self.w_height\r\n\r\n self.w_width, self.w_height = get_terminal_size()\r\n # see __init__\r\n self.w_width -= self.w_width % 2\r\n self.w_height -= self.w_height % 2\r\n\r\n # no need to clear screen if window size hasn't changed\r\n if old_w_height != self.w_height:\r\n self.clear_screen(old_w_height)", "def resize(self):\n pass", "def set_window(self, handle):\n pass", "def resize(self, x=0, y=0, w=0, h=0):\r\n if w <= 0:\r\n w = self.max_width\r\n if h <= 0:\r\n h = self.max_height\r\n self.width = w\r\n self.height = h\r\n\r\n self.left = x\r\n self.top = y\r\n self.right = x + w\r\n self.bottom = y + h\r\n self.opengl.resize(x, y, w, h)", "def sigwinch_handler(pinstance):\n def handler(sig, data):\n s = struct.pack(\"HHHH\", 0, 0, 0, 0)\n a = struct.unpack(\"hhhh\", fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, s))\n pinstance.setwinsize(a[0], a[1])\n return handler", "def reshape(w, h):\n global win_width, win_height\n win_width = w\n win_height = h\n glutPostRedisplay() # May need to call a redraw...", "def window_size(self, window_size):\n\n self._window_size = window_size", "def resize(self,event):\n if event.widget==self.master:\n Y=event.height\n X=event.width\n self.seqframe.configure(width=X-self.canvas_border_x,\n height=Y-self.canvas_border_y)\n return", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def set_screen_size(self, width, height, width_mm, height_mm):\n # FIXME: setting framebuffer size doesn't work for some reason\n # self.window.xrandr_set_screen_size(width, height, 310+550, 310)\n # so I am going to use this dirty hack for the time being\n import 
subprocess\n\n subprocess.call([\"xrandr\", \"--fb\", \"%dx%d\" % (width, height)])", "def on_resize(self, width, height):\n\t\tglViewport(0, 0, width, height)\n\t\tglMatrixMode(GL_PROJECTION)\n\t\tglLoadIdentity()\n\t\tgluPerspective(70., width / float(height), .1, 1000.)\n\t\tglMatrixMode(GL_MODELVIEW)\n\t\treturn pyglet.event.EVENT_HANDLED", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def ev_windowmaximized(self, event: tcod.event.WindowEvent) -> T | None:", "def on_resize(self, width, height):\n self.ctx.viewport = 0, 0, width, height\n self.program['projection'] = Mat4.perspective_projection(self.aspect_ratio, 0.1, 100, fov=60)", "def OnSize(self, event):\r\n\r\n self.Layout()", "def resizeEvent(self, event):\n self.refresh_images(resize=True)\n QMainWindow.resizeEvent(self, event)", "def resize(self, auto_layout = False, **kwds):\n\t\told_auto_layout = self.auto_layout\n\t\ttry:\n\t\t\tself.auto_layout = auto_layout\n\t\t\tself.set(**kwds)\n\t\tfinally:\n\t\t\tself.auto_layout = old_auto_layout", "def set_height(height):\n resize.transforms[1].size = height", "def set_window_width(self, width):\n self.device.set_window_width(int(width))\n return \"OK\"", "def minimize(self):\n lib.SDL_MinimizeWindow(self._ptr)", "def set_screen(self, size):\r\n self.screen = size", "def OnResizeEnd(self, event):\n self._resizing = False\n self.Refresh()", "def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def showResized(name, image, scale):\n image = resizeImage(image, scale)\n cv.ShowImage(name, image)", "def resizeEvent(self, event):\n self.ui.main_edit.setGeometry(QtCore.QRect(0, 0, event.size().width(),\n event.size().height()-73))\n self.ui.dialog_map.setGeometry(QtCore.QRect(0, 0, event.size().width(),\n event.size().height()-73))" ]
[ "0.6948274", "0.6908833", "0.67389023", "0.6696905", "0.6490529", "0.6470183", "0.6419227", "0.64030665", "0.6388671", "0.63874537", "0.6313409", "0.6285529", "0.6282814", "0.62690467", "0.62603426", "0.62542343", "0.6253225", "0.6249994", "0.62148416", "0.61682737", "0.61676556", "0.6089843", "0.60795474", "0.6074318", "0.6050025", "0.6035615", "0.60288364", "0.6027586", "0.6000829", "0.59838", "0.5982281", "0.59643334", "0.5911938", "0.5872204", "0.5862485", "0.58488864", "0.5833428", "0.57965994", "0.57913625", "0.57798666", "0.574798", "0.5738527", "0.5735235", "0.57104105", "0.56986845", "0.5698457", "0.5689337", "0.56883764", "0.5680061", "0.5669868", "0.566959", "0.56545985", "0.5636856", "0.55946386", "0.559257", "0.55924034", "0.55862284", "0.5547311", "0.55341876", "0.55330366", "0.5494649", "0.5490802", "0.54818195", "0.54815805", "0.54707295", "0.5466416", "0.5465837", "0.54513955", "0.54491866", "0.54357964", "0.5414303", "0.5411304", "0.5410462", "0.5410422", "0.53980666", "0.5375769", "0.5373465", "0.5366621", "0.5363722", "0.5362359", "0.53591776", "0.535609", "0.5353207", "0.53506243", "0.53477436", "0.5320506", "0.53173727", "0.5311611", "0.53013945", "0.5294116", "0.5280647", "0.52779317", "0.5272972", "0.52532166", "0.5251196", "0.5246401", "0.5232493", "0.5226028", "0.5208421", "0.52081084" ]
0.7933913
0
Launch the appropriate shell as a login shell It will be either bash or tcsh depending on what the user is currently running. It checks the SHELL variable to figure it out.
def run_shell():
    shell = get_shell()
    if shell not in ['bash','tcsh']:
        raise ValueError, "Unsupported shell (only works with bash and tcsh)"
    os.execvp(shell,(shell,"-l"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loginShell(self, shell=None):\n\n\t\tif shell is None:\n\t\t\traise exceptions.BadArgumentError(\n\t\t\t\t_(u'You must specify a shell'))\n\n\t\tif shell not in LMC.configuration.users.shells:\n\t\t\t\traise exceptions.BadArgumentError(_(u'Invalid shell \"{0}\". '\n\t\t\t\t\t'Valid shells are {1}.').format(stylize(ST_BAD, shell),\n\t\t\t\t\t', '.join(stylize(ST_COMMENT, shell)\n\t\t\t\t\t\tfor shell in LMC.configuration.users.shells)))\n\n\t\twith self.lock:\n\t\t\tself.__loginShell = shell\n\t\t\tself.serialize()\n\n\t\t\tLicornEvent('user_loginShell_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tlogging.notice(_(u'Changed user {0} shell to {1}.').format(\n\t\t\t\tstylize(ST_NAME, self.__login), stylize(ST_COMMENT, shell)))", "def shell():\n # Provides:\n # shell\n if salt.utils.platform.is_windows():\n env_var = \"COMSPEC\"\n default = r\"C:\\Windows\\system32\\cmd.exe\"\n else:\n env_var = \"SHELL\"\n default = \"/bin/sh\"\n\n return {\"shell\": os.environ.get(env_var, default)}", "def shell(app, shell_name, shell_path, shell_args): # no cov\n app.ensure_environment_plugin_dependencies()\n\n if app.env == app.env_active:\n app.abort(f'Already in environment: {app.env}')\n\n if app.env in app.project.config.matrices:\n app.display_error(f'Environment `{app.env}` defines a matrix, choose one of the following instead:\\n')\n for env_name in app.project.config.matrices[app.env]['envs']:\n app.display_error(env_name)\n\n app.abort()\n\n if not shell_name:\n shell_name = app.config.shell.name\n if not shell_path:\n shell_path = app.config.shell.path\n if not shell_args:\n shell_args = app.config.shell.args\n\n if not shell_path:\n import shellingham\n\n try:\n shell_name, command = shellingham.detect_shell()\n except shellingham.ShellDetectionFailure:\n from hatch.utils.fs import Path\n\n shell_path = app.platform.default_shell\n shell_name = Path(shell_path).stem\n else:\n if app.platform.windows:\n shell_path = command\n else:\n shell_path, *shell_args = app.platform.modules.shlex.split(command)\n\n with app.project.location.as_cwd():\n environment = app.get_environment()\n app.prepare_environment(environment)\n\n first_run_indicator = app.cache_dir / 'shell' / 'first_run'\n if not first_run_indicator.is_file():\n app.display_waiting(\n 'You are about to enter a new shell, exit as you usually would e.g. 
'\n 'by typing `exit` or pressing `ctrl+d`...'\n )\n first_run_indicator.parent.ensure_dir_exists()\n first_run_indicator.touch()\n\n environment.enter_shell(shell_name, shell_path, shell_args)", "def get_shell_type():\n if sys.platform.startswith(\"win\"):\n parent_proc = os.getppid()\n parent_name = Process(parent_proc).name()\n\n if bool(re.match(\"pwsh*|pwsh.exe|powershell.exe\", parent_name)):\n return Shell.POWER_SHELL\n\n return Shell.WINDOWS_COMMAND_PROMPT\n\n return Shell.LINUX", "def start_shell(self):\n cmd = 'shell'\n end_strs = ['>']\n self.run_with_output(cmd, end_strs)\n return True", "def djshell():\n if '@' in env.host_string:\n env.shell_host_string = env.host_string\n else:\n env.shell_host_string = '%(user)s@%(host_string)s' % env\n env.shell_default_dir = env.shell_default_dir_template % env\n env.shell_interactive_djshell_str = env.shell_interactive_djshell % env\n if env.is_local:\n cmd = '%(shell_interactive_djshell_str)s' % env\n else:\n cmd = 'ssh -t -i %(key_filename)s %(shell_host_string)s \"%(shell_interactive_djshell_str)s\"' % env\n #print cmd\n os.system(cmd)", "def test_shell_run_SHELL(tmp_home, tmp_prefix, tmp_env_name, use_prefix, tmp_path):\n skip_if_shell_incompat(\"bash\")\n\n script_path = tmp_path / \"fakeshell.sh\"\n script_path.write_text(\"#!/bin/sh\\nexit 42\")\n script_path.chmod(0o777)\n\n if use_prefix:\n cmd = [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix]\n else:\n cmd = [helpers.get_umamba(), \"shell\", \"-n\", tmp_env_name]\n\n ret = subprocess.run(cmd, env={**os.environ, \"SHELL\": script_path})\n assert ret.returncode == 42", "def use_shell(self):\n return self._shell", "def shell(gui=0, dryrun=0):\n render_remote_paths()\n print 'env.remote_app_dir:',env.remote_app_dir\n env.SITE = env.SITE or env.default_site\n env.shell_x_opt = '-X' if int(gui) else ''\n if '@' in env.host_string:\n env.shell_host_string = env.host_string\n else:\n env.shell_host_string = '%(user)s@%(host_string)s' % env\n env.shell_default_dir = env.shell_default_dir_template % env\n env.shell_interactive_shell_str = env.shell_interactive_shell % env\n if env.is_local:\n cmd = '%(shell_interactive_shell_str)s' % env\n else:\n cmd = 'ssh -t %(shell_x_opt)s -i %(key_filename)s %(shell_host_string)s \"%(shell_interactive_shell_str)s\"' % env\n print cmd\n if int(dryrun):\n return\n os.system(cmd)", "def shell(self):\r\n channel = self._ssh_client.invoke_shell()\r\n interactive_shell(channel)", "def detect_shell() -> Optional[str]:\n shell_var = os.environ.get('SHELL')\n if shell_var:\n return os.path.basename(shell_var)\n return None", "def run_shell(kit):\n context = {\n 'kit': kit,\n }\n try:\n import IPython\n except ImportError:\n interact(local=context)\n else:\n interactive_shell = IPython.frontend.terminal.embed.InteractiveShellEmbed()\n interactive_shell(local_ns=context)", "def command_shell(\n session_name,\n window_name,\n socket_name,\n socket_path,\n command,\n shell,\n use_pythonrc,\n use_vi_mode,\n):\n server = Server(socket_name=socket_name, socket_path=socket_path)\n\n util.raise_if_tmux_not_running(server=server)\n\n current_pane = util.get_current_pane(server=server)\n\n session = util.get_session(\n server=server, session_name=session_name, current_pane=current_pane\n )\n\n window = util.get_window(\n session=session, window_name=window_name, current_pane=current_pane\n )\n\n pane = util.get_pane(window=window, current_pane=current_pane) # NOQA: F841\n\n if command is not None:\n exec(command)\n else:\n if shell == \"pdb\" or 
(os.getenv(\"PYTHONBREAKPOINT\") and PY3 and PYMINOR >= 7):\n from tmuxp._compat import breakpoint as tmuxp_breakpoint\n\n tmuxp_breakpoint()\n return\n else:\n from ..shell import launch\n\n launch(\n shell=shell,\n use_pythonrc=use_pythonrc, # shell: code\n use_vi_mode=use_vi_mode, # shell: ptpython, ptipython\n # tmux environment / libtmux variables\n server=server,\n session=session,\n window=window,\n pane=pane,\n )", "def launch_shell(*, cwd: Optional[pathlib.Path] = None) -> None:\n with emit.pause():\n subprocess.run([\"bash\"], check=False, cwd=cwd)", "def shell(console):\n return create_shell(\n MANAGE_DICT.get(\"shell\", {}).get(\"console\", console), MANAGE_DICT\n )", "def get_shell(self, shell):", "def get_shell(name='bash'):\n if name.startswith('/'):\n return [name]\n return ['/usr/bin/env', name]", "def get_shell(cls):\n tvars = cls._get_thread_vars()\n if len(tvars['shell_stack']) == 0:\n raise RuntimeError(\"No currently active shell\")\n return tvars['shell_stack'][-1]", "def __get_adb_shell(self):\n shell = self.command + [\"shell\"]\n if self.root_adb == \"root_adb\":\n # Root adb-specific things\n pass\n elif self.root_adb == \"root_shell\":\n # Root shell-specific things\n shell.extend([\"su\", \"-c\"])\n elif self.root_adb == \"not_root\":\n # Non root-specific things\n pass\n return shell", "def test_shell_run_activated(tmp_home, tmp_prefix):\n skip_if_shell_incompat(\"bash\")\n stdout = subprocess.check_output(\n [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix],\n input=\"echo $PATH\",\n text=True,\n )\n assert str(tmp_prefix) in stdout.split(os.pathsep)[0]", "def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell", "def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()", "def use_shell(self, shell):\n return ShellContext(self, shell)", "def shell():\n\n from IPython.terminal.ipapp import TerminalIPythonApp\n import app.model as m\n from trex.support import quantum\n\n context = dict(\n app = app,\n quantum = quantum,\n m = m,\n )\n\n rc_file = os.path.normpath(os.path.join(app.root_path, os.pardir, 'shell.rc'))\n if os.access(rc_file, os.R_OK):\n execfile(rc_file, context, dict(context=context))\n\n shell = TerminalIPythonApp.instance(\n display_banner = False,\n quick = True,\n user_ns = context,\n )\n shell.initialize(argv=[])\n shell.shell.confirm_exit = False\n\n context = app.test_request_context('__shell__')\n context.push()\n shell.start()\n context.pop()", "def login_aashell(self):\n flag = 0\n login_aashell = 'telnet 192.168.255.1 15007'\n aashell_prompt = 'AaShell>'\n\n self._current.write(login_aashell)\n self._current.read_until_regexp(aashell_prompt)\n flag = 1\n\n return flag", "def get_shell(self, shell):\n if shell not in self._shells:\n raise Exception(\n 'Unknown shell \"{}\"'.format(shell)\n )\n return self._shells[shell]", "def open_shell_and_run_su_user(driver):\n global sudo_results\n cmd = 'sudo ls /var/lib/sudo'\n sudo_results = ssh_sudo(cmd, host, 'ericbsd', 'testing')", "def shell():\n pass", "def execute_shell(self, cmd):\n try:\n return common.execute_shell(cmd, False)\n except Exception, e:\n raise exception.TermSaverException(help=_(\n\"\"\"Could not execute the command [%(cmd)s] properly.\n%(message)s \\nError details: %(error)s\"\"\") % {\n \"cmd\": \" \".join(cmd),\n \"message\": \"Make sure you have figlet installed!\",\n \"error\": str(e)\n }\n )", 
"def run_shell(cmd: str):\n print_color(f\"** RUNNING: {cmd}\")\n os.system(cmd)", "def shell():\n import code\n app = main.create_app()\n with app.app_context():\n from flask.globals import _app_ctx_stack\n app = _app_ctx_stack.top.app\n ctx = {}\n ctx.update(app.make_shell_context())\n code.interact(local=ctx)", "def run_as_cmd(cmd, user, shell=None):\n shell = shell or 'bash'\n if not user:\n return get_execute_command(cmd, shell)\n return ['sudo', '-s', '--set-home', '-u', user] + get_execute_command(cmd, shell)", "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)", "def gkfs_shell(test_workspace):\n\n return ShellClient(test_workspace)", "def shell():\n from flask.globals import _app_ctx_stack\n banner = 'Welcome to Opsy!'\n app = _app_ctx_stack.top.app\n shell_ctx = {'create_app': create_app,\n 'db': db,\n 'User': User,\n 'Role': Role,\n 'Permission': Permission,\n 'Zone': Zone,\n 'Host': Host,\n 'Group': Group,\n 'HostGroupMapping': HostGroupMapping}\n shell_ctx.update(app.make_shell_context())\n try:\n from IPython import embed\n embed(user_ns=shell_ctx, banner1=banner)\n return\n except ImportError:\n import code\n code.interact(banner, local=shell_ctx)", "def shell_init_instructions(cmd, equivalent):\n\n shell_specific = \"{sh_arg}\" in equivalent\n\n msg = [\n \"`%s` requires Spack's shell support.\" % cmd,\n \"\",\n \"To set up shell support, run the command below for your shell.\",\n \"\",\n color.colorize(\"@*c{For bash/zsh/sh:}\"),\n \" . %s/setup-env.sh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For csh/tcsh:}\"),\n \" source %s/setup-env.csh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For fish:}\"),\n \" source %s/setup-env.fish\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For Windows batch:}\"),\n \" source %s/spack_cmd.bat\" % spack.paths.share_path,\n \"\",\n \"Or, if you do not want to use shell support, run \"\n + (\"one of these\" if shell_specific else \"this\")\n + \" instead:\",\n \"\",\n ]\n\n if shell_specific:\n msg += [\n equivalent.format(sh_arg=\"--sh \") + \" # bash/zsh/sh\",\n equivalent.format(sh_arg=\"--csh \") + \" # csh/tcsh\",\n equivalent.format(sh_arg=\"--fish\") + \" # fish\",\n equivalent.format(sh_arg=\"--bat \") + \" # batch\",\n ]\n else:\n msg += [\" \" + equivalent]\n\n msg += [\n \"\",\n \"If you have already set up Spack's shell support but still receive\",\n \"this message, please make sure to call Spack via the `spack` command\",\n \"without any path components (such as `bin/spack`).\",\n ]\n\n msg += [\"\"]\n tty.error(*msg)", "def init_shell(self):\n self.shell = PlayerTerminalInteractiveShell.instance(\n commands=self.commands,\n speed=self.speed,\n parent=self,\n display_banner=False,\n profile_dir=self.profile_dir,\n ipython_dir=self.ipython_dir,\n user_ns=self.user_ns,\n )\n self.shell.configurables.append(self)", "def shell():\n import code\n banner = f\"Python {sys.version} on {sys.platform}\\nInstance path: {current_app.instance_path}\" # noqa\n ctx = {\"db\": db}\n\n # Support the regular Python interpreter startup script if someone\n # is using it.\n startup = os.environ.get(\"PYTHONSTARTUP\")\n if startup and os.path.isfile(startup):\n with open(startup, \"r\") as f:\n eval(compile(f.read(), startup, \"exec\"), ctx)\n\n ctx.update(current_app.make_shell_context())\n try:\n import IPython\n IPython.embed(banner1=banner, user_ns=ctx)\n except ImportError:\n code.interact(banner=banner, local=ctx)", "def _run_shell(self, command_string: str, cwd: str = '/', 
print_command: bool = False) -> subprocess.Popen:\n if print_command:\n self.logger.info(command_string)\n return subprocess.Popen(command_string, shell=True, cwd=cwd)", "def make_shell_context():\n return {'User': User}", "def launch_steam():\n subprocess.run('steam &',\n shell=True,\n check=True,\n executable='/bin/sh'\n )", "def shell():\n local('docker-compose exec web python3 manage.py shell {}'.format(\n settings))", "def start_session(self):\r\n print('Executing code by running main.run()...')\r\n print('This will open a tmux session...')\r\n print('Detach by pressing CTRL + B and then D')\r\n\r\n # Connect with SSH-PubKey and execute tmux script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n '-t', 'robolab-tmux'\r\n ])\r\n\r\n print('Done.')", "def call_subshell(subshell):\n curses.def_prog_mode()\n #curses.endwin() # Probably causes a memory leak.\n\n rtn = os.system(\"%s\" % (subshell))\n curses.reset_prog_mode()\n if rtn is not 0:\n return False\n else:\n return True", "def available_shells(self):", "async def shell(self, ctx: Context, *, shellCmd):\n\t\townercheck = self.owner_check(ctx.author.id)\n\t\tif ownercheck == False:\n\t\t\treturn await self.send('Owner-Only Command', whisper=[ctx.author.id])\n\t\tproc = subprocess.Popen(shellCmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n\t\tstdout_value = proc.stdout.read() + proc.stderr.read()\n\t\trealval = str(stdout_value).replace(\"\\n\", \" \")\n\t\tawait self.send(f\"{realval}\", whisper=[ctx.author.id])", "def jail_shell(jnid = ''):\n jails = jails_list()\n jnid = jid_jname(jnid)\n\n if jnid == False:\n return False\n \n if jail_isrun(jnid) != 1:\n print \" INFO: '%s' is not running!\" % (jnid)\n return False\n\n if jnid not in jails[1]:\n print \" ERROR: Jail with name '%s' not found!\" % (jnid)\n return False\n\n if jnid == 'BASE':\n print \" INFO: 'BASE' jail cannot be used!\"\n return False\n\n root_sheel = subprocess.check_output(\"head -n3 %s%s/etc/passwd\" % (jpath, jnid) , shell=True).strip('jid')\n root_sheel = root_sheel.split(':')\n# prtin and add to log file \n logmsg = \" INFO: Enter in '%s' Jail!\" % (jnid)\n log(logmsg) \n try:\n os.system (\"jexec %s %s\" % (jnid, root_sheel[-1]))\n except:\n logmsg = \" ERROR: can't login in '%s'!\" % (jnid)\n log(logmsg)\n return False\n \n logmsg = \" INFO: Exit from '%s' Jail!\" % (jnid)\n log(logmsg)", "def shell_cmd(ctx, extra_flags):\n ctx.load_plugins(extra_flags=extra_flags)\n import code\n from lektor.db import F, Tree\n from lektor.builder import Builder\n\n banner = \"Python %s on %s\\nLektor Project: %s\" % (\n sys.version,\n sys.platform,\n ctx.get_env().root_path,\n )\n ns = {}\n startup = os.environ.get(\"PYTHONSTARTUP\")\n if startup and os.path.isfile(startup):\n with open(startup, \"r\", encoding=\"utf-8\") as f:\n eval(compile(f.read(), startup, \"exec\"), ns) # pylint: disable=eval-used\n pad = ctx.get_env().new_pad()\n ns.update(\n project=ctx.get_project(),\n env=ctx.get_env(),\n pad=pad,\n tree=Tree(pad),\n config=ctx.get_env().load_config(),\n make_builder=lambda: Builder(\n ctx.get_env().new_pad(), ctx.get_default_output_path()\n ),\n F=F,\n )\n try:\n c = Config()\n c.TerminalInteractiveShell.banner2 = banner\n embed(config=c, user_ns=ns)\n except NameError: # No IPython\n code.interact(banner=banner, local=ns)", "def login(self,\n\t command='su -',\n\t user=None,\n\t password=None,\n\t prompt_prefix=None,\n\t 
expect=None,\n\t timeout=shutit_global.shutit_global_object.default_timeout,\n\t escape=False,\n\t echo=None,\n\t note=None,\n\t go_home=True,\n\t fail_on_fail=True,\n\t is_ssh=True,\n\t check_sudo=True,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_session = self.get_current_shutit_pexpect_session()\n\t\treturn shutit_pexpect_session.login(ShutItSendSpec(shutit_pexpect_session,\n\t\t user=user,\n\t\t send=command,\n\t\t password=password,\n\t\t prompt_prefix=prompt_prefix,\n\t\t expect=expect,\n\t\t timeout=timeout,\n\t\t escape=escape,\n\t\t echo=echo,\n\t\t note=note,\n\t\t go_home=go_home,\n\t\t fail_on_fail=fail_on_fail,\n\t\t is_ssh=is_ssh,\n\t\t check_sudo=check_sudo,\n\t\t loglevel=loglevel))", "def desktop_session(self):\n self.user['desktop_environment'] = {'name': self.user['desktop']}\n if self.user['desktop'] is not None:\n\n # Append required packages\n if self.user['desktop'] in [10, 11, 12]:\n self.user['desktop_environment']['requirements'] = \\\n '{xorg} {xinit} {numlock}'.format(\n xorg=self.packages['xorg'],\n xinit=self.packages['xinit'],\n numlock=self.packages['numlock'])\n else:\n self.user['desktop_environment']['requirements'] = \\\n '{xorg} {numlock}'.format(xorg=self.packages['xorg'],\n numlock=self.packages['numlock'])\n\n # Set desktop environment name\n self.user['desktop_environment']['name'] = \\\n self.packages['desktop']['name'][self.user['desktop']]\n\n # Append desktop environment packages\n self.user['desktop_environment']['packages'] = \\\n self.packages['desktop']['packages'][self.user['desktop']]\n\n # Append desktop environment extra packages\n if self.user['desktop_extra'] is True:\n self.user['desktop_environment']['packages'] += ' {x}'.format(\n x=self.packages['desktop']['extras'][self.user['desktop']])\n\n # Set start command\n self.user['desktop_environment']['startcmd'] = \\\n self.packages['desktop']['startcmd'][self.user['desktop']]", "def enable_user(user: User, active: bool=True) -> Result:\n login = user.pw_shell not in _NOLOGIN_SHELLS\n if login and not active:\n command([\"/usr/bin/chsh\", \"--shell\", \"/bin/bash\", user.pw_name])\n return Result(State.success)\n elif active and not login:\n command([\"/usr/bin/chsh\", \"--shell\", _NOLOGIN_SHELLS[0], user.pw_name])\n return Result(State.success)\n else:\n return Result(State.unchanged)", "def _check_login(self, guest_obj, distro_name, distro_cmd):\n # perform login to exercise distro detection\n guest_obj.login()\n\n # validate if instantiating SshClient was correct\n self._mock_ssh_client_cls.assert_called_with()\n\n # validate usage of SshClient object was correct\n self._mock_ssh_client_obj.login.assert_called_with(\n guest_obj.host_name,\n user=guest_obj.user,\n passwd=guest_obj.passwd,\n timeout=60\n )\n self._mock_ssh_client_obj.open_shell.assert_called_with()\n\n # validate if the right distro object was created\n self.assertIs(distro_name, guest_obj._distro_obj.__class__.__name__)\n\n # validate the right command was issued for env detection\n self._mock_ssh_shell.run.assert_called_with(distro_cmd)", "def PythonShell(cls, variables = None):\n if variables is None:\n variables = locals()\n # Is APSO installed ?\n ctx = ScriptForge.componentcontext\n ext = ctx.getByName('/singletons/com.sun.star.deployment.PackageInformationProvider')\n apso = 'apso.python.script.organizer'\n if len(ext.getPackageLocation(apso)) > 0:\n # APSO is available. 
However, PythonShell() is ignored in bridge mode\n # because APSO library not in pythonpath\n if ScriptForge.port > 0:\n return None\n # Directly derived from apso.oxt|python|scripts|tools.py$console\n # we need to load apso before import statement\n ctx.ServiceManager.createInstance('apso.python.script.organizer.impl')\n # now we can use apso_utils library\n from apso_utils import console\n kwargs = {'loc': variables}\n kwargs['loc'].setdefault('XSCRIPTCONTEXT', uno)\n console(**kwargs)\n # An interprocess call is necessary to allow a redirection of STDOUT and STDERR by APSO\n # Choice is a minimalist call to a Basic routine: no arguments, a few lines of code\n SFScriptForge.SF_Basic.GetGuiType()\n else:\n # The APSO extension could not be located in your LibreOffice installation\n cls._RaiseFatal('SF_Exception.PythonShell', 'variables=None', 'PYTHONSHELLERROR')", "def Shell(self):\n \n from shell import SSPyShell\n \n sspy_shell = SSPyShell(scheduler=self)\n\n sspy_shell.cmdloop()", "def execute_shell(command):\n # execute shell script\n if command[0].startswith(\"./\"):\n return execute_command(command)\n # execute commands of shell\n try:\n list_path = environ['PATH'].split(':')\n except KeyError:\n print(\"bash: {}: command not found\".format(command[0]))\n return 127\n for x in list_path:\n if exists(join(x, command[0])):\n return execute_command(command)\n print(\"bash: {}: command not found\".format(command[0]))\n return 127", "def login(**kwargs):\n root_commands.cmd_login(**kwargs)", "def test_current_user(self):\n self.assertEqual(self.host.user().name, \"matlab\")\n self.assertEqual(self.host.user().shell, \"/bin/bash\")", "async def interactive_shell(self) -> None:\n session = PromptSession()\n while True:\n try:\n result = await session.prompt_async(f\"redCisco> \", style=style)\n if not result:\n continue\n await self.command_interpreter(str(result).strip())\n except (EOFError, KeyboardInterrupt):\n break", "def do_shell(command, context=None, **kwargs):\n logging.info(\"%s: executing %s\" % (context, command))\n\n child_env = {'CRANKD_CONTEXT': context}\n\n # We'll pull a subset of the available information in for shell scripts.\n # Anyone who needs more will probably want to write a Python handler\n # instead so they can reuse things like our logger & config info and avoid\n # ordeals like associative arrays in Bash\n for k in [ 'info', 'key' ]:\n if k in kwargs and kwargs[k]:\n child_env['CRANKD_%s' % k.upper()] = str(kwargs[k])\n\n if 'user_info' in kwargs:\n for k, v in kwargs['user_info'].items():\n child_env[create_env_name(k)] = str(v)\n\n try:\n rc = call(command, shell=True, env=child_env)\n if rc == 0:\n logging.debug(\"`%s` returned %d\" % (command, rc))\n elif rc < 0:\n logging.error(\"`%s` was terminated by signal %d\" % (command, -rc))\n else:\n logging.error(\"`%s` returned %d\" % (command, rc))\n except OSError, exc:\n logging.error(\"Got an exception when executing %s:\" % (command, exc))", "def login(self):\n try:\n if self.session.opts.get('authtype') == 'kerberos':\n return self.session.gssapi_login()\n else:\n return self.session.ssl_login()\n except koji.AuthError as e:\n self.logger.exception(f'login error: {e}')\n pass\n return False", "def run(*commands):\n command = str(value_translation(gget(\"raw_command_args\")))\n if (command):\n res = send(get_system_code(command))\n if (not res):\n return\n print(color.green(\"\\nResult:\\n\\n\") + res.r_text.strip() + \"\\n\")\n return\n print(color.cyan(\n \"Eenter interactive temporary shell...\\n\\nUse 
'back' command to return doughnuts.\\n\"))\n res = send(\n f'{get_system_code(\"whoami\")}print(\"@\".$_SERVER[\"SERVER_NAME\"].\"|\".getcwd());').r_text.strip()\n prompt, pwd = res.split(\"|\")\n set_namespace(\"webshell\", False, True)\n wordlist = gget(\"webshell.wordlist\")\n readline.set_wordlist(\n NEW_WINDOWS_WORDLIST if (\n is_windows()) else NEW_UNIX_WORDLIST)\n if is_windows():\n prompt = \"%s> \"\n else:\n prompt = prompt.replace(\"\\r\", \"\").replace(\"\\n\", \"\") + \":%s$ \"\n try:\n while gget(\"loop\"):\n print(prompt % pwd, end=\"\")\n if gget(\"raw_input\", False):\n command = str(value_translation(readline()))\n else:\n command = input()\n\n lower_command = command.lower()\n if (lower_command.lower() in ['exit', 'quit', 'back']):\n print()\n break\n if (command == ''):\n print()\n continue\n b64_pwd = base64_encode(pwd)\n if (lower_command.startswith(\"cd \") and len(lower_command) > 3):\n path = base64_encode(lower_command[3:].strip())\n res = send(\n f'chdir(base64_decode(\\'{b64_pwd}\\'));chdir(base64_decode(\\'{path}\\'));print(getcwd());')\n if (not res):\n return\n pwd = res.r_text.strip()\n else:\n real_command = f'chdir(base64_decode(\\'{b64_pwd}\\'));' + \\\n get_system_code(command)\n if command.endswith(\"&\"):\n t = Thread(target=send, args=(real_command, ))\n t.setDaemon(True)\n t.start()\n print(\"\\n[+] \" + command + \": run in backend\\n\")\n else:\n res = send(real_command)\n if (not res):\n return\n print(\"\\n\" + res.r_text.strip() + \"\\n\")\n finally:\n readline.set_wordlist(wordlist)", "def elActivateGraphicalLogin(self):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n commandSection = self.sectionByName(\"command\")\n commandSection.string = commandSection.string + \"\"\"\n#\n# XWindows configuration information.\nxconfig --startxonboot --defaultdesktop=GNOME\n\"\"\"\n return self", "def login(username, password, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run([\n 'devpi', 'login', '--clientdir', clientdir,\n username, '--password', password])", "def do_login(self):\n if self.app.authentication_only:\n self.app.stop()\n else:\n self.set_screen(EXPLORER)", "def enter_shell(self):\n ENTER_SHELL_CMD = '\\n'\n\n while True:\n if self.verbose_logger:\n self.verbose_logger.debug(\"sending '%s', timeout: %s\", ENTER_SHELL_CMD, self.timeout)\n if self.verbose_logger:\n self.verbose_logger.debug(\"sending '%s', timeout: %s [done]\", ENTER_SHELL_CMD, self.timeout)\n\n if self.enter_shell_send_newline:\n self.sock.send(b\"\\n\")\n if self.wait_for_command_execution(timeout=self.timeout):\n break\n if self.verbose_logger:\n self.verbose_logger.debug(\"entered shell ...\")", "def run_shell_command(cmdstr, **subprocess_kwargs):\n if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:\n raise ProgramError(\n 'The \"shell\" kwarg may be omitted, but if '\n 'provided it must be True.')\n else:\n subprocess_kwargs['shell'] = True\n\n if 'executable' not in subprocess_kwargs:\n subprocess_kwargs['executable'] = os.getenv('SHELL')\n\n for stream in ['stdin', 'stdout', 'stderr']:\n subprocess_kwargs.setdefault(stream, subprocess.PIPE)\n subprocess_kwargs = alter_subprocess_kwargs_by_platform(\n **subprocess_kwargs)\n return subprocess.Popen(cmdstr, **subprocess_kwargs)", "def login (self,server,username,password='',terminal_type='ansi',original_prompts=r\"][#$]|~[#$]|bash.*?[#$]|[#$] \",login_timeout=10):\r\n cmd = \"ssh -l %s %s\" % (username, server)\r\n 
spawn.__init__(self, cmd, timeout=login_timeout)\r\n #, \"(?i)no route to host\"])\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT, \"(?i)connection closed by remote host\"])\r\n if i==0: # New certificate -- always accept it. This is what you if SSH does not have the remote host's public key stored in the cache.\r\n self.sendline(\"yes\")\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==2: # password\r\n self.sendline(password)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==4:\r\n self.sendline(terminal_type)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n\r\n if i==0:\r\n # This is weird. This should not happen twice in a row.\r\n self.close()\r\n return False\r\n elif i==1: # can occur if you have a public key pair set to authenticate. \r\n ### TODO: May NOT be OK if expect() matched a false prompt.\r\n pass\r\n elif i==2: # password prompt again\r\n # For incorrect passwords, some ssh servers will\r\n # ask for the password again, others return 'denied' right away.\r\n # If we get the password prompt again then this means\r\n # we didn't get the password right the first time. \r\n self.close()\r\n return False\r\n elif i==3: # permission denied -- password was bad.\r\n self.close()\r\n return False\r\n elif i==4: # terminal type again? WTF?\r\n self.close()\r\n return False\r\n elif i==5: # Timeout\r\n # This is tricky... 
presume that we are at the command-line prompt.\r\n # It may be that the prompt was so weird that we couldn't match it.\r\n pass\r\n elif i==6: # Connection closed by remote host\r\n self.close()\r\n return False\r\n else: # Unexpected \r\n self.close()\r\n return False\r\n # We appear to be in -- reset prompt to something more unique.\r\n if not self.set_unique_prompt():\r\n self.close()\r\n return False\r\n return True", "def django_restart_shell():\r\n \r\n singles = wingapi.gApplication.fSingletons\r\n shell = singles.fGuiMgr.ShowPanel('python-shell', flash=True, grab_focus=True)\r\n if shell is not None:\r\n shell.fOwner.ScheduleRestart()", "def login(self, host, user, password, cwd=None):\n try:\n ret = self.connector.login(host, user, password,\n original_prompt=r\"[#$%]\",\n auto_prompt_reset=False)\n if not ret:\n error_log(\"login host[%s], with user[%s], password[%s] failed\",\n host, user, password)\n return False\n\n if cwd is not None:\n if not self.change_work_directory(cwd):\n error_log(\"change_work_directory() to [%s] failed\", cwd)\n return False\n\n if not self.set_tty():\n error_log(\"set_tty() failed!\")\n return False\n\n return True\n except:\n error_log(\"login host[%s], with user[%s], password[%s] failed: \"\n \"catch exception type[%s], value[%s]\", host, user,\n password, sys.exc_info()[0], sys.exc_info()[1])\n return False", "def temp_start():\n\tsh_file_path = data_dir.ROOT_DIR+\"mm_initial.sh\"\n\tcmd = \"sudo %s\" % sh_file_path\n\tsubprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate()", "def ssh(obj: dict[str, Any], name: str):\n profile = Profile.get_by(name=USERNAME)\n login = Login.get_by(name=name)\n\n if profile.name != USERNAME:\n click.echo(\n click.style(\n f\"You need to be logged in as {profile.name}\"\n \" to access this login\",\n fg=\"red\",\n bold=True))\n raise click.Abort(\"Authentication failed!\")\n\n decrypted_password = None\n if login.password:\n fernet = authenticate_user_and_get_fernet(profile)\n decrypted_password = fernet.decrypt(login.password.encode()).decode()\n\n click.echo(\n click.style(f\"💫 Logging you in to {login.name} ({login.host})\",\n fg=\"cyan\"))\n shell = obj[\"shell\"]\n if decrypted_password is None:\n args = shlex.split(f\"{shell} \\'ssh {login.username}@{login.host}\\'\")\n p = subprocess.Popen(args)\n else:\n args = shlex.split(f\"{shell} 'sshpass -p \\\"{decrypted_password}\\\" \"\n f\"ssh {login.username}@{login.host}'\")\n p = subprocess.Popen(args, stdout=subprocess.PIPE)\n p.communicate()\n if p.returncode == 0:\n click.echo(\n click.style(f\"✅ Logged you in to {login.name} as {login.username}\",\n fg=\"green\"))\n else:\n click.echo(f\"p.returncode: {p.returncode}\")", "def run(self, *args, **kargs):\n self.pre_run()\n\n if self.options.cmd:\n ns = globals()\n ns.update(settings=self)\n exec self.options.cmd in self.shell_namespace()\n return True\n\n if self.options.execfile:\n ns = globals()\n ns.update(settings=self)\n execfile(self.options.execfile, self.shell_namespace())\n return True\n\n if self.options.version:\n return self.show_version()\n\n if self.get_setting('user.shell'):\n try:\n from smashlib import embed\n except ImportError:\n raise SettingsError(\"You need smashlib installed \"\n \"if you want to use the shell.\")\n else:\n embed(user_ns=self.shell_namespace())\n return True", "def shell(cmd, check=True):\n eprint(f\"+ {cmd}\")\n return run(cmd, shell=True, check=check)", "def construct_parent_shell(eval_output_with, print_script_to):\n if eval_output_with:\n 
environment_ctor = {\n \"bash\": BashParentEnvironment,\n \"powershell\": PowershellParentEnvironment\n }\n\n printers = {\n \"bash\": escaped_printer_with_character(\"\\\\\", print_script_to),\n \"powershell\": escaped_printer_with_character(\"`\",\n print_script_to)\n }\n\n printer = printers[eval_output_with]\n return environment_ctor[eval_output_with](printer)\n else:\n return BashParentEnvironment(lambda _: None)", "def shell(cmd):\n print('Running \"{}\"...'.format(cmd))\n subprocess.check_call(cmd, shell=True)", "def t24_login(self, user_type=\"INPUTTER\"):\n\n if self.login_user_type == user_type:\n self._log_info('Already logged in as ' + user_type)\n return\n\n self._log_info('Trying to login as ' + user_type)\n\n var_user_name = \"${LOGIN_\" + user_type + \"}\"\n user = BuiltIn().get_variable_value(var_user_name)\n if not user:\n BuiltIn().fail(\"Please specify a user name in a variable \" + var_user_name)\n\n var_pass = \"${PASSWORD_\" + user_type + \"}\"\n password = BuiltIn().get_variable_value(var_pass)\n if not password:\n BuiltIn().fail(\"Please specify a password in a variable \" + var_pass)\n\n self.t24_logoff()\n\n if not self.login_page:\n self.login_page = T24LoginPage()\n self.login_page.open()\n # For demos, sometimes it is nice to to start in maximized mode. Uncomment line below when necessary:\n # self.login_page.maximize_browser_window()\n\n self.home_page = self.login_page.enter_T24_credentials(user, password)\n self.login_user_type = user_type\n self.login_page = None # After a successful login, the login page gets actually closed, so don't store it", "def do_shell(self, line):\n subprocess.call(line, shell=True)", "def do_shell(self, args):\n os.system(args)", "def do_shell(self, args):\n os.system(args)", "def _shell(self, commandName, args, stdin=None):\n cmd = [commandName]\n if type(args) == str:\n cmd.append(args)\n else:\n cmd.extend(args)\n \n #print 'running', cmd\n \n if stdin:\n out, err = subprocess.Popen(cmd, stdin=stdin, stdout=subprocess.PIPE).communicate()\n else:\n out, err = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()\n \n if out: out = out.splitlines()\n if err: err = err.splitlines()\n \n return out, err", "def login(host):\n\n\n \"\"\" change this settings to make use. \"\"\"\n gateway_user = \"lonli\"\n gateway_ip = \"127.0.0.1\"\n gateway_port = \"22\"\n gateway_key = \"/home/lonli/.ssh/id_rsa\"\n\n \"\"\" change abbove settings to make use. \"\"\"\n\n\n if host:\n try:\n subprocess.check_output([\"ssh\", \"-p\", gateway_port, \"-i\", gateway_key,\n \"{0}@{1}\".format(gateway_user, gateway_ip), \"grep {0} ~/.ssh/config\".format(host)])\n except subprocess.CalledProcessError as e:\n print(\"'{0}' does not exists in the configuratian of the gateway!\".format(host), file=sys.stderr)\n return\n\n to_gateway = \"ssh -p {0} -i {1} {2}@{3}\".format(gateway_port, gateway_key, gateway_user, gateway_ip)\n ssh = pexpect.spawn(to_gateway)\n if host:\n\n \n \"\"\" change this settings to make use. \"\"\"\n exps = [\n (\"lonli@arch\", 'echo -n \"Enter diretory : \" && read && [ -d \"${REPLY}\" ] && cd ${REPLY}'),\n (\"Enter diretory : \", \"/tmp\"),\n (\"/tmp\", \"pwd\"),\n ]\n \"\"\" change abbove session to make use. 
\"\"\"\n\n\n for p, s in exps:\n # print(\"expect : {0}, then send : {1}\".format(p, s))\n ssh.expect(p)\n ssh.sendline(s)\n winch_handler = sigwinch_handler(ssh)\n signal.signal(signal.SIGWINCH, winch_handler)\n winch_handler(None, None)\n ssh.interact()", "def ask_custom_command(self):\n\n shell='Bash'\n if platform == 'win32':\n shell='Batch'\n self.manager.root.show_text_box_popup('Please Enter A {} Command:'.format(shell), self.handle_user_command)", "def slogin ( server_name, user_name = 'ec2-user', ssh_identity_file = None ) :\n cmd = 'ssh'\n if ssh_identity_file :\n cmd += ' -i ' + ssh_identity_file\n cmd += ' -l ' + user_name\n cmd += ' ' + server_name\n\n return subprocess.call( cmd, shell = True )", "def get_ssh_user():\n\n return getpass.getuser()", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def do_shell(self, arglist):\n proc = subprocess.Popen(\n shlex.split(arglist), stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n print(proc.communicate()[0])", "def skip_if_shell_incompat(shell_type):\n plat_system = platform.system()\n if (\n (plat_system == \"Linux\" and shell_type not in (\"bash\", \"posix\", \"dash\"))\n or (plat_system == \"Windows\" and shell_type not in (\"cmd.exe\", \"powershell\"))\n or (\n plat_system == \"Darwin\"\n and shell_type not in (\"zsh\", \"bash\", \"posix\", \"dash\")\n )\n ):\n pytest.skip(\"Incompatible shell/OS\")", "def _as_user(self, cmd):\n p = Popen(['/usr/bin/sudo', '/bin/su', '--login', '--command', '/bin/bash', self.user], \n stdin=PIPE, stdout=PIPE, stderr=PIPE)\n out, err = p.communicate(cmd.encode('utf8'))\n return p.returncode, out, err", "def connect_session(profile):\n os.system('ssh ' + profile)", "def spawn():\n if platform.system() == \"Windows\":\n # HACK https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1243#issuecomment-706668723\n # FIXME Use pexpect or wexpect somehow to fix this\n pytest.xfail(\n \"pexpect fails on Windows\",\n )\n # Using PopenSpawn, although probably it would be best to use pexpect.spawn\n # instead. 
However, it's working fine and it seems easier to fix in the\n # future to work on Windows (where, this way, spawning actually works; it's just\n # python-prompt-toolkit that rejects displaying a TUI)\n return PopenSpawn", "def do_shell(self, line):\n os.system(line)", "def css_login_as_root(css_test_machine):\n ssh_config = collections.namedtuple('ssh_config',\n ('hostname port username '\n 'rsa_key_file password'))\n config = ssh_config(hostname=css_test_machine['public_ip'],\n port=22,\n username=\"root\",\n rsa_key_file=\"\", # Use password for now\n password=css_test_machine['root_password'])\n logger.debug(\"ssh instantiated\")\n yield SshUtil(config)\n # Close connection?", "async def logged_in(self):\n if not self.pexpect_child or not self.console:\n return False\n try:\n self._flush_buffer()\n exit_status, result = await self._run_cmd(\"who\")\n if exit_status is not 0:\n return False\n if result:\n fields = [line for line in result.splitlines()][0].split()\n self.applog.info(f\"user/ttyinfo:{fields}\")\n return fields and fields[0] == self.username and \"tty\" in fields[1]\n except Exception as e:\n self.applog.exception(\"Exception --> logged_in()\", exc_info=e)\n return False", "def shell(self, extra_args):\n if isinstance(extra_args, str) or isinstance(extra_args, unicode):\n extra_args = extra_args.split()\n if not isinstance(extra_args, list):\n msg = \"invalid arguments: %s\\nshould be list or str, %s given\" % (extra_args, type(extra_args))\n self.logger.warning(msg)\n raise ADBException(msg)\n\n shell_extra_args = ['shell'] + extra_args\n return self.run_cmd(shell_extra_args)", "def run_shell_job(self, job, config):\n if job.plugin != \"shell\":\n # TRANSLATORS: please keep 'plugin' untranslated\n raise ValueError(_(\"bad job plugin value\"))\n return self._just_run_command(job, config)", "def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()", "def shell(command):\n log(\"Executing: \" + command)\n result = subprocess.call(command, shell=True, executable=\"/bin/bash\")\n if (result != 0):\n log(\"Execution failed (result=%d)\" % result)\n sys.exit()", "def run_shell_cmd(cmd, first=False, ignore=False):\n\n stdout, stderr = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n cwd=CWD).communicate()\n if stderr and not ignore:\n raise Exception(stderr)\n elif stdout:\n if first:\n return stdout.splitlines()[0]\n else:\n return stdout.splitlines()", "def the_root_user_should_be_able_to_login_with_ssh(driver):\n assert ssh_result['result'], ssh_result['output']\n assert '..' in ssh_result['output'], ssh_result['output']", "def main(self):\n cmd, path, args = self._parse_args()\n if cmd == \"shell\":\n print \"You are now in ubs shell.\"\n print \"Use \\\"python %s help\\\" to see other choice.\" % sys.argv[0]\n self.shell()\n elif cmd == \"help\":\n self.print_path_help(path)\n sys.exit(0) \n elif cmd == \"run\":\n self.route(path, args)\n else:\n raise Exception(\"unknown CMD %s\" % cmd)" ]
[ "0.6784456", "0.6614977", "0.6381293", "0.6362046", "0.6199895", "0.6197317", "0.6101484", "0.60623527", "0.60271186", "0.59255606", "0.58501774", "0.5833172", "0.5826648", "0.5798579", "0.57554454", "0.56577605", "0.5635898", "0.56230944", "0.55727273", "0.55245143", "0.5498706", "0.54953814", "0.54935485", "0.5481472", "0.547323", "0.54400617", "0.5421676", "0.5295253", "0.5289749", "0.52891004", "0.52697915", "0.52661395", "0.52627987", "0.52524924", "0.5242846", "0.52374977", "0.52289355", "0.5225961", "0.5170297", "0.512692", "0.5123387", "0.51043254", "0.50961614", "0.50840664", "0.5083628", "0.5057902", "0.50410926", "0.5038987", "0.50318843", "0.5029268", "0.5022561", "0.5006", "0.500384", "0.49974614", "0.49902204", "0.4964841", "0.49567237", "0.49446863", "0.4926346", "0.49247882", "0.49137872", "0.49129844", "0.48921552", "0.48891822", "0.48887885", "0.48861507", "0.48801714", "0.48686096", "0.4860006", "0.48590812", "0.48542243", "0.48309356", "0.48297355", "0.48291957", "0.4827213", "0.48251972", "0.4817104", "0.48099947", "0.48099947", "0.4803044", "0.4793987", "0.47894892", "0.4770414", "0.47608024", "0.47409108", "0.47374424", "0.4737293", "0.47148132", "0.47110516", "0.4698558", "0.4690798", "0.46904385", "0.46859393", "0.46770555", "0.46747902", "0.46712944", "0.46699443", "0.4664699", "0.46640956", "0.46636337" ]
0.69821674
0
Retrieve the name of the directory that will store the logfiles. If the SHELLLOGGERDIR environment variable is set, use that. Otherwise, default to ~/.shelllogger
def get_log_dir(): env_var = "SHELLLOGGERDIR" if os.environ.has_key(env_var): return os.environ[env_var] else: return os.path.expanduser('~/.shelllogger')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def log_dir():\r\n if LogOptions._LOG_DIR is None:\r\n LogOptions._LOG_DIR = app.get_options().twitter_common_log_log_dir\r\n return LogOptions._LOG_DIR", "def get_logging_dir(self):\n return self.logging_dir", "def logdir(self) -> str:\n return self._logdir", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"", "def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir", "def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def get_log_path():\n return LOG_PATH", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def log_filename():\n # use the env variable if set, or fallback to default\n return os.environ.get('NBAUTOEVAL_LOG') \\\n or os.path.join(os.getenv(\"HOME\"), \".nbautoeval\")", "def logdir(self) -> Path:\n assert (\n self._logdir\n ), \"Log provider has not been tied to a SummaryWriter yet\"\n return self._logdir", "def get_trial_dir() -> str:\n return logging.root._log_dir # type: ignore", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def new_custom_log_dir(self) -> str:", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def pytest_logger_logsdir(self, config):", "def logs_directory(self):", "def get_log_folder(cls, test_suite_name):\n if not test_suite_name:\n test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]\n sdk_path = cls.get_sdk_path()\n log_folder = os.path.join(sdk_path, \"TEST_LOGS\",\n test_suite_name +\n time.strftime(\"_%m%d_%H_%M_%S\", time.localtime(LOG_FOLDER_TIMESTAMP)))\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n return log_folder", "def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname", "def get_console_log_filename(self):\n return", "def get_compss_log_dir(self):\n return self.compss_log_dir", "def _get_log_filename(self):\n fnd = self._get_session_dir()\n fn = os.path.join(fnd, '%s.log' % self.timestamp.time_string())\n\n if not os.path.exists(fn):\n with open(fn, 
'wt') as log_file:\n log_file.write('Log Created %s by ' % str(datetime.now()))\n log_file.write('%s V%s\\n' % (__PROGRAM_NAME__, __VERSION__))\n\n return fn", "def logger_name( self ):\n return Constants.LogKeys.steps", "def get_runtime_default_log_path(soln_stk, container_config):\n\n cont_info = _get_preconfig_info(soln_stk, container_config)\n return cont_info[RUNTIME_DEFAULT_LOG_KEY]", "def _log_name():\n return os.path.splitext(os.path.basename(__file__))[0]", "def rel_logdir(self) -> Path:\n assert (\n self._logdir\n ), \"Log provider has not been tied to a SummaryWriter yet\"\n return self._rel_logdir", "def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir", "def logname():\n global _basename\n \n parent = os.path.splitext(os.path.basename(wheresdaddy()))[0]\n return '.'.join([_basename, os.path.splitext(os.path.basename(sys.argv[0]))[0], parent])", "def get_log_file(name):\n try:\n log_file = Config.get('logs', 'file.{}'.format(name))\n except configparser.NoSectionError:\n # if the logs section doesn't exist, return the default\n return LOG_FILE\n except configparser.NoOptionError:\n # if the module doesn't have a specific file, check for a global config:\n try:\n log_file = Config.get('logs', 'file')\n except configparser.NoOptionError:\n return LOG_FILE\n return log_file", "def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")", "def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def get_log_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, _DEFAULT_FILENAME_LOG)", "def _get_log_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".log.txt\")", "def logfile(self):\n return self._get('logfile')", "def configDir():\n return os.path.join(os.environ['HARNESSEDJOBSDIR'], 'config', getSiteName())", "def kstest_logdir(tmpdir, test):\n logfile = test[\"logfile\"]\n for e in logfile.split(os.path.sep):\n if e.startswith(\"kstest-\"):\n return os.path.join(tmpdir, e)\n\n raise RuntimeError(f\"No kstest-* directory found in {logfile}\")", "def get_log_dir(args):\n\n params_str = '_'.join([args.model_type,\n '%d' % args.num_layers,\n '%03d' % args.hidden_layer_size,\n '%02d' % args.batch_size,\n '%.2f' % args.dropout_keep_prob,\n '%.4f' % args.initial_learning_rate])\n\n test_users_str = '_'.join(args.test_users)\n\n return os.path.join(args.data_dir, 'logs', params_str, test_users_str)", "def get_default_config_filename():\n if 'PYWREN_CONFIG_FILE' in os.environ:\n config_filename = os.environ['PYWREN_CONFIG_FILE']\n # FIXME log this\n\n elif os.path.exists(\".pywren_config\"):\n config_filename = os.path.abspath('.pywren_config')\n\n else:\n config_filename = get_default_home_filename()\n\n return config_filename", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or 
os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file", "def logger_name(self):\n return self.__class__.__name__", "def get_daemon_storage_dir(cls):\n\n return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY]", "def conf_dir(self):\r\n return self._conf_dir", "def get_config_dir() -> str:\n # Get the system app configuration standard location\n if 'APPDATA' in os.environ:\n return os.environ['APPDATA']\n elif 'XDG_CONFIG_HOME' in os.environ:\n return os.environ['XDG_CONFIG_HOME']\n else:\n return os.path.join(os.environ['HOME'], '.config')", "def _logFile_default(self):\n print \"choosing default log file\"\n return os.path.join(self.rpiADCLogFolder,time.strftime(\"rpiADC-%Y-%m-%d.csv\", self.currentLocalTime))", "def setup_logdir(self, default_logdir: Union[str, Path]) -> Path:\n self._default_logdir = Path(default_logdir)\n\n if self._create_logdir:\n self.logdir_path.mkdir(parents=True, exist_ok=True)\n\n if not self.logdir_path.is_dir():\n raise ValueError(f\"logdir '{self.logdir_path}' must be a directory.\")\n\n return self.logdir_path", "def get_logger():\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(name)s] [%(asctime)s]: %(message)s')\n caller = whoami(offset=1)\n name = os.path.basename(caller)\n logger = logging.getLogger(name)\n return logger", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])", "def confDir(self):\r\n return self._confDir", "def get_script_folder_name():\n enso_command_path = os.path.expanduser('~/Library/Application Support/enso/commands')\n if (not os.path.isdir(enso_command_path)):\n os.makedirs(enso_command_path)\n return enso_command_path", "def get_cfg_dir():\n if not os.path.exists(CONF_DIR):\n os.mkdir(CONF_DIR)\n return CONF_DIR", "def _get_session_dir(self):\n\n fnd = os.path.join(self.config.capture_dir, self.timestamp.date_string(), self.timestamp.time_string())\n if not os.path.isdir(fnd):\n os.makedirs(fnd)\n\n return fnd", "def getFSUserDir(self):\n if not self.authorised:\n raise AuthError(401,\"I am sorry, but you are not authorised\")\n\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"screenName\"]:\n fsDir = self.config.get(\"FileMan\",\"homedir\") + self.authJson[\"userInfo\"][\"screenName\"]\n return fsDir\n else: \n raise AuthError(500, \"Cannot determine the working directory - Liferay did not provide user's screenName\")", "def getLog():\n # assign a current working directory + '/logs' to log_dir variable (platform independent)\n log_dir = os.path.join(os.getcwd(), \"logs\")\n # or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"logs\")\n # or --> user directory: log_dir = os.path.join(os.path.expanduser(\"~\"), \"logs\")\n\n try:\n # if logs directory(!) 
doesn't exist, create it\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n # open log file with prefix and timestamp (platform independent) in Append mode\n log = open(os.path.join(log_dir, \"rfaRunner_\" + getCurTime(\"%Y%m%d_%H-%M\") + \".log\"), \"a\")\n return log\n except (OSError, IOError):\n # return -1 in case of exception\n return -1", "def get_logger():\n return logging.getLogger(__name__)", "def defaultDirectory(self):\n return self.__defaultDirectory", "def get_logger(logger_name):\n logger_path = os.path.join(PATH, 'config', \"logging.conf\")\n if os.path.exists(logger_path):\n logging.config.fileConfig(logger_path)\n logging.info(\"%s started\" % logger_name)\n return logging.getLogger(logger_name)", "def config_logger(log_cfg_file, experiment_name=None, output_dir='logs'):\n timestr = time.strftime(\"%Y.%m.%d-%H%M%S\")\n exp_full_name = timestr if experiment_name is None else experiment_name + '___' + timestr\n logdir = os.path.join(output_dir, exp_full_name)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n log_filename = os.path.join(logdir, exp_full_name + '.log')\n if os.path.isfile(log_cfg_file):\n logging.config.fileConfig(log_cfg_file, defaults={'logfilename': log_filename})\n msglogger = logging.getLogger()\n msglogger.logdir = logdir\n msglogger.log_filename = log_filename\n msglogger.info('Log file for this run: ' + os.path.realpath(log_filename))\n\n # Create a symbollic link to the last log file created (for easier access)\n try:\n os.unlink(\"latest_log_file\")\n except FileNotFoundError:\n pass\n try:\n os.unlink(\"latest_log_dir\")\n except FileNotFoundError:\n pass\n try:\n os.symlink(logdir, \"latest_log_dir\")\n os.symlink(log_filename, \"latest_log_file\")\n except OSError:\n msglogger.debug(\"Failed to create symlinks to latest logs\")\n return msglogger", "def log_name(self) -> Optional[str]:\n return self._log_name", "def get_lensdir():\n if 'LENSDIR' not in os.environ:\n raise ValueError(\"LENSDIR is not set\")\n return os.environ['LENSDIR']", "def get_fifteen_logfile():\n return \"fifteenStrategy\" + get_day() + \".log\"", "def getLogger():\n return GlobalLogger.logger", "def get_log_dir(device: torch.device, lr: float, comment: str) -> Path:\n\n dev_str = str(device).split(\":\")[0]\n tensorboard_suffix = f\"_dev{dev_str}_lr{lr}\" + comment\n current_time = datetime.datetime.now().strftime(\"%b%d_%H-%M-%S\")\n log_dir = TENSORBOARD_DIR / (current_time + \"_\" + tensorboard_suffix)\n return log_dir", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def GetMyHomeDir():\n # type: () -> Optional[str]\n uid = posix.getuid()\n try:\n e = pwd.getpwuid(uid)\n except KeyError:\n return None\n else:\n return e.pw_dir", "def logger(self):\n return logging.getLogger(self.logger_name)", "def config_dir(self) -> str:\n if not self._config_dir:\n self._config_dir = self._detect_config_dir()\n return self._config_dir", "def getFSUserDir(self):\n\n return self.config.get(\"FileMan\",\"homedir\") + self.getRole()[\"roleName\"]", "def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "def logpath(self):\n return self.outpath", "def logging_conf_module_path(monkeypatch: pytest.MonkeyPatch) -> str:\n path = \"inboard.logging_conf\"\n monkeypatch.setenv(\"LOGGING_CONF\", path)\n assert os.getenv(\"LOGGING_CONF\") == path\n return path", "def _getSshDir():\n return f'{Path.home()}/.ssh'", "def get_logger(context):\n Log.job_log = logging.getLogger(context)\n return Log.job_log", "def default_configfile():\n 
dirname=None\n if os.getenv(\"HOME\"):\n dirname=os.getenv(\"HOME\")\n elif os.getenv(\"USERPROFILE\"):\n dirname=os.getenv(\"USERPROFILE\")\n\n else:\n raise FattyException(\"No HOME or USERPROFILE variable set, unable to determine default config file\")\n\n return os.path.join(dirname,\".fattybugs\")", "def outdir_str(d):\n f = folder_str(d)\n logs_dir = os.path.join(f, 'logs')\n try:\n if not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n except OSError:\n raise argparse.ArgumentTypeError('could not create \"%s\" directory' % logs_dir)\n return f", "def get_log_file_strategy():\n try:\n strategy = Config.get('logs', 'files')\n except (configparser.NoSectionError, configparser.NoOptionError):\n # default to collecting all logs to a single file\n strategy = LOG_FILE_STRATEGY_DEFAULT\n if strategy.lower() not in LOG_FILE_STRATEGIES:\n return LOG_FILE_STRATEGY_DEFAULT\n return strategy.lower()", "def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def GetLogFileForTask(cls, task):\n rel_path = cls.TaskRelativeName(task)\n if not rel_path or not PipelineConfig.Instance().pipeline_log_dir(): return None\n # Flatten the path.\n rel_path = rel_path.replace(os.sep, '.')\n return os.path.join(PipelineConfig.Instance().pipeline_log_dir(), rel_path + '.log')", "def get_workdir() -> str:\n Config.__get()\n assert Config.__config is not None\n return get_abspath(Config.__config.get('wsgi', 'workdir').strip())", "def _locate_config_dir():\n if CONFIG_DIR_ENV in os.environ:\n config_dir = os.environ[CONFIG_DIR_ENV]\n else:\n config_dir = os.path.join(os.environ[\"HOME\"], CONFIG_HOME_DIR)\n return config_dir", "def get_action_logfile():\n return \"action\" + get_day() + \".log\"", "def get_directory() -> str:\n return directory", "def user_conf_dir(self):\n return os.path.join(BaseDirectory.xdg_config_home, \"speech-dispatcher\")", "def get_pipe_configuration_dir(self) -> str:\n ret = os.path.join(self.get_home_dir(), '.fiepipe')\n if not os.path.exists(ret):\n os.makedirs(ret)\n return ret", "def get_working_dir(self):\n return self.role.directory", "def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH" ]
[ "0.7954862", "0.7615036", "0.75734735", "0.7549197", "0.7508237", "0.74224484", "0.7221798", "0.71972513", "0.7154093", "0.7087117", "0.706262", "0.6988383", "0.6986975", "0.69751024", "0.68904305", "0.6860191", "0.6823196", "0.67315704", "0.6727358", "0.6695155", "0.6671013", "0.6646318", "0.6557566", "0.64734256", "0.64678323", "0.64372915", "0.6411352", "0.63782746", "0.633779", "0.6335772", "0.63105214", "0.62447596", "0.61588216", "0.6145725", "0.6145567", "0.6133527", "0.61263835", "0.6105772", "0.610018", "0.6057498", "0.60473126", "0.6018222", "0.6013474", "0.6002729", "0.59807897", "0.59803516", "0.5979332", "0.59676695", "0.59633136", "0.5929581", "0.5926153", "0.5913507", "0.5885645", "0.58814836", "0.585753", "0.5843798", "0.58297783", "0.5824241", "0.5809809", "0.5807074", "0.57865304", "0.57842535", "0.5769931", "0.5753657", "0.57356536", "0.5726738", "0.5711401", "0.57051885", "0.5704977", "0.5703559", "0.5702786", "0.56941915", "0.5681224", "0.56751674", "0.5672205", "0.56706566", "0.5670394", "0.5662711", "0.56534576", "0.5647465", "0.5645722", "0.5636583", "0.5628833", "0.56240076", "0.56205034", "0.56185365", "0.56163204", "0.5613982", "0.5606891", "0.56061757", "0.56048995", "0.55952215", "0.5589158", "0.5588376", "0.558562", "0.55816996", "0.5568006", "0.55651575", "0.55650395", "0.5564499" ]
0.88646066
0
Call when session is complete. Returns the name of the XML file
def done(self): self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n") self.logfile.close() if self.debugfilename is not None: self.debugfile.write("</cli-debug>") return self.raw_to_xml()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()", "def get_filename(self) -> str:\n\t\treturn self.xml_name", "def get_xml_file_path(data_file_path: str):\n session_path = Path(data_file_path).parent\n session_id = session_path.stem\n return str((session_path / f\"{session_id}.xml\").absolute())", "def save_complete(self):\n self.save_assets(reset_html=False)\n # new_file(self.url_obj.file_path, content=tostring(self.lxml, encoding=self.encoding))\n self.lxml.getroottree().write(self.url_obj.file_path, method=\"html\")\n\n self._lxml = None # reset the tree", "def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))", "def save_xml(self, file_path: Path = None) -> bool:\n if not file_path and not self.session.file:\n logger.error(\"trying to save xml for session with no file\")\n return False\n if not file_path:\n file_path = self.session.file\n result = False\n try:\n if not self.is_runtime():\n logger.debug(\"sending session data to the daemon\")\n result, exceptions = self.start_session(definition=True)\n if not result:\n message = \"\\n\".join(exceptions)\n self.app.show_exception_data(\n \"Session Definition Exception\",\n \"Failed to define session\",\n message,\n )\n self.client.save_xml(self.session.id, str(file_path))\n if self.session.file != file_path:\n self.session.file = file_path\n self.update_session_title()\n logger.info(\"saved xml file %s\", file_path)\n result = True\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"Save XML Error\", e)\n return result", "def openXMLSession(self, filename):\r\n #doc = xml.dom.minidom.parse(filename)\r\n handle = open(filename)\r\n xmlStr = handle.read()\r\n \r\n spaceGroupInt, a, b, c, alpha, beta, gamma, Na, Nb, Nc, Na, Nb, Nc, atomData = self.loadXMLStr(xmlStr, notifyGUI = True)\r\n \r\n #Send Message to GUI\r\n send(signal = \"File Load\", sender = \"Session\", spaceGroup = spaceGroupInt, a = a, b = b, c = c, alpha = alpha, beta = beta, gamma = gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc)\n send(signal = \"Generate Bonds\", sender = \"Session\", event=None)", "def on_session_finish(context):\n pass", "def completed_file(self, context):", "def __createXMLFileForClear():\r\n #description\r\n #Root\r\n clear_root = Element('clear-users-request', {'xmlns':SYMPLECTIC_XMLNS_URI,} )\r\n #Feed\r\n SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID\r\n #Convert to ElementTree and write xml version to file\r\n xml_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_USER_FOLDER + SYMPLECTIC_LOCAL_USER_CLEARFILE\r\n ElementTree(clear_root).write(xml_filename)\r\n #Return xml filename\r\n return xml_filename", "def get_document(self):\n\t\tif(self.fs.tmp_dir):\n\t\t\tfull_filename = self.fs.tmp_dir + os.sep + self.fs.get_document()\n\t\telse:\n\t\t\tfull_filename = self.fs.get_document()\n\t\t\n\t\treturn full_filename", "def open_save_filename(self, startpath=expanduser(\"~\")):\n xml_filepath = QFileDialog.getSaveFileName(\n parent=self.main_view,\n caption=\"Select XML file for saving\",\n directory=startpath,\n filter=\"XML files (*.xml)\",\n options=QFileDialog.DontUseNativeDialog)\n return xml_filepath", "def 
fetch_current_xml(self):\n return self.android_device_driver.fetch_current_xml()", "def on_session_ended():\n #print(\"on_session_ended\")", "def on_session_ended():\n #print(\"on_session_ended\")", "def _finished_fired(self):\n\n csp = self.pages[0]\n fwp = self.pages[1]\n\n file = File(fwp.abs_path)\n file.create_file()\n\n # Refresh the workspace tree view\n view = self.window.get_view_by_id(RESOURCE_VIEW)\n if view is not None:\n # FIXME: Refresh the parent folder, not the whole tree.\n workspace = self.window.application.get_service(IWorkspace)\n wtv = view.tree_viewer.refresh(workspace)", "def sessionEnded(self):\r\n if self.sessionStarted == True: \r\n self.sessionCompleted = True", "def save_session():\n\n filename = request.json.get(\"path\")\n finished = request.json.get(\"finished\")\n config = request.json.get(\"config\")\n\n success = engine.io.save(filename, state.proc, state.corpus, state.test_corpus, state.classifier, state.last_result, finished, config)\n\n if success:\n return jsonify({\"saved\":True})\n else:\n return 'Could not save session file.', 428", "def success_callback(self):\n temp_schedule = self.run_dir / \"SLABSurfaceTemps.txt\"\n if temp_schedule.exists():\n with open(self.idf.idfname, \"a\") as outfile:\n with open(temp_schedule) as infile:\n next(infile) # Skipping first line\n next(infile) # Skipping second line\n for line in infile:\n outfile.write(line)\n # invalidate attributes dependant on idfname, since it has changed\n self.idf._reset_dependant_vars(\"idfname\")\n self.cleanup_callback()", "def grab_saved(self):\r\n filezilla = os.path.join(os.getenv(\"APPDATA\"), \"FileZilla\")\r\n if os.path.exists(filezilla):\r\n saved_pass_file = os.path.join(filezilla, \"recentservers.xml\")\r\n if os.path.exists(saved_pass_file):\r\n xml_tree = ET.parse(saved_pass_file).getroot()\r\n if xml_tree.findall('RecentServers/Server'):\r\n servers = xml_tree.findall('RecentServers/Server')\r\n else:\r\n servers = xml_tree.findall('Servers/Server')\r\n \r\n for server in servers:\r\n host = server.find('Host')\r\n port = server.find('Port')\r\n user = server.find('User')\r\n password = server.find('Pass')\r\n self.saved += \"==== %s ====\\nHOST: %s\\nPORT: %s\\nUSER: %s\\nPASS: %s\\n\" % (host.text, host.text, port.text, user.text, base64.b64decode(password.text).decode())", "def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()", "def _savefilename(self):\n logger.debug(\"Popping SaveFilename browser\")\n return filedialog.asksaveasfilename(**self._kwargs)", "def xml_report_path(self, targets, conf):\r\n org, name = self.identify(targets)\r\n cachedir = Bootstrapper.instance().ivy_cache_dir\r\n return os.path.join(cachedir, '%s-%s-%s.xml' % (org, name, conf))", "def new_session_loaded(self):\n session = self.parent.session\n if session is None: return None\n #logger.debug(\"LOADING NEW SESSION\")\n self.figure.new_session(session)\n self.refresh_table()\n self.summarize_current_table()\n self.refresh_plots()\n self.update_fitting_options()\n return None", "def on_session_ended(session_ended_request, session):", "def save_session(self):\n filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))\n filehandler = open(filename, \"wb\")\n # Do not save the loaded LFP matrices since they are too big\n temp = self\n temp.probes = dict.fromkeys(temp.probes.keys())\n 
temp.loaded_cond = None\n temp.layer_selected = False\n cPickle.dump(temp.__dict__, filehandler)\n filehandler.close()\n return filename", "def OnFileSaveAs(self):\n global outputPDFName\n ret = False\n dlg = wx.FileDialog(self, \"Save As\", \"\", \"\",\n \"PDF Files (*.pdf)|*.pdf|All Files|*.*\", wx.SAVE)\n if (dlg.ShowModal() == wx.ID_OK):\n fileName = dlg.GetFilename()\n dirName = dlg.GetDirectory()\n outputPDFName = dirName+\"\\\\\"+fileName\n ret = True\n dlg.Destroy()\n return ret", "def pytest_sessionfinish(self, session):\n _buildname = self.buildname()\n if _buildname is None:\n if not self._init_session:\n self._sessionstart(session)\n\n if self.post_queue:\n if 'sanity' in self._opts.markexpr:\n self._send_post_queue(session, sanity=True)\n else:\n self._send_post_queue(session)\n self.class_logger.warning(\"Cannot determinate buildname. Probably test setup is failed. Skipping report.close step.\")\n self.server_cmd(\"close\", [self.self_name])", "def save(self, filename=None):\n f = filename if filename else self.path\n etree.register_namespace('', TEI)\n etree.register_namespace('mith', MITH)\n self.doc.write(f, xml_declaration=True, encoding='utf-8', method='xml')", "def completed(self):\r\n open_(self._has_completed_path, \"w+\").close()", "def export_file(self):\n\n if not self.session_filename:\n return\n\n data = {\n \"session_filename\": self.session_filename,\n \"index_start\": self.total_mutant_index,\n \"sleep_time\": self.sleep_time,\n \"restart_sleep_time\": self.restart_sleep_time,\n \"restart_interval\": self.restart_interval,\n \"web_port\": self.web_port,\n \"crash_threshold\": self._crash_threshold_node,\n \"total_num_mutations\": self.total_num_mutations,\n \"total_mutant_index\": self.total_mutant_index,\n \"netmon_results\": self.netmon_results,\n \"procmon_results\": self.procmon_results,\n \"is_paused\": self.is_paused\n }\n\n fh = open(self.session_filename, \"wb+\")\n fh.write(zlib.compress(cPickle.dumps(data, protocol=2)))\n fh.close()", "def SaveXMLToDB(xmlFileName):", "def startNewFileTransmitSession(self, data):\n return self.session.request('exchangedll/newsession/', 'POST',\n self.getXML(data, 'fileTransmitRequest'))", "def load_last_session_id(cfg: Config) -> Union[str, None]:\n last_session_file = cfg.project.data_dir / LAST_SESSION_FILENAME\n if not last_session_file.exists():\n return None\n with open(last_session_file, \"r\") as f:\n return f.read()", "def parsing_xml(self, path, filemoving):\n name = str(filemoving.listoffiles(path)[0])\n tree = ET.parse(os.path.join(path, name))\n root = tree.getroot()\n return root", "def open_load_filename(self, startpath=expanduser(\"~\")):\n xml_filepath = QFileDialog.getOpenFileName(\n parent=self.main_view,\n caption=\"Select XML file for loading\",\n directory=startpath,\n filter=\"XML files (*.xml)\",\n options=QFileDialog.DontUseNativeDialog)\n return xml_filepath", "def xml_path(self):\n return self.__xml_path", "def afterEndElement(self, name):\n pass", "def action_done(self):\n root = self.generate_txt()\n self._write_attachment(root)\n self.write({'state': 'done'})\n\n return True", "def load_game_session(self, game_name, session_name):\n self.file_path = os.path.join(self.file_directory % game_name, '%s.%s' % (session_name, self.file_extension))\n self.file_path = os.path.expanduser(self.file_path)\n self.load()", "def process_xml(self):\n self.process_gpx_file(str(self.filename))", "def load_xml_startup_script(name):\n\n for dir in (\"%sshare/gps/support/core/\" % GPS.get_system_dir(),\n 
\"%sshare/gps/support/ui/\" % GPS.get_system_dir(),\n \"%sshare/gps/library/\" % GPS.get_system_dir(),\n \"%sshare/gps/plug-ins/\" % GPS.get_system_dir()):\n\n try:\n f = file(\"%s%s\" % (dir, name)).read()\n break\n except:\n f = None\n\n GPS.parse_xml(f)\n process_all_events()", "def saveFile(self, filename):\n ret = libxml2mod.xmlSaveFile(filename, self._o)\n return ret", "def create_output_loc(self):\n self.output_name = [self.args.xml_out, 'gatk4_' + self.json_file['name'].lower().split(' ')[0] + '.xml']\n if not self.args.xml_out.endswith('/'):\n return '/'.join(self.output_name)\n else:\n return ''.join(self.output_name)", "def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file", "def ReturnXmlElementName(self) -> str:", "def get_current_file(self):\n#-----------on attend la fin de creation du fichier Nexus\n \n while self._ismoving():\n self.logger.debug(\"DataRecorder creat Nexus file\") \n time.sleep(1.0)\n return self.dp.currentFiles[0]", "def finish(self):\r\n\r\n self.text += \"</html>\\n\"\r\n\r\n if self.filename != None:\r\n with open(self.filename, \"w\") as f:\r\n f.write(self.text)\r\n\r\n return self.text", "def saveState(self):\n e = xml.Element(self.type)\n e.attrib['lastUpdate'] = str(clock.now())\n e.attrib['name'] = self.name\n #e.attrib['status'] = ('true' if self.status else 'false')\n return e", "def parse_xmls(user, application, complete_path, init_es, tool, scan_name, user_host, to_name):\n process_files(user, application, complete_path, init_es, tool, scan_name, user_host, to_name)\n info_debug_log(event='Parse xmls',status='success')", "def get_file_inter_name(self):\n\t\tf = tempfile.NamedTemporaryFile(encoding='utf-8',mode='r',delete=False)\n\t\tf.close()\n\t\treturn f.name", "def save_httpd_session(self):\n # EXPLANATION:\n # The Dropbox redirect flow generates a token during the start() method,\n # which you must supply when calling the finish() method to prevent CSRF attacks\n # @see https://www.dropbox.com/developers/core/docs/python#DropboxOAuth2Flow\n # EXERCISE:\n # - save self.httpd_session to session data file self.HTTPD_SESSION_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved HTTPD session data \"{httpd_session}\" in file \"{session_file}\"'.format(\n httpd_session=str(self.httpd_session), session_file=self.HTTPD_SESSION_FILE))", "def save_config_xml(self, fileName: str):\n self._sim.saveConfigXML(fileName)", "def write_xml(self, xmlfile):\n system.xml.write_file(xmlfile, self.status, 'status')", "def _readMoreXML(self,xmlNode):\n self._localReadMoreXML(xmlNode)", "def _readMoreXML(self,xmlNode):\n pass", "def save_xml(tree, file_name, folder_name):\r\n import os # ändrar plats för filer\r\n os.chdir(folder_name)\r\n tree.write(file_name) # Namnet på ny fil\r", "def save_xml_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n step = len(column_names)\n\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"XML\", \"*.xml\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.xml')\n data = import_lst\n\n if len(data[0]) == step:\n pass\n else:\n data = import_lst[step::]\n\n data2 = list(map(list, zip(*data)))\n\n data3 = {key: value for key, 
value in zip(column_names, data2)}\n\n column = list(data3.keys())\n\n df = pd.DataFrame(data3, columns=column)\n\n data_dict = df.to_dict(orient=\"records\")\n with open('output.json', \"w+\") as f:\n json.dump(data_dict, f, indent=4)\n\n xml_data = dicttoxml(data_dict).decode()\n with open(save_name, \"w+\") as f:\n f.write(xml_data)\n\n data.clear()\n data2.clear()\n data3.clear()", "def saveXML(self, filename):\n root = ET.Element('root')\n pklot = ET.SubElement(root, \"ParkingLot\")\n\n lotname = ET.SubElement(pklot, \"LotName\", name=str(self.name))\n idCounter = ET.SubElement(pklot, \"NextAvailableID\", counter=str(self.spotIDCounter))\n for spot in self.parkingSpots:\n ET.SubElement(pklot, 'Spot', id=str(spot.id), location=' '.join(str(x) for x in spot.location))\n\n tree = ET.ElementTree(root)\n tree.write(filename)", "def notify_file_transfer_completed(self):\n self.presentation.load() if len(self.presentation.presentation_elements) == 0 else self.presentation.reload()", "def get_req_file_name(self):\n # create the req xml file first\n filename = self.get_test_file_path() + self.test_name \\\n + \"_\" + str(self.get_sandesh_req_num())\n req_filename = filename + \"_req.xml\"\n return req_filename", "def LoadXML(NAME):\r\n # Basics snaged from https://docs.python.org/2/library/xml.etree.elementtree.html\r\n Tree = parse(NAME) # opens and turns the xml file into a tree\r\n Root = Tree.getroot()\r\n return(Root)", "def onfinish( request ):", "def onfinish( request ):", "def _WriteSessionCompletion(self, session_completion):\n if self.storage_type != definitions.STORAGE_TYPE_SESSION:\n raise IOError('Session completion not supported by storage type.')\n\n stream_name = 'session_completion.{0:06d}'.format(self._last_session)\n if self._HasStream(stream_name):\n raise IOError('Session completion: {0:06d} already exists.'.format(\n self._last_session))\n\n session_completion_data = self._SerializeAttributeContainer(\n session_completion)\n\n data_stream = _SerializedDataStream(\n self._zipfile, self._temporary_path, stream_name)\n data_stream.WriteInitialize()\n data_stream.WriteEntry(session_completion_data)\n data_stream.WriteFinalize()", "def getrawxml(fp,fn):\n print(\"starting to get the NRE XML Data from historical file\")\n infile = open(fp+fn,\"r\",encoding=\"utf-8\")\n xml_file = infile.read()\n return xml_file", "def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name", "def saveFileTo(self, cur, encoding):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.xmlSaveFileTo(self._o, cur__o, encoding)\n return ret", "def _onEnd(self, name, completed):\n logging.debug(\"onEnd...\")", "def askOutputFile():\n while True:\n print(\"Save the final file\")\n # Try until the final file is saved.\n try:\n fileName = easygui.filesavebox(\"Save your file\",\n \"Save the file\",\n default=\"C:\\\\DefaultFile.txt\",\n filetypes=[\"*.txt\"])\n if fileName == None:\n raise\n except:\n pass\n else:\n return fileName", "def load(self):\n\n pacemaker_xml_filename = ('/tmp/pacemaker_%s.xml'\n % str(uuid.uuid4()))\n\n try:\n if not os.path.exists('/usr/sbin/cibadmin'):\n return\n\n os.system(\"/usr/sbin/cibadmin --query > %s\"\n % pacemaker_xml_filename)\n\n if not os.path.exists(pacemaker_xml_filename):\n return\n\n self._xmldoc = etree.parse(pacemaker_xml_filename)\n if self._xmldoc is None:\n os.remove(pacemaker_xml_filename)\n return\n\n if not 
etree.iselement(self._xmldoc.getroot()):\n self._xmldoc = None\n os.remove(pacemaker_xml_filename)\n return\n\n if len(self._xmldoc.getroot()) == 0:\n self._xmldoc = None\n os.remove(pacemaker_xml_filename)\n return\n\n os.remove(pacemaker_xml_filename)\n\n except Exception:\n if os.path.exists(pacemaker_xml_filename):\n os.remove(pacemaker_xml_filename)\n\n LOG.error(\"error:\", sys.exc_info()[0])", "def finished(self):\n\t\telog(\"finished\")", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def Save_Current_Profile(self):\r\n #name = tkFileDialog.asksaveasfilename()\r\n #if( name == \"\" ):\r\n # return\r\n #self.system.Save_Current_Profile(name)\r\n self.system.Save_Current_Profile()", "def _get_del_file(self):\r\n loc = os.path.dirname(__file__)\r\n del_file = os.path.join(loc, 'newdelicious.xml')\r\n return open(del_file)", "def refresh_session_data (self, account):\n # load the profiles page (to verify the user)\n response = self._session_get(component='profiles')\n # parse out the needed inline information\n only_script_tags = SoupStrainer('script')\n page_soup = BeautifulSoup(response.text, 'html.parser', parse_only=only_script_tags)\n page_data = self._parse_page_contents(page_soup=page_soup)\n account_hash = self._generate_account_hash(account=account)\n self._save_data(filename=self.data_path + '_' + account_hash)", "def pytest_sessionfinish(session, exitstatus):\n if not updated_files:\n return\n print('\\n\\nInomaly Complete, Updated these files\\n')\n for filepath in updated_files:\n print('\\t%s' % filepath)", "def filename(self):\r\n\t\treturn None", "def main(*args):\r\n print(START_MESSAGE)\r\n print(\"Script Location:\", location)\r\n print(\"Arguments Passed:\", args)\r\n\r\n root = ET.parse(xmlfile).getroot()\r\n keys = []\r\n out = \"\"\r\n\r\n for child in root[1]:\r\n out += child.attrib['Name'] + \";\" + child[0].text + \"\\n\"\r\n\r\n with open(outputfile, 'w') as f:\r\n f.write(out)", "def read(self) -> str:\n\t\treturn self._readXMLfragment()", "def output_file(self, filename, title=\"Bokeh Plot\", autosave=False, mode=\"cdn\", root_dir=None):\n self._file = {\n 'filename' : filename,\n 'resources' : Resources(mode=mode, root_dir=root_dir),\n 'title' : title\n }\n self._autosave = autosave\n\n if os.path.isfile(filename):\n logger.info(\"Session output file '%s' already exists, will be overwritten.\" % filename)", "def fname(self):\n return self._fname", "def _on_load_finished(self):\n loaded = self._web_view.view_state == ViewState.GraphLoaded\n self.loadFinished.emit(loaded)", "def run(self):\n \n while globalvars.ALIVE:\n self.server.handle_request()\n print \"End of Xml connection...\"", "def _tmp(self):\n tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',\n suffix='.out',\n delete=False)\n return tmpfn.name", "def get_name(self):\n\t\troot = self.get_xml()\n\t\treturn root.find(\"name\").text", "def fileIsComplete(self):\n return True", "def done(self):\n self.add_report(self.doc)\n self.timestamp(\"done\")", "def _filename(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilename(**self._kwargs)", "def _get_seq_filename(self):\n fnd = self._get_session_dir()\n self.seq_number += 1\n fn = os.path.join(fnd, 'S%4.4d.tif' % self.seq_number)\n return fn", "def Save_xml(self, accounts):\n try:\n\n self.extension = \".xml\"\n\n colors.info(\"Saving as XML in {}{}\".format(self.file, self.extension))\n\n Main = ET.Element(\"SpotCheck\")\n\n SpotifyFree 
= ET.SubElement(Main, 'SpotifyFree')\n SpotifyPremium = ET.SubElement(Main, 'SpotifyPremium')\n PremiumFamily = ET.SubElement(Main, 'PremiumFamily')\n AdminPremiumFamily = ET.SubElement(Main, 'AdminPremiumFamily')\n BadAccounts = ET.SubElement(Main, 'BadAccounts')\n\n for account in accounts:\n if account.get(\"account_login\") == \"error\":\n temp = ET.SubElement(BadAccounts, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n else:\n if account.get(\"AccountType\") == \"Spotify Free\":\n temp = ET.SubElement(SpotifyFree, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Spotify Premium\":\n temp = ET.SubElement(SpotifyPremium, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Premium Family\":\n if account.get(\"Admin\"):\n temp = ET.SubElement(AdminPremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n else:\n temp = ET.SubElement(PremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n XML = ET.tostring(Main)\n with open(self.file + self.extension, \"w\") as output_:\n output_.write(XML)\n colors.correct(\"Done! All saved successfully\")\n except Exception as e:\n colors.error(str(e))\n _exit(1)", "def modpricesetter_get_file_name(self):\r\n year, month, day = self._get_market_year_month_day_as_str()\r\n interval_number = self._get_interval_number_as_str()\r\n base_name = \"NEMPriceSetter_{year}{month}{day}{interval_number}00.xml\"\r\n name = base_name.format(\r\n year=year, month=month, day=day, interval_number=interval_number\r\n )\r\n path_name = Path(self.cache_folder) / name\r\n name_OCD = name.replace(\".xml\", \"_OCD.xml\")\r\n path_name_OCD = Path(self.cache_folder) / name_OCD\r\n name_zero = name.replace(\".xml\", \"00.xml\")\r\n path_name_zero = Path(self.cache_folder) / name_zero\r\n if os.path.exists(path_name):\r\n return name\r\n elif os.path.exists(path_name_OCD):\r\n return name_OCD\r\n elif os.path.exists(path_name_zero):\r\n return name_zero\r\n else:\r\n return name", "def getCurrentFileName(self):\n return os.path.basename(self.filePath)", "def handle_finished (self):\n\n print self.in_headers\n print self.in_cookies\n print self.content_type\n print self.content_encoding\n print self.response_code\n print self.is_allowing_persistence\n print self.content", "def load_xml_files_erisk(local_dir, token_position=0):\n users = {}\n prep = Preprocessor()\n c = 0\n for dir_path, dir_names, filenames in os.walk(local_dir):\n for name in filenames:\n tok = name.split(\"_\")\n if token_position > 0:\n key = tok[0] + tok[token_position]\n else:\n key = tok[token_position]\n key = key.strip(\".xml\")\n full_file = os.path.abspath(os.path.join(dir_path, name))\n dom = ET.parse(full_file, parser=ET.XMLParser(encoding=\"utf-8\"))\n writing = dom.findall('WRITING')\n for w in writing:\n title = w.find('TITLE').text\n text = w.find('TEXT').text\n post = title + \" \" + text\n # preprocess text\n new_text = prep.tokenize_reddit(post)\n\n if key in users.keys():\n users[key] += new_text + ' end_ '\n else:\n 
users[key] = new_text + ' end_ '\n\n c += 1\n print(\"Preprocessed chunk: \", c)\n\n return users", "def getActiveName(self):\n aw = self.activeWindow()\n if aw:\n return aw.getFileName()\n else:\n return None", "def on_session_started():\n #print(\"on_session_started\")", "def on_session_started():\n #print(\"on_session_started\")", "def save(self, fname=None):\n if not fname:\n fname = self.getname() \n assert(fname != None), 'You must specify a filename to save to'\n if not fname.endswith('.png'):\n fname += '.png'\n try:\n urlretrieve(str(self), fname) \n except IOError, e:\n raise IOError, 'Problem saving chart to file: %s'%e \n return fname", "def apply(self):\r\n\r\n file_name = str(sum([ord(i) for i in self.ssid.get()]))\r\n\r\n def saving_file(xml):\r\n \"\"\" Save user profile in xml format to temp_ dir.\"\"\"\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))\r\n\r\n parse_xml = etree.parse(os.path.dirname(os.path.realpath(__file__)) +\r\n \"/data/sampleProfile.xml\")\r\n\r\n # The below code will parse the sample xml file\r\n # and fill important details entered by the user.\r\n root_tree = parse_xml.getroot()\r\n root_tree[0].text = self.ssid.get()\r\n root_tree[1][0][0].text = self.ssid.get()\r\n root_tree[3].text = self.connection_mode.get().lower()\r\n security = root_tree[4][0]\r\n security[0][0].text = self.authentication.get()\r\n security[0][1].text = self.encryption.get()\r\n if self.authentication.get() != \"open\":\r\n etree.SubElement(security, \"sharedKey\")\r\n etree.SubElement(security[1], \"keyType\").text = \"passPhrase\"\r\n etree.SubElement(security[1], \"protected\").text = \"false\"\r\n etree.SubElement(security[1], \"keyMaterial\").text = self.password.get()\r\n\r\n # Save the xml file\r\n saving_file(root_tree)\r\n\r\n # Add profile to the system.\r\n temp_path = 'netsh wlan add profile filename=\"' + self.app_path + \"\\\\temp_\\\\\"\r\n output_ = subprocess.run(temp_path + file_name + '.xml\"', shell=True,\r\n capture_output=True, text=True)\r\n os.remove(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\")\r\n\r\n # If unable to add profile.\r\n if output_.returncode != 0:\r\n message = \"Sorry, Unable to add profile.\\n(You entered wrong details \" \\\r\n \"or else you don't have admin rights.)\"\r\n image_ = \"error\"\r\n\r\n else:\r\n message = \"Profile added successfully (Please Refresh)\"\r\n image_ = \"warning\"\r\n\r\n MessageBox(self.parent, message, image_)" ]
[ "0.5961969", "0.55481595", "0.5306143", "0.5272178", "0.52593595", "0.52341264", "0.5226595", "0.51102614", "0.5057698", "0.5023233", "0.49440277", "0.4889504", "0.48876208", "0.48515698", "0.48515698", "0.48138982", "0.47858512", "0.47524884", "0.47405386", "0.47252148", "0.4719705", "0.47163856", "0.46782959", "0.4674563", "0.46662143", "0.46541283", "0.4648281", "0.4623243", "0.46005392", "0.45530733", "0.45485717", "0.45403314", "0.45298237", "0.45149583", "0.45055297", "0.4503293", "0.44850054", "0.44838014", "0.448114", "0.44588622", "0.44532108", "0.44440624", "0.44415697", "0.44315794", "0.44314137", "0.44116744", "0.44006327", "0.43782288", "0.4371531", "0.4370749", "0.43635806", "0.4361417", "0.43588728", "0.4355967", "0.43475586", "0.43467805", "0.4338726", "0.43361473", "0.43360558", "0.43339446", "0.43258914", "0.4319696", "0.43163663", "0.43163663", "0.43086505", "0.430194", "0.4301167", "0.43001166", "0.42968032", "0.4288983", "0.42871624", "0.4282351", "0.42807347", "0.4277238", "0.42703176", "0.4248686", "0.4241031", "0.42391473", "0.42366818", "0.42347333", "0.4234178", "0.42313415", "0.42245397", "0.42171475", "0.42163563", "0.4214365", "0.42070478", "0.4202139", "0.4189861", "0.41789237", "0.41769746", "0.4172912", "0.41716874", "0.4169116", "0.41666725", "0.41642255", "0.41632202", "0.41632202", "0.41621828", "0.41619593" ]
0.49102354
11
Convert the .raw file, with illegal characters and escape keys, to a proper XML version. Returns the name of the XML file
def raw_to_xml(self): xmlfilename = self.logfilename.replace('.raw','.xml') fout = codecs.open(xmlfilename, encoding="utf-8", mode="w") for line in codecs.open(self.logfilename,encoding="utf-8"): fout.write(sanitize(line)) fout.close() return xmlfilename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitizeXML(filename):\n #we have to remove all illegal characters from crossref xml\n full_path = os.path.abspath(filename)\n path, filename = os.path.split(full_path)\n with open(full_path, 'r') as in_file:\n with open(os.path.join(path,\"tmp\"+filename), 'w') as out_file:\n for line in in_file:\n out_file.write(line.replace(r'&', r'&amp;'))\n os.remove(full_path)\n os.rename(os.path.join(path, \"tmp\"+filename), os.path.join(path, filename))\n \n return full_path", "def beautify_xml(XML):\n # convert XML file to modifiable string to beautify it\n text_string = ET.tostring(XML, encoding='UTF-8', method='xml')\n \n # insert line breaks before end of file tag\n file_string = text_string.replace('</aardvark>', '\\n\\n</aardvark>')\n \n # insert double new line before comments to create\n # blocks for each command\n file_string = file_string.replace('<!', '\\n\\n<!')\n \n # insert new line between each set of XML tags\n file_string = file_string.replace('><', '>\\n\\t<')\n \n # remove header\n # file_string = file_string.replace('<?xml version=\\'1.0\\' encoding=\\'utf8\\'?>\\n', '') \n \n return file_string", "def _make_string(self, filename):\n\n if not os.path.isfile(filename):\n str = \"ERROR: Could not find specified XML file %s.\" % filename\n PRINT.info(str)\n raise OSError(str)\n\n return open(filename).read()", "def _clean_xml(raw: str) -> str:\n a = raw.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n no_encoding = _strip_encoding(a)\n no_ns = _strip_namespace(no_encoding)\n return no_ns", "def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))", "def source_xml_file(tmpdir):\n xml_input = tmpdir.mkdir('sub').join('trades_raw.xml')\n xml_input.write(\n '''\n <Trades>\n <Trade CorrelationId=\"701\" NumberOfTrades=\"1\" Limit=\"1000\" TradeID=\"A1\">700</Trade>\n <Trade CorrelationId=\"002\" NumberOfTrades=\"1\" Limit=\"1000\" TradeID=\"B2\">1170</Trade>\n <Trade CorrelationId=\"103\" NumberOfTrades=\"2\" Limit=\"500\" TradeID=\"C3\">200</Trade>\n </Trades>\n '''\n )\n return str(xml_input)", "def example_xml40(example_xml_file40):\n return etree.fromstring(example_xml_file40.encode('utf-8'))", "def fix_xml_encoding(self, file_path):\n\n with open(file_path + self.infile, 'rb') as original:\n with open(file_path + \"Temp File.txt\", 'wb') as temp:\n [temp.write(row.replace(\"utf-16\", \"utf-8\")) for row in original]\n\n os.remove(file_path + self.infile)\n\n with open(file_path + \"Temp File.txt\", 'rb') as temp:\n with open(file_path + self.infile, 'wb') as new:\n [new.write(row) for row in temp]\n\n os.remove(file_path + \"Temp File.txt\")", "def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with 
open(filename, 'w+') as output:\n output.write(xml)", "def test_utf8_xml_from_xml_file(self):\n # 'Россия' is 'Russia' in Cyrillic, not that it matters.\n xml = u\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <country>Россия</country>\"\"\"\n with tempfile.NamedTemporaryFile(suffix=\".xml\") as xmlfile:\n xmlfile.write(xml.encode('utf-8'))\n xmlfile.flush()\n\n j2k = glymur.Jp2k(self.j2kfile)\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as jfile:\n jp2 = j2k.wrap(jfile.name)\n xmlbox = glymur.jp2box.XMLBox(filename=xmlfile.name)\n jp2.append(xmlbox)\n\n box_xml = jp2.box[-1].xml.getroot()\n box_xml_str = ET.tostring(box_xml,\n encoding='utf-8').decode('utf-8')\n self.assertEqual(box_xml_str,\n u'<country>Россия</country>')", "def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))", "def creer_fichier(nom_file):\n fichier = open(nom_file, 'w')\n fichier.write(\"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\\n\")\n fichier.close()", "def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()", "def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string", "def example_xml42(example_xml_file41):\n return etree.fromstring(example_xml_file42.encode('utf-8'))", "def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))", "def cleanXMLfromSpecialChars(self,line):\n return str(line).replace(\"&\", \"&amp;\").replace(\"\\\"\",\"&quot;\").replace(\"<\",\"&lt;\").replace(\">\",\"&gt;\").replace(\"'\",\"&apos;\")", "def recipe12_3():\n from xml.sax.handler import ContentHandler\n import xml.sax\n\n class textHandler(ContentHandler):\n def characters(self,ch):\n sys.stdout.write(ch.encode(\"Latin-1\"))\n parser=xml.sax.make_parser()\n handler=textHandler()\n parser.setContentHandler(handler)\n parser.parse(\"sample.xml\")", "def example_xml41(example_xml_file41):\n return etree.fromstring(example_xml_file41.encode('utf-8'))", "def file_to_bow(filename,\n raw=True,\n exc_start=None,\n exc_end=None,\n no_http=False):\n with open(filename, 'r') as f:\n file_text = read_doc(f)\n file_text = file_text.lower()\n if not raw:\n new_text = ''\n # Do word-by-word processing of the text.\n # maybe add html stripping\n word_list = file_text.split()\n\n for word in word_list:\n word = word.strip()\n word = word.replace('\\xef\\xbb\\xbf', '')\n word = hparser.unescape(word)\n # word = unicodedata.normalize('NFKD', unicode(word))\n # Check for optional exclude delineators.\n if exc_start and exc_end:\n # word = word.encode('utf-8')\n if word.startswith(exc_start) and \\\n word.endswith(exc_end):\n continue\n\n word = word.replace(\"'s\", \"\")\n # Check if we're excluding http:// addresses\n if no_http and word.startswith('http://'):\n continue\n # Now strip punctuation\n word = word.strip(string.punctuation)\n if word == '' or \\\n word.isdigit():\n continue\n new_text += word\n new_text += ' '\n file_text = new_text\n return file_text", "def getrawxml(fp,fn):\n print(\"starting to get the NRE XML Data from historical file\")\n infile = open(fp+fn,\"r\",encoding=\"utf-8\")\n xml_file = infile.read()\n return xml_file", "def make_xml_filename(file_dir, mri, suffix=\"attributes\"):\n return 
os.path.join(file_dir, f\"{mri.replace(':', '_')}-{suffix}.xml\")", "def recipe12_4():\n import codecs,encodings\n \"\"\" Caller will hand this library a buffer string, and ask us to convert\n the buffer, or autodetect what codec the buffer probably uses. \"\"\"\n # 'None' stands for a potentially variable byte (\"##\" in the XML spec...)\n autodetect_dict={ # bytepattern : (\"name\",\n (0x00, 0x00, 0xFE, 0xFF) : (\"ucs4_be\"),\n (0xFF, 0xFE, 0x00, 0x00) : (\"ucs4_le\"),\n (0xFE, 0xFF, None, None) : (\"utf_16_be\"),\n (0xFF, 0xFE, None, None) : (\"utf_16_le\"),\n (0x00, 0x3C, 0x00, 0x3F) : (\"utf_16_be\"),\n (0x3C, 0x00, 0x3F, 0x00) : (\"utf_16_le\"),\n (0x3C, 0x3F, 0x78, 0x6D) : (\"utf_8\"),\n (0x4C, 0x6F, 0xA7, 0x94) : (\"EBCDIC\"),\n }\n def autoDetectXMLEncoding(buffer):\n \"\"\"buffer -> encoding_\n The buffer string should be at least four bytes long.\n Returns None if encoding cannot be detected.\n Note than encoding_name might not have an installed\n decoder (e.g., EBCDIC)\n \"\"\"\n # A more efficient implementation would not decode the whole\n # buffer at once, but then we'd have to decode a character at\n # a time looking for the quote character, and that's a pain\n encoding=\"utf_8\" # According to the XML spec, this is the default\n # This code successively tries to refine the default:\n # Whenever it fails to refine, it falls back to\n # the last place encoding was set\n bytes=byte1, byte2, byte3, byte4=map(ord,buffer[0:4])\n enc_info=autodetect_dict.get(bytes,None)\n if not enc_info: # Try autodetection again, removing potentially\n # variable bytes\n bytes=byte1,byte2,None,None\n enc_info=autodetect_dict.get(bytes)\n if enc_info:\n encoding=enc_info # We have a guess...these are\n # the new defaults\n # Try to fidn a more precise encoding using XML declaration\n secret_decoder_ring=codecs.lookup(encoding)[1]\n decoded, length=secret_decoder_ring(buffer)\n first_line=decoded.split(\"\\n\",1)[0]\n if first_line and first_line.startswith(u\"<?xml\"):\n encoding_pos=first_line.find(u\"encoding\")\n if encoding_pos!=-1:\n # Look for double quotes\n quote_pos=first_line.find('\"', encoding_pos)\n if quote_pos==-1: #Look for single quote\n quote_pos=first_line.find(\"'\", encoding_pos)\n if quote_pos>-1:\n quote_char=first_line[quote_pos]\n rest=first_line[quote_pos+1]\n encoding=rest[:rest.find(quote_char)]\n return encoding", "def _normalize_malformed_xml(xml):\n xml = xml.strip()\n if not xml.startswith('<TXT>'):\n xml = '<TXT>' + xml\n if not xml.endswith('</TXT>'):\n xml = xml + '</TXT>'\n return xml", "def _readXMLfragment(self) -> str:\n\t\tself._check()\n\t\tself._raven.reset_output_buffer()\n\t\tself._raven.reset_input_buffer()\n\t\tfirst_char = self._raven.read()\n\t\twhile (first_char == 0 or first_char == b' '):\n\t\t\tfirst_char = self._raven.read()\n\t\tif (first_char != b'<'):\n\t\t\tself._clear()\n\t\t\traise InvalidFormat('INVALID CHAR: {}'.format(first_char))\n\t\tstart_tag = first_char\n\t\tresult = ''\n\t\twhile (start_tag[-1] != 62):\n\t\t\tchar = self._raven.read()\n\t\t\tif len(char) == 0:\n\t\t\t\traise Exception('No data')\n\t\t\tstart_tag += char\n\t\tstart_tag = start_tag.decode('ascii').strip('\\x00')\n\t# Debugging\n\t#\tprint(start_tag)\n\t\tif \"/\" in start_tag:\n\t\t\traise InvalidFormat('\"/\" detected in XML starting tag')\n\t\tresult = start_tag\n\t\tend_tag = start_tag[0] + '/' + start_tag[1:] \n\t\tend_tag_len = len(end_tag)\n\t\twhile (result[-end_tag_len:] != end_tag):\n\t\t\tchar = self._raven.read()\n\t\t\tif len(char) == 0:\n\t\t\t\traise 
Exception('No data')\n\t\t\tresult += char.decode('ascii')\n\t\ttry:\n\t\t\treturn ET.fromstring(result)\n\t\texcept:\n\t\t\traise InvalidFormat", "def prepare_xml(original_xml, mangled_xml):\n in_handle = open(original_xml)\n footer = \" </BlastOutput_iterations>\\n</BlastOutput>\\n\"\n header = \"\"\n while True:\n line = in_handle.readline()\n if not line:\n #No hits?\n stop_err(\"Problem with XML file?\")\n if line.strip() == \"<Iteration>\":\n break\n header += line\n\n if \"<BlastOutput_program>blastx</BlastOutput_program>\" in header:\n print \"BLASTX output identified\"\n elif \"<BlastOutput_program>blastp</BlastOutput_program>\" in header:\n print \"BLASTP output identified\"\n else:\n in_handle.close()\n stop_err(\"Expect BLASTP or BLASTX output\")\n\n out_handle = open(mangled_xml, \"w\")\n out_handle.write(header)\n out_handle.write(line)\n count = 1\n while True:\n line = in_handle.readline()\n if not line:\n break\n elif line.strip() == \"<Iteration>\":\n #Insert footer/header\n out_handle.write(footer)\n out_handle.write(header)\n count += 1\n out_handle.write(line)\n\n out_handle.close()\n in_handle.close()\n print \"Input has %i queries\" % count", "def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )", "def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.rstrip('\\n')\n output_file.write(line)", "def get_filename(self) -> str:\n\t\treturn self.xml_name", "def ConvertFileName(cls,infile,band):\r\n try:\r\n import os\r\n except:\r\n raise ImportError(\"Can not find module os\")\r\n try:\r\n base = str.split(infile,\"_metadata.xml\")[0]\r\n print base\r\n ext=\"_band\"+str(band)+\".ntf\"\r\n outfile=base+ext\r\n return outfile\r\n except:\r\n raise ImportError(\"Can not covert file names\")", "def example_xml43(example_xml_file41):\n return etree.fromstring(example_xml_file43.encode('utf-8'))", "def importXml ( r ):\n rawText = r.read ()\n rawText = rawText.strip ()\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, '', rawText )\n xml = ET.fromstring ( text )\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n return xml", "def open_raw(self, name):\n self._canOperate = False\n self._txt = \"\"\n try:\n with open(name, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n l = line.strip(\"\\n\")\n if l != \"\":\n self._txt += l + \" \"\n else:\n # paragraphing\n self._txt += \"\\n\"\n\n # cut the source into words\n self._words = re.findall(\"[\\w\\dÀÁÂÃÄÅàáâãäåÒÓÔÕÖØòóôõöøÈÉÊËèéêëÇçÌÍÎÏìíîïÙÚÛÜùúûüÿÑñ]+\", self._txt)\n self._length = len(self._words)\n except:\n raise FileNotFound(name)", "def _fn2ascii(self, filename): \n nameBase, ext = Path(Path(filename).basename()).splitext()\n try: nameBase.encode('ascii')\n except UnicodeEncodeError:\n nameBase = nameBase.encode('utf-8').encode('hex')\n try:\n ext = ext.encode('ascii')\n except UnicodeEncodeError:\n ext = ext.encode('utf8').encode('hex')\n return str(nameBase + ext)", "def txt_to_xml(ws2ify_path, stage_dir):\r\n\r\n txt_file = get_file(stage_dir, \".txt\", override=True)\r\n obj_file = get_file(stage_dir, \".obj\", 
override=True)\r\n keyframe_easing_dict = {\"1\": \"LINEAR\", \"2\": \"EASED\"}\r\n\r\n # If both an obj and txt file exist, user can use ws2ify\r\n if txt_file and obj_file:\r\n\r\n while True:\r\n use_ws2ify = input(\"\\nTXT exists. Use ws2ify? (Y/N) \")\r\n if use_ws2ify:\r\n use_ws2ify = use_ws2ify.upper()[0]\r\n\r\n if use_ws2ify != \"Y\" and use_ws2ify != \"N\":\r\n print(\"\\nInvalid input.\")\r\n elif use_ws2ify == \"N\":\r\n return None\r\n else:\r\n break\r\n\r\n txt_file = get_file(stage_dir, \".txt\", override=False)\r\n obj_file = get_file(stage_dir, \".obj\", override=False)\r\n txt_path = os.path.join(stage_dir, txt_file)\r\n obj_path = os.path.join(stage_dir, obj_file)\r\n\r\n while True:\r\n keyframe_easing = input(\"\\nKeyframe easing = linear(1) or eased(2)?: \")\r\n if keyframe_easing != \"1\" and keyframe_easing != \"2\":\r\n print(\"\\nInvalid input.\")\r\n else:\r\n keyframe_easing = keyframe_easing_dict[keyframe_easing]\r\n break\r\n\r\n xml = \"{}.xml\".format(input(\"\\nOutput xml filename: \"))\r\n xml_path = os.path.join(stage_dir, xml)\r\n\r\n ws2ify_path = os.path.expanduser(ws2ify_path)\r\n os.chdir(ws2ify_path)\r\n subprocess.call([\"python\", \"run.py\", txt_path, obj_path, xml_path, keyframe_easing])\r\n os.chdir(config_writer.tool_path)\r\n\r\n return xml", "def escape_xml_characters(data):\n return (\n str(data)\n .replace(\"&\", \"&amp;\")\n .replace(\"<\", \"&lt;\")\n .replace(\">\", \"&gt;\")\n .replace('\"', \"&quot;\")\n )", "def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()", "def fallback_name(orig_name=None):\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]", "def _safe_file_name(self):\n FMT_STR = \"%s - %s - %s (%d) - %s%s\"\n return cleanse_filename(FMT_STR % (self.track,\n self.artist.replace(\"/\", \"\\\\\"),\n self.album.replace(\"/\", \"\\\\\"),\n self.year,\n self.title.replace(\"/\", \"\\\\\"),\n os.path.splitext(self.file_name)[1]))", "def saveFileEnc(self, filename, encoding):\n ret = libxml2mod.xmlSaveFileEnc(filename, self._o, encoding)\n return ret", "def get_xml_encoding(source):\n with get_xml_iterator(source) as iterator:\n start, tag, data, pos = iterator.next()\n if not start or tag != u'xml':\n raise IOError('Invalid XML file')\n\n return data['encoding']", "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def update_file(filename, items):\n # TODO: Implement something in the templates to denote whether the value\n # being replaced is an XML attribute or a value. 
Perhaps move to dyanmic\n # XML tree building rather than string replacement.\n should_escape = filename.endswith('addon.xml')\n\n with open(filename, 'r') as inp:\n text = inp.read()\n\n for key, val in items.items():\n if should_escape:\n val = saxutils.quoteattr(val)\n text = text.replace('{%s}' % key, val)\n output = text\n\n with open(filename, 'w') as out:\n out.write(output)", "def save_xml(self, filename):\n if \".xml\" not in filename:\n filename = filename + \".xml\"\n\n shutil.copyfile(self.env.model_file, filename)", "def write_file(file,dir_name):\n opened_file = open(dir_name + '/%s'%file,'w')\n opened_file.write('<?xml version=\"1.0\"?>\\n')\n return opened_file", "def finishXMLFile(filename):\n\n tagStack = []\n def start(name, attributes): tagStack.append(name)\n def end(name): tagStack.pop()\n\n p = expat.ParserCreate()\n p.StartElementHandler = start\n p.EndElementHandler = end\n\n e = None\n fIn = open(filename,'r+')\n try:\n p.ParseFile(fIn)\n except expat.ExpatError, e:\n pass\n\n if not e: return\n\n fIn.seek(0, 0)\n\n for i in range(e.lineno-1): fIn.readline()\n lastLine = fIn.readline()\n\n fIn.seek(-len(lastLine), 1)\n fIn.truncate()\n\n lastLine = lastLine.rstrip() # for some reason python appends a newline\n if e.message.startswith(\"no element found\"):\n # We're in a text section, carry on\n pass\n elif e.message.startswith(\"unclosed token\"):\n # throw away the final token and finish\n lastLine = lastLine[:e.offset]\n elif e.message.startswith(\"unclosed CDATA section\"):\n lastLine = lastLine + u']]>'\n elif e.message.startswith(\"not well-formed (invalid token)\"):\n # We need to worry about where we are. These\n # are the possibilities\n if (lastLine[-1] == u'/'):\n # We have \"<tagName /\"\n lastLine = lastLine + u'>'\n elif (lastLine[-1] == u'<'):\n # We have simply \"<\"\n lastLine = lastLine[:-1]\n elif (lastLine[-1] == u'<'): \n # We have \"</t\" with offset before the \"<\"\n lastLine = lastLine[:e.offset]\n elif (lastLine[-1] == u'!'):\n # We have \"<!\"\n lastLine = lastLine[:-2]\n elif (lastLine[-1] == u'-'):\n if (lastLine[-3:] == u'<!-'):\n lastLine = lastLine[:-3]\n elif (lastLine[-4:] == u'<!--'):\n lastLine = lastLine[:-4]\n else:\n # We have \"<!-- blah --\"\n lastLine = lastLine + u'>'\n elif (lastLine[-1] == u\"A\"):\n # We have \"<![CDATA\"\n lastLine = lastLine[:-8]\n elif (lastLine[-1] == u\"?\"):\n # We have \"<?\"\n lastLine = lastLine[:-2]\n\n fIn.write(lastLine)\n\n tagStack.reverse()\n for tag in tagStack:\n fIn.write(\"</\"+tag+\">\")\n fIn.close()", "def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text", "def preprocess(path):\n with open(path, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n pat = re.compile(\"&([^;\\\\W]*([^;\\\\w]|$))\")\n log(\"Processing the file '{}'...\".format(path))\n try:\n dom = parseString(re.sub(pat, \"&amp;\", f.read()))\n except ExpatError as e:\n msg = XmlHandler.ERROR_MESSAGE.format(path, e)\n log(msg)\n raise ValueError(e)\n else:\n log(\"Done!\")\n return dom", "def to_xml_file(self, xml_file_path):\n s = self.to_xml()\n with open(xml_file_path, \"w+b\") as f:\n f.write(s)", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in 
fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def export_xml(self, filename, full_export = False):\n \n # Private functions to write blocks of text\n # --------------------------\n def print_openrave(f, model):\n # print_openrave - OpenRAVE data\n # For compatibility only...\n f.write( ' <Openrave>\\n')\n f.write( ' <name>{0}</name>\\n'.format(model.name))\n f.write( ' <xml>{0}</xml>\\n'.format(model.or_xml))\n f.write( ' <transf>')\n for n in model.or_transf.flat[:]:\n f.write('{0:6f}'.format(n))\n f.write( '</transf>\\n')\n f.write( ' </Openrave>\\n')\n\n # --------------------------\n def print_Points(f, model, full_export):\n # print_Points - Print all Point3D entries\n\n f.write( ' <Points>\\n')\n for i in range(model.pts3D.shape[1]):\n print_Point(f, model, i)\n \n if full_export:\n for j in range(model.pt_info[i].desc.shape[0]):\n print_observ(f, model.pt_info[i], j, \\\n self.desc_name[self.desc_type[i]])\n f.write( '</Point>\\n');\n \n f.write( ' </Points>\\n');\n\n # --------------------------\n def print_observ(f, pt, idx_pt, desc_name):\n # <Observation camera_id=\"n\" desc_type=\"SIFT\" loc=\"x;y;scale;orientation\"\n # desc=\"a;b;c;...\">\n f.write( ' <Observation ');\n f.write( 'camera_id=\"{0}\" '.format(pt.cam_id[idx_pt]))\n f.write( 'desc_type=\"{0}\" '.format(desc_name))\n f.write( 'loc=\"')\n for l in pt.locs[idx_pt, :].ravel():\n f.write('{0:6f} '.format(l))\n f.write( '\" ')\n f.write( 'desc=\"')\n for d in pt.desc[idx_pt, :].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\"/>\\n')\n\n # --------------------------\n def print_Point(f, model, idx_pt):\n # <Point p3d=\"x;y;z\" nviews=\"\" avg_err=\"\" color=\"R;G;B\" desc_type=\"SIFT\"\n # desc=\"a;b;c;...\">\n f.write( ' <Point ');\n f.write( 'p3d=\"{0:6f} {1:6f} {2:6f}\" '.format(model.pts3D[0, idx_pt], \\\n model.pts3D[1, idx_pt], \\\n model.pts3D[2, idx_pt]))\n f.write( 'nviews=\"{0:d}\" '.format(model.num_views[idx_pt]))\n f.write( 'avg_err=\"{0:6f}\" '.format(model.avg_err[idx_pt]))\n f.write( 'color=\"{0} {1} {2}\" '.format(model.color3D[0,idx_pt], \\\n model.color3D[1,idx_pt], \\\n model.color3D[2,idx_pt]))\n f.write( 'desc_type=\"{0}\" '\\\n .format(model.desc_name[ model.desc_type[idx_pt] ]))\n f.write( 'desc=\"')\n for d in model.desc[idx_pt].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\">\\n')\n\n # --------------------------\n def print_Cameras(f, model):\n # print_Cameras - Print all Camera entries\n\n f.write( ' <Cameras>\\n')\n for idx, cam in enumerate(model.cam_poses.T):\n print_Camera(f, cam, idx)\n f.write( ' </Cameras>\\n')\n\n # --------------------------\n def print_Camera(f, cpose, idx_cam):\n # print_Camera - Camera entry\n\n f.write( ' <Camera ')\n f.write( 'id=\"{0}\" '.format(idx_cam))\n f.write( 'rot_type=\"quat\" ')\n q_t = tf_format.tf_format('quat', cpose)\n f.write( 'rot=\"')\n for val in q_t[:4].ravel():\n f.write( '{0:6f} '.format(val))\n 
f.write( '\" ')\n f.write( 'tx=\"')\n for val in q_t[4:].ravel():\n f.write( '{0:6f} '.format(val))\n f.write( '\"/>\\n')\n\n # --------------------------\n # Print data to file\n\n # First, update structures\n self.getNumViews()\n self.getNumPointsInCam()\n self.getAverageErr()\n\n with open(filename, 'w') as f:\n f.write('<Model name=\"{0}\" version=\"{1}\">\\n'.format(self.name, \\\n self.version) )\n # print_openrave(f, model)\n print_Points(f, self, full_export)\n if full_export:\n print_Cameras(f, self)\n f.write('</Model>\\n')", "def test_raw_feed(self):\n self.assertEqual(self.feed.feed.raw[:6].decode('utf-8'), \"<?xml \")", "def convert_to_raw(file):\n\n img = Image.open(file)\n img = img.convert('L') # convert to 8 bits per pixels\n (x, y) = img.size\n\n pixels = bytearray(list(img.getdata()))\n\n filename, file_extension = os.path.splitext(file)\n file2 = file.replace(file_extension, '.dat')\n file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2\n\n # print(file_name)\n\n with open(file_name, 'wb') as f:\n f.write(pixels)\n\n return file_name", "def __createXMLFileForClear():\r\n #description\r\n #Root\r\n clear_root = Element('clear-users-request', {'xmlns':SYMPLECTIC_XMLNS_URI,} )\r\n #Feed\r\n SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID\r\n #Convert to ElementTree and write xml version to file\r\n xml_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_USER_FOLDER + SYMPLECTIC_LOCAL_USER_CLEARFILE\r\n ElementTree(clear_root).write(xml_filename)\r\n #Return xml filename\r\n return xml_filename", "def sample_xml(opts,file):\r\n with open(file, opts) as xml:\r\n return xml.read()", "def string_raw(self):\n return \"x%x\" % self.encoded", "def adjust_nml_file(fname, replacements):\n f = open(fname, 'r')\n param_str = f.read()\n f.close()\n new_str = replace_keys(param_str, replacements)\n fd, path = tempfile.mkstemp()\n os.write(fd, str.encode(new_str))\n os.close(fd)\n shutil.copy(path, fname)\n os.remove(path)", "def writeFile(self, filename):\n s = ET.tostring(self._root)\n\n #Remove all formatting\n s = s.replace('\\n','')\n s = s.replace('\\t','')\n s = s.replace('\\r','')\n\n f = open(filename, 'w')\n f.write(minidom.parseString(s).toprettyxml())\n f.close()", "def recipe12_8():\n from xml.parsers.xmlproc import utils, xmlval, xmldtd\n def validate_xml_file(xml_filename, app=None, dtd_filename=None):\n # build validating parser object with appropriate error handler\n parser=xmlval.Validator()\n parser.set_error_handler(utils.ErrorPrinter(parser))\n if dtd_filename is None:\n # DTD fiel specified, laod and set it as the DTD to use\n dtd=xmldtd.load_dtd(dtd_filename)\n parser.val.dtd = parser.dtd = parser.ent = dtd\n if app is not None:\n # Application processing requested, set application object\n parser.set_application(app)\n # everything being set correctly, finally perform the parsing\n parser.parse_resource(xml_filename) \n # if XML data is in a string s, use instead\n # parser.feed(s)\n # parser.close(s)", "def encoding():\n\n return render_template(\"UTF-8-demo.txt\")", "def MakeTextXMLReady(text):\n dec_text = DecodeNonASCIIText(text)[0]\n items = []\n for char in dec_text:\n try:\n char = char.encode('ascii')\n except UnicodeEncodeError:\n # We have a non-ASCII character of type unicode. 
Convert it into an\n # XML-ready format.\n try:\n str(char)\n char.encode('utf-8')\n except UnicodeEncodeError:\n char = '%s;' % hex(ord(char)).replace('0x', '&#x')\n items.append(char)\n return ''.join(items)", "def normalize_filename(filename):\n value = unicodedata.normalize('NFKD', ensure_unicode(filename)).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return ensure_native_str(value)", "def get_ntuple_filenames_from_xml(full_filename):\n with open(full_filename) as f:\n is_comment = False\n for line in f:\n if line.startswith(\"<!--\"):\n is_comment = True\n if line.endswith(\"-->\"):\n is_comment = False\n if is_comment:\n continue\n if line.startswith(\"<In FileName=\"):\n this_line = line.strip()\n this_line = this_line.replace('<In FileName=\"', '')\n this_line = this_line.replace('\" Lumi=\"0.0\"/>', '')\n yield this_line", "def test_dict_to_xml_raw_include(self):\n xmlns = {\n '_': utils.NETCONF_NAMESPACE\n }\n\n xml_node = utils.generate_xml_node(\n {\n 'a': {\n '_!_': \"<g><a n='1'></a><d n='2'></d><c n='3'></c></g>\"\n }\n },\n xmlns,\n 'rpc'\n )\n\n xml_node_string = etree.tostring(\n xml_node, pretty_print=False\n )\n\n self.assertEqual(\n xml_node_string.decode('utf-8'),\n \"\"\"<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:\"\"\" +\n \"\"\"1.0\"><a><g><a n=\"1\"/><d n=\"2\"/><c n=\"3\"/></g></a></rpc>\"\"\"\n )", "def beautify_xml(path):\n with open(path) as f:\n content = f.read()\n\n def pretty_print(data):\n return \"\\n\".join(\n [\n line\n for line in parseString(data).toprettyxml(indent=\" \" * 2).split(\"\\n\")\n if line.strip()\n ]\n )\n\n return pretty_print(content)", "def record_to_xml(name):\n if name == 'authors':\n return 'author'\n elif name == 'related_identifiers':\n return 'detail'\n elif name == 'records':\n return 'record'\n else:\n return name", "def to_xml(self, enc='utf-8'):\n return b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE GIFTI SYSTEM \"http://www.nitrc.org/frs/download.php/115/gifti.dtd\">\n\"\"\" + xml.XmlSerializable.to_xml(self, enc)", "def writeToTempXml(self):\n name = self.fileToProcess.name\n all_tokens = ET.Element(\"tokens\")\n for token in self.tokensTable:\n if token.getType() == KEYWORD:\n keyword = ET.SubElement(all_tokens, \"keyword\")\n keyword.text = ' '+token.getValue()+' '\n elif token.getType() == IDENTIFIER:\n identifier = ET.SubElement(all_tokens, \"identifier\")\n identifier.text = ' '+token.getValue()+' '\n elif token.getType() == SYMBOL:\n symbol = ET.SubElement(all_tokens, \"symbol\")\n symbol.text = ' '+token.getValue()+' '\n elif token.getType() == STRING_CONST:\n stringConstant = ET.SubElement(all_tokens, \"stringConstant\")\n stringConstant.text = ' '+token.getValue()+' '\n elif token.getType() == INT_CONST:\n integerConstant = ET.SubElement(all_tokens, \"integerConstant\")\n integerConstant.text = ' '+token.getValue()+' '\n tree = ET.ElementTree(all_tokens)\n tree.write(name + 'T' + '.xml')", "def replace_with_file_contents(fname):\n try:\n with open(os.path.expanduser(fname[0])) as source_file:\n result = source_file.read()\n except IOError:\n result = '< %s' % fname[0] # wasn't a file after all\n\n # TODO: IF pyparsing input parser logic gets fixed to support empty file, add support to get from paste buffer\n return result", "def test_resourcesXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldResourcesFormat)\n upgradeResourcesXML(fp)\n 
self.assertEquals(fp.getContent(), newResourcesFormat)", "def __correct_encoding(self, encode, filename):\n if encode == 'None' or encode == self.__tencoding:\n return\n buffname = '~old' + filename\n self.__os.rename(filename, buffname)\n with open(buffname, 'r', encoding=self.__tencoding) as fr:\n with open(filename, 'w', encoding=self.__tencoding) as fw:\n for line in fr:\n fw.write(line[:-1] + '\\r\\n')\n self.__os.remove(buffname)", "def test_fixed_xml_tag(self):\r\n\r\n # create a error tag with valid xml contents\r\n root = etree.Element('error')\r\n good_xml = '''<sequential display_name=\"fixed\"><video url=\"hi\"/></sequential>'''\r\n root.text = good_xml\r\n\r\n xml_str_in = etree.tostring(root)\r\n\r\n # load it\r\n system = self.get_system()\r\n descriptor = system.process_xml(xml_str_in)\r\n\r\n # export it\r\n node = etree.Element('unknown')\r\n descriptor.add_xml_to_node(node)\r\n\r\n # Now make sure the exported xml is a sequential\r\n self.assertEqual(node.tag, 'sequential')", "def _escape_filename(filename):\n #Is adding the following helpful\n #if os.path.isfile(filename):\n # #On Windows, if the file exists, we can ask for\n # #its alternative short name (DOS style 8.3 format)\n # #which has no spaces in it. Note that this name\n # #is not portable between machines, or even folder!\n # try:\n # import win32api\n # short = win32api.GetShortPathName(filename)\n # assert os.path.isfile(short)\n # return short\n # except ImportError:\n # pass\n if \" \" not in filename:\n return filename\n #We'll just quote it - works on Windows, Mac OS X etc\n if filename.startswith('\"') and filename.endswith('\"'):\n #Its already quoted\n return filename\n else:\n return '\"%s\"' % filename", "def load_xml(filename):\n path = dirname(__file__)\n with open(join(path, 'data', filename)) as file:\n content = file.read()\n return content", "def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename", "def _escape_filename(self, filename):\n return filename.replace('\\\\', '\\\\\\\\').replace(' ', '\\\\ ')", "def format(self):\n self.clear_whitespace()\n self.to_ascii()\n return self._filename", "def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&amp;\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)", "def xml_encode(string):\n string = string.replace(\"&\", \"&amp;\")\n string = string.replace(\"<\", \"&lt;\")\n string = string.replace(\">\", \"&gt;\")\n string = 
string.replace(\"\\\"\",\"&quot;\")\n string = string.replace(SLASH, \"/\")\n return string", "def cleanFilename(filename):\n badChars = {ord('?'): None, ord('*'): None, ord('/'): None,\n ord('\\\\'): None, ord(':'): None, ord('\"'): \"''\",\n ord('<'): None, ord('>'): None, ord('|'): None}\n return filename.translate(badChars)", "def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)", "def parse(path):\n try:\n return parseString(open(path, \"r\", encoding=\"utf-8\", errors=\"ignore\").read())\n except ExpatError as e:\n # Some neutrino configuration files may contain text data with invalid character ['&'].\n # https://www.w3.org/TR/xml/#syntax\n # Apparently there is an error in Neutrino itself and the document is not initially formed correctly.\n log(XmlHandler.ERROR_MESSAGE.format(path, e))\n\n return XmlHandler.preprocess(path)", "def saveFormatFileEnc(self, filename, encoding, format):\n ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)\n return ret", "def _ooxml(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'ooxml')\n try:\n doc = officedissector.doc.Document(self.src_path)\n except Exception:\n self.make_dangerous('invalid ooxml file')\n return\n # There are probably other potentially malicious features:\n # fonts, custom props, custom XML\n if doc.is_macro_enabled or len(doc.features.macros) > 0:\n self.make_dangerous('macro')\n if len(doc.features.embedded_controls) > 0:\n self.make_dangerous('activex')\n if len(doc.features.embedded_objects) > 0:\n # Exploited by CVE-2014-4114 (OLE)\n self.make_dangerous('embedded obj')\n if len(doc.features.embedded_packages) > 0:\n self.make_dangerous('embedded pack')", "def test_dump(file_contents, engine_contents):\n file_name = 'Triangle.java.xml'\n dump = XmlEngine.dump(engine_contents[file_name])\n assert dump == file_contents", "def sniff( self, filename ):\n # TODO - Use a context manager on Python 2.5+ to close handle\n handle = open(filename)\n line = handle.readline()\n handle.close()\n\n # TODO - Is there a more robust way to do this?\n return line.startswith('<?xml ')", "def open_load_filename(self, startpath=expanduser(\"~\")):\n xml_filepath = QFileDialog.getOpenFileName(\n parent=self.main_view,\n caption=\"Select XML file for loading\",\n directory=startpath,\n filter=\"XML files (*.xml)\",\n options=QFileDialog.DontUseNativeDialog)\n return xml_filepath", "def process_xml(self):\n self.process_gpx_file(str(self.filename))", "def test_augmentsXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldAugmentsFormat)\n upgradeAugmentsXML(fp)\n self.assertEquals(fp.getContent(), newAugmentsFormat)", "def recover_from_xml(path):\n\tlistz = extract_list_from_xml(path)\n\trecover_old_filename(listz)", "def test_xml_file_roundtrip(self, file_path_extension, specified_format):\n # These files will be deleted once garbage collection runs (end of this function)\n iofile1 = NamedTemporaryFile(suffix=\".\" + file_path_extension)\n iofile2 = NamedTemporaryFile(suffix=\".\" + file_path_extension)\n forcefield_1 = ForceField(xml_simple_ff)\n forcefield_1.to_file(iofile1.name, io_format=specified_format)\n forcefield_2 = ForceField(iofile1.name)\n forcefield_2.to_file(iofile2.name, io_format=specified_format)\n assert open(iofile1.name).read() == open(iofile2.name).read()", "def 
__init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')", "def convert_raw_to_elan(transcription_data: Dict[str, Any], xml_path: str, elan_path: str, style_path: str = \"/elpis/elpis/engines/common/output/templates/elan.xsl\"):\n xml_data = dict2xml.dict2xml(transcription_data, wrap=\"data\", indent=\" \" * 4)\n with open(xml_path, \"w\") as xml_file:\n xml_file.write(xml_data)\n xslt_command = \"/elpis-gui/node_modules/.bin/xslt3\" # Later put this folder in $PATH?\n parameters = \" \".join([f'%s=\"{value}\"' % re.sub(r\"\\s\", \"_\", key) for key, value in transcription_data.items() if key != \"segments\"])\n command = f\"\"\"{xslt_command} -s:'{xml_path}' -xsl:'{style_path}' -o:'{elan_path}' {parameters}\"\"\"\n stream = subprocess.run(command, shell=True)\n return elan_path", "def preprocess(self, source, name, filename=None):\n if not name or not os.path.splitext(name)[1] in self.environment.file_extensions:\n return source\n output = StringIO()\n lexer = Lexer(iter(source.splitlines()))\n Parser(lexer, callback=output.write, debug=self.environment.slim_debug).parse()\n\n if self.environment.slim_print:\n print output.getvalue()\n\n return output.getvalue()", "def xml_safe(value):\n return CONTROL_CHARACTERS.sub('?', value)", "def xml_safe(value):\n return CONTROL_CHARACTERS.sub('?', value)", "def convert_xml_to_rst(src_file, dst_file, template_file, author, maintainer,\n version=None, committer=None):\n template = Template(_read_file(template_file))\n try:\n with open(src_file, 'r') as f:\n e = objectify.parse(f).getroot()\n template_string = template.render(e=e, author=author, maintainer=maintainer,\n version=version, committer=committer)\n _write_template_to_file(dst_file, template_string)\n except (TemplateError, LxmlError, OSError, IOError) as e:\n print(e)", "def save_raw(self, filename, typ):\n self.lib.SaveAsRaw(ct.c_char_p(str.encode(filename)),\n ct.c_int(self.savetypes[typ]))", "def encode_file_using_codes(file_name, letter_codes):\r\n contents = \"\"\r\n with open(file_name) as f:\r\n contents = f.read()\r\n file_name_encoded = file_name + \"_encoded\"\r\n with open(file_name_encoded, 'w') as fout:\r\n for c in contents:\r\n fout.write(letter_codes[c])\r\n print(\"Wrote encoded text to {}\".format(file_name_encoded))", "def open_save_filename(self, startpath=expanduser(\"~\")):\n xml_filepath = QFileDialog.getSaveFileName(\n parent=self.main_view,\n caption=\"Select XML file for saving\",\n directory=startpath,\n filter=\"XML files (*.xml)\",\n options=QFileDialog.DontUseNativeDialog)\n return xml_filepath" ]
[ "0.63195187", "0.59571916", "0.590457", "0.5790034", "0.56660265", "0.5458497", "0.53709275", "0.5369985", "0.5312219", "0.5296952", "0.5284404", "0.52644795", "0.52433074", "0.5239629", "0.5220292", "0.5200755", "0.5199187", "0.51793045", "0.5175041", "0.51562613", "0.5145448", "0.5133448", "0.51329625", "0.50987995", "0.506291", "0.50606483", "0.5055475", "0.50390977", "0.50006276", "0.4998994", "0.49911615", "0.49581346", "0.49540126", "0.4951109", "0.49365452", "0.49299997", "0.488789", "0.48801473", "0.487612", "0.48611164", "0.48526904", "0.48266733", "0.48177522", "0.48124617", "0.4799126", "0.47892252", "0.47845542", "0.47740173", "0.47729602", "0.4772026", "0.47633737", "0.47612277", "0.47597283", "0.47482127", "0.472307", "0.4718338", "0.47111493", "0.47083116", "0.47046947", "0.4704203", "0.46945357", "0.46935612", "0.46819085", "0.46766204", "0.46652278", "0.4662047", "0.46593085", "0.46581385", "0.46425584", "0.46315628", "0.46287346", "0.46269366", "0.46235314", "0.46232244", "0.46185452", "0.4616693", "0.4604209", "0.46026638", "0.4597567", "0.45901123", "0.4587201", "0.45815578", "0.4581262", "0.4580602", "0.4579263", "0.45790166", "0.4578955", "0.4576143", "0.45731744", "0.45546103", "0.4553282", "0.4552292", "0.4551883", "0.45432848", "0.45340702", "0.45340702", "0.45339108", "0.4527563", "0.4524273", "0.45221904" ]
0.76060027
0
Record to the debug log
def debug_log(self, buf, shell): # Handle Shell output if shell == True: self.debugfile.write("<shell time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" ) self.debugfile.write("<![CDATA["+buf+"]]></shell>\n") # Handle User Input else: self.debugfile.write("<user time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" ) self.debugfile.write("<![CDATA["+buf+"]]></user>\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(self, tag, message, exc_info=False): \n \n self.log(logging.debug,tag, message, exc_info)", "def debug(self, msg):\r\n self.logger.debug(msg)", "def debug_log(self, msg, *args, **kwargs):\n if self.debug:\n self.log.debug(msg, *args, **kwargs)", "def logdebug(self, msg):\n self.logger.debug(msg)", "def debug(self, msg):\n\n\t\tif( self.logger ): self.logger.debug( msg )", "def log_debug(self, line):\n logging.debug(\"Telemetry Logger - %s\" % line)", "def debug(self, message):\r\n pass", "def debug(self, msg):\n\n self.logger.debug(msg)", "def debug( cls, msg ):\n cls.log( logging.DEBUG, msg )", "def debug(self, msg):\n self.__logger.debug(msg)", "def log_debug(self, msg):\n self.log(msg, level=LOG_DEBUG)", "def dump(self):\n self.logger.debug(self)", "def debug(self, msg):\n\n if (self.logger): self.logger.debug(msg)", "def debug(self, msg: str):\n self._logger.debug(msg)", "def debug():", "def debug(self, msg):\n debug(msg)", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def _debuginfo(self,suspect,message):\n suspect.debug(message)\n self.logger.debug(message)", "def debug ( self , message , *args , **kwargs ) :\n return self.logger.debug ( message , *args , **kwargs )", "def debug(self, msg, *args, **kwargs):\n pass", "def _debug_log(self, msg):\n if not self.debug:\n return\n sys.stderr.write('{}\\n'.format(msg))", "def _debug_log(self, msg):\n if not self.debug:\n return\n sys.stderr.write('{}\\n'.format(msg))", "def log_debug(self, line):\n logging.debug(\"E-Mail - %s\" % line)", "def logDebug(self, msg) :\n if self.debug :\n sys.stderr.write(\"%s\\n\" % msg)\n sys.stderr.flush()", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)", "def debug(msg):", "def debug(self, msg, *args, **kwargs):\n self._logger.debug(msg, *args, **kwargs)", "def logDebug(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(DBG):\\t\", text)", "def _debuglog(self, string):\n\t\tif self.debug:\n\t\t\tsys.stderr.write(\"MemCached: %s\\n\" % string)", "def debug(tag, message=None):\n Log._post(\"debug\", tag, message)", "def debug(self, message):\n return self.log(\"DEBUG\", message)", "def debug(self, *args):\n\n if self.is_on(_Log.DEBUG):\n self._write(self._out, *args)", "def log_to_debug(self,\n line: str\n ) -> None:\n\n with open(\"Logs/debug.log\", \"a\") as f:\n f.writelines(f\"{self.user_key} | {datetime.now()} | {line}\\n\")", "def debug(self, *args, **kwargs):", "def debug(self, msg):\n if self._debug:\n print \"%s\" % (msg)", "def debug(self, message):\r\n if self._debug:\r\n print('[Debug] %s' % message)", "def debug(self, msg):\n\n self(msg, DEBUG)", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def debug(self, msg, stderr=False):\n self.log(msg, level=self.DEBUG, stderr=stderr)", "def write_debug_log(self, msg):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(self.debug_log, 'a+') as logfile:\n logfile.write(\"%s: %s\\n\" % (now, msg))", "def debug(module, message):\n if loggingLevel >= loggingLevelDebug:\n ModuLog.log(\"D\", module, message)", "def debug(self, message: str):\n self.log(Level.DEBUG, message)", "def debugLog(message):\n if debugFlag != None:\n print \"#debug: \" + str(message)", "def debug(self, *args):\r\n msg = \" \".join([str(x) for x in args])\r\n if not self.signal_debug(self, (msg)):\r\n logging.debug(msg)", "def debug_started(self, command):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n lines = 
[\n \"%s\\n\" % (\"*\" * self.line_lenght),\n \"Command: %s\\n\" % command,\n \"DateTime: %s\\n\" % now,\n \"%s\\n\" % (\"*\" * self.line_lenght)\n ]\n\n with open(self.debug_log, 'a+') as logfile:\n logfile.writelines(lines)", "def debug(self, message: str) -> None:\n\n self.__add_log(self.DEBUG, message)", "def log_debug(task_request, message):\n _log(logger.debug, task_request, message)", "def debug(self, msg, *args, **kwargs):\n logger = self.__get_logger()\n logger.debug(msg, *args, **kwargs)", "def debug(self, msg):\n if self.ansible._debug or self.ansible._verbosity > 2:\n self.ansible.log(f'[DEBUG] {msg}')", "def debug(cls, message):\n if cls.verbose:\n print('[DEBUG] {0}'.format(message))", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def log(self, message):", "def debug(self):\n raise NotImplementedError", "def debug(msg):\n log_msg(DEBUG, msg)", "def debug(self, message, *args, **kwargs):\n\n self.logger.debug(message, *args, **kwargs)", "def debug(self, *args, **kwargs):\n self.msg(logging.DEBUG, *args, **kwargs)", "def _log(self, message):\n pass", "def DEBUG(self, _strDebugMessage=\"\"):\n self.edLogging.DEBUG(_strDebugMessage)", "def debug(self, msg):\n debug_msg = self._debug_color\n debug_msg += \"[SHOULDER_DEBUG] \" + msg\n debug_msg += self._reset_color\n self.logger.debug(debug_msg)", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def output_debug_info(self):", "def debug(log):\n write(syslog.LOG_DEBUG, 'debug', '{log}'.format(log=log))", "def report_debug(self, rc):\n pass", "def enter_state(self):\r\n self.__log__(logging.debug)\r\n return", "def debug(self, debug):\n\n self._debug = debug", "def print_debug( cls, msg ): #\n if cls.__logger.getEffectiveLevel() <= logging.DEBUG :\n print( msg, flush = True )\n cls.__logger.debug( msg )", "def __debug(msg):\n\n pass", "def print_debug(self, msg):\n if self.debug:\n print(\"[DEBUG {0}] {1}\".format(datetime.datetime.now(), msg))", "def debug(self, text):\n if self.PRINT_DEBUG:\n print('[FileHistory] ' + text)", "def debug(self, log_msg, tags=None):\n now = datetime.datetime.now()\n log_level = \"debug\"\n datestamp = self.create_datestamp(now)\n timestamp = self.create_timestamp(now)\n hrtimestemp = self.create_human_readable_timestamp(now)\n tags = json.dumps(self.tags)\n log_body = self.log_builder(log_level, hrtimestemp, datestamp, timestamp, log_msg, tags)\n self.logger.debug(log_body)", "def debug(self, msg, *args):\n if self.lvl<=logging.DEBUG: return self._log(msg, *args)", "def debug(self, auth_token, message):\n\n self._log(auth_token, logging.DEBUG, message)", "def debug(self, query, fname, sample=-1):\n debug(self, query, fname, sample)", "def debug(self, module, message):\n if self.log_level <= consts.LOG_LEVEL_DEBUG:\n print(\"DEBUG : %s: %s\" % (module, message))", "def message_debug(self, m):\n self.message(m, logging.DEBUG)", "def log(self, msg):\n print(msg)", "def on_a(self):\r\n self.log()", "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "def debug(message):\n logging.getLogger().debug(message)", "def debug(self):\n #breakpoint() # infinite loop\n print(self.ttl)", "def record(self, step):", "def slot_debug(self, sender, (msg)):\r\n name = \"%s.%s\" % (sender.__class__.__module__, sender.__class__.__name__)\r\n logging.debug(\"%s:%s\", name, msg)", "def debug(msg):\n return log().debug(msg)", "def InsertLog():", "def record(self):\n # TODO: record the data", "def vv_flag():\n log.setLevel(logging.DEBUG)", "def log(msg):\n\n print('datastore: %s' % 
msg)", "def on_sync(self):\r\n self.log()", "def event_log(self):\n pass", "def debug(song_id: str, result_id: str, message: str) -> None:\n\n logger.log(MATCH, \"[%s|%s] %s\", song_id, result_id, message)", "def set_debug(self):\n self.logger.setLevel(5)\n if self.uses_adc:\n self.adc.logger.setLevel(5)", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def debugLog(self, logStr):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'debugLog', 'value': logStr}\n self._sendMessageToWeb(cmd)\n else:\n print(\"DebugLog: \" + logStr)", "def log_debug(var):\n\n GPS.Logger('testsuite').log(\"%s\" % (var, ))", "def debug(msg):\n #print(msg)\n pass\n #end debug", "def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))", "def debug(msg):\n if not DEBUG_ON:\n return\n print(\"DEBUG:\" + str(msg))", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug(message: str, *args: Any) -> None:\n Logger.log(logging.DEBUG, message, *args)", "def handle_debug(self, api, command):\n return self.handle_log(api, command, level=logging.DEBUG)", "def on_start(self):\r\n self.log()" ]
[ "0.77450716", "0.7598348", "0.7553728", "0.7524138", "0.75175226", "0.7485843", "0.7438288", "0.7433718", "0.7425045", "0.7416185", "0.74065304", "0.73936254", "0.7383948", "0.7310336", "0.72510886", "0.7248811", "0.7204248", "0.7197199", "0.71952343", "0.71625316", "0.7152155", "0.7152155", "0.70971835", "0.709637", "0.70630354", "0.7048951", "0.70480376", "0.70448476", "0.7017509", "0.70097995", "0.7006375", "0.69996697", "0.6969553", "0.6957354", "0.69517547", "0.69404405", "0.6882583", "0.68785965", "0.686398", "0.6862568", "0.6820534", "0.681419", "0.6808643", "0.68051904", "0.67973685", "0.6792572", "0.6780885", "0.6771641", "0.67665005", "0.67646706", "0.6756401", "0.675005", "0.67376316", "0.673453", "0.6728111", "0.671783", "0.67070323", "0.6706084", "0.67008686", "0.6683918", "0.6644496", "0.66320163", "0.6621818", "0.6614037", "0.66100687", "0.6597715", "0.65945965", "0.6564449", "0.6558798", "0.65577346", "0.65557116", "0.6549933", "0.65426135", "0.65325755", "0.6519132", "0.6505891", "0.64797115", "0.6468045", "0.6462567", "0.6460924", "0.64585644", "0.6447469", "0.64392126", "0.6437132", "0.64173794", "0.6414192", "0.6408878", "0.64033854", "0.639638", "0.63908577", "0.6389606", "0.6379393", "0.6349841", "0.63405716", "0.63306326", "0.6304655", "0.6300877", "0.6299281", "0.6299221", "0.6291018", "0.6274346" ]
0.0
-1
Strip all control characters and nonUTF8 characters from a file. Prints the output to standard out
def sanitize_file(infilename, outfilename): fout = codecs.open(outfilename, encoding="utf-8", mode="w") for line in codecs.open(infilename, encoding="utf-8"): fout.write(sanitize(line))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.rstrip('\\n')\n output_file.write(line)", "def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def strip_unsafe_characters(filename: str):\n return \"\".join([c for c in filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()", "def txt2txt(fname, skip=None):\n with codecs.open(fname, 'r', encoding='utf-8') as f_in:\n content = f_in.read()\n\n\tcontent.replace('\\r', '').replace('\\x0C', '')\n\n return content", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def stripchars(target_folder):\n\tfor filepath in iglob(target_folder, recursive=True):\n\t\tp = Path(filepath)\n\t\tfn = p.parts[-1]\n\n\t\twith open(filepath, encoding='cp1252') as file:\n\t\t\tlogger.info(f'read: {fn}')\n\t\t\tfor line in file:\n\t\t\t\toutput = line.strip().replace(\"Ð\", \"–\").replace(\"Õ\", \"'\").replace(\"Ô\", \"'\").replace(\"Ž\", \"é\").replace(\"Ò\", \"'\").replace(\"Ó\", \"'\").replace(\"ª\", \"™\").replace(\"’\", \"'\").replace(\"‘\", \"'\").replace(\"–\",\"–\")\n\t\t\t\t# only write back the block paragraph by stripping other shorter lines\n\t\t\t\tif len(output) >= 7:\n\t\t\t\t\twith open(filepath, 'w') as file:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfile.write(output)\n\t\t\t\t\t\t\tlogger.info(f'write {fn}')\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tlogger.error(e)\n\t\t\t\t\t\t\tcontinue", "def strip_ansi_escape(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return re.sub(r\"\\x1b[^m]*m\", \"\", data)", "def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)", "def strip_ansi(content):\n return ANSI_ESCAPES_REGEX.sub('', content)", "def main():\n try:\n filename = sys.argv[1]\n except IndexError:\n sys.exit(\"Usage: TODO\")\n\n with codecs.open(filename, \"r+\", encoding=\"utf8\") as f:\n nb = read(f, as_version=NO_CONVERT)\n stripped = strip_output(nb)\n return stripped", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def sanitize(buf,\n backspaces=['\\x08\\x1b[K', '\\x08 \\x08'],\n escape_regex=re.compile(r'\\x1b(\\[|\\]|\\(|\\))[;?0-9]*[0-9A-Za-z](.*\\x07)?')):\n # Filter out control characters\n\n # First, handle the backspaces.\n for backspace in backspaces:\n try:\n while True:\n ind = buf.index(backspace)\n buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))\n except:\n pass\n\n strip_escapes = escape_regex.sub('',buf)\n\n # strip non-printable ASCII characters\n\n clean = ''.join([x for x in strip_escapes if is_printable(x)])\n return clean", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def strip_non_unicode(value):\n UNICODE_PATTERN = r'[^\\x00-\\x7F]+'\n try:\n value = re.sub(UNICODE_PATTERN, '', value)\n 
return value.strip()\n except Exception:\n return value", "def copy_and_strip_bom(infilename, outfilename):\r\n buffer_size = 4096\r\n\r\n with open(infilename, \"r+b\") as infile:\r\n with open(outfilename, \"wb\") as outfile:\r\n chunk = infile.read(buffer_size)\r\n if chunk.startswith(codecs.BOM_UTF8):\r\n chunk = chunk[BOMLEN:]\r\n while chunk:\r\n outfile.write(chunk)\r\n chunk = infile.read(buffer_size)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def strip_other_charcter():\n pass", "def _remove_accents_(unicode_filename):\n valid_characters = bytes(b'-_.() 1234567890abcdefghijklmnopqrstuvwxyz')\n cleaned_filename = unicodedata.normalize('NFKD', unicode_filename).encode('ASCII', 'ignore')\n\n new_filename = \"\"\n\n for char_int in bytes(cleaned_filename):\n char_byte = bytes([char_int])\n if char_byte in valid_characters:\n new_filename += char_byte.decode()\n\n return new_filename", "def strip_illegal_chars(filename: str) -> str:\n if OPTIONS['download']['ascii']:\n return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII)\n else:\n return ''.join(i for i in filename if i not in FILENAME_BANNED)", "def stripped_tokens_of_file(filename):\n tokens = list(tokenize.tokenize(BytesIO(bytes(open(filename).read(),\"utf8\")).readline))\n str_tokens = [x.string for x in tokens]\n str_tokens = [x for x in str_tokens if x != '\\n']\n str_tokens = [x for x in str_tokens if x.strip() != '']\n # remove comments\n str_tokens = [x for x in str_tokens if len(x) >= 0 and x[0] != '#']\n return str_tokens[1:] # get rid of utf-8 encoding prefix", "def strip_ansi(text):\n return ANSI_ESCAPE_RE.sub('', text)", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. 
For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def _strip_nul(text):\n return text.replace('\\x00', '<NUL>')", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def _remove_accents(unicode_filename):\n # noinspection PyBroadException\n try:\n unicode_filename = unicode_filename.replace(\" \", \"_\")\n cleaned_filename = unicodedata.normalize('NFKD', unicode_filename).encode('ASCII', 'ignore').decode('ASCII')\n\n cleaned_filename = re.sub(r'[^\\w\\s-]', '', cleaned_filename.strip().lower())\n cleaned_filename = re.sub(r'[-\\s]+', '-', cleaned_filename)\n\n return cleaned_filename\n except:\n traceback.print_exc()\n return unicode_filename", "def strip_fileendings(fName):\n with open(fName, 'rb') as f:\n data = f.readlines()\n\n data_new = [d.replace(\"\\r\\n\", \"\\n\") for d in data]\n\n with open(fName, 'wb') as f:\n for row in data_new:\n f.write(row)", "def char_strip(self):\n\n if not self.file_list:\n self.print_to_log(\"No files fit parameters, exiting\")\n return None\n\n\n result = []\n\n #pass list of files, set to inplace, and byte mode\n fi = fileinput.FileInput(self.file_list,\n inplace=1,\n mode='U')\n fname = \"\"\n count = 0\n self.error = 0\n for line in fi:\n\n #create info for logging\n if fi.isfirstline():\n #skip for first file\n if fi.lineno() > 1:\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, self.char_to_strip, self.char_for_replace, str(count)))\n count = 0\n fname = fi.filename()\n ltemp = ''\n #test and replace\n for char in line:\n if char == self.char_to_strip:\n count += 1\n #if you need to handle occurrences in the batch file\n self.error = 1\n char = self.char_for_replace\n ltemp += char\n sys.stdout.write(ltemp)\n fname = fi.filename()\n #logging for last file\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, self.char_to_strip, self.char_for_replace, str(count)))\n fi.close()\n #write out to log\n for item in result:\n self.print_to_log(item)", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return 
\"\".join(output)", "def strip_raw_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.strip_raw_codes(string)", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def remove_non_ascii(words):\n removed_nonascii = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n removed_nonascii.append(new_word)\n return removed_nonascii", "def _remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n #Revisar esta funcion porque no filtra nada...\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def swallow_windows_unicode(fileobj, rewind=True):\n\tif rewind:\n\t\ttry:\n\t\t\tpos = fileobj.tell()\n\t\texcept:\n\t\t\tpos = None\n\n\ttry:\n\t\tbom = fileobj.read(3)\n\texcept:\n\t\t# End of file, revert!\n\t\tfileobj.seek(pos)\n\tif bom == '\\xef\\xbb\\xbf':\n\t\treturn fileobj\n\n\t# Bytes not present, rewind the stream\n\tif rewind:\n\t\tif pos is None:\n\t\t\t# .tell is not supported, dump the file contents into a cStringID\n\t\t\tfileobj = StringIO(bom + fileobj.read())\n\t\telse:\n\t\t\tfileobj.seek(pos)\n\treturn fileobj", "def read_file_pretty(input_file):\n\n\ttext = open(input_file)\n\traw = text.readlines()\n\tdecoded = [line.decode('utf8') for line in raw]\n\tlines = [line.strip() for line in decoded if line.strip() != '']\n\tlines = [(\"<p>\" + line + \"</p>\") for line in lines]\n\tlines.insert(0, '<meta charset=\"UTF-8\">')\n\n\treturn lines", "def cleanse_filename(fname):\n fname = os.path.split(fname)[1]\n INVALID = u\"\\\"*/:<>?\\\\|\"\n VALID_RANGE = range(128)\n result = []\n for c in fname:\n val = ord(c)\n if not c in INVALID and val in VALID_RANGE:\n result.append(c)\n else:\n result.append(u\"_\")\n result = u\"\".join(result)\n return result.replace(u\" \", u\"_\")", "def sanitize_unicode(value):\n return re.sub(\"[\\x00-\\x08\\x0B\\x0C\\x0E-\\x1F\\uD800-\\uDFFF\\uFFFE\\uFFFF]\", \"\", value)", "def _strip_ansi(s):\n if isinstance(s, str):\n return _ansi_codes.sub(r\"\\4\", s)\n else: # a bytestring\n return _ansi_codes_bytes.sub(r\"\\4\", s)", "def clean_filename(self, filename):\n return remove(filename,self.unwanted_chars_in_filenames)", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def 
remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def clean_crlf(fpath):\n sub = path.basename(path.dirname(fpath))\n \n with open(fpath, 'rb') as f:\n raw_content = f.read()\n lfnull_content = raw_content.replace(b'\\r',b'')\n \n outpath = path.join('..','sourcedata','ds3','sub-'+sub,'sub-'+sub+'_task-all_beh.tsv')\n with open(outpath, 'w') as f:\n f.write(lfnull_content.decode(\"utf-8\"))\n\n return(pd.read_csv(outpath, delimiter='\\t'))", "def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii", "def remove_non_ascii(words):\r\n new_words = []\r\n for word in words:\r\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\r\n new_words.append(new_word)\r\n return new_words", "def remove_bom(file):\n bufsize = 4096\n bomlen = len(codecs.BOM_UTF8)\n\n with open(file, \"r+b\") as fp:\n chunk = fp.read(bufsize)\n if chunk.startswith(codecs.BOM_UTF8):\n i = 0\n chunk = chunk[bomlen:]\n while chunk:\n fp.seek(i)\n fp.write(chunk)\n i += len(chunk)\n fp.seek(bomlen, os.SEEK_CUR)\n chunk = fp.read(bufsize)\n fp.seek(-bomlen, os.SEEK_CUR)\n fp.truncate()", "def preprocess_ng_file(self):\n with open(self.ng_name, \"r\") as f:\n no_null_chars = f.read().replace(\"\\0\", \"\")\n\n with open(self.ng_name, \"w\") as f:\n f.write(no_null_chars)", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def remove_unicode(str):\n return unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s", "def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s", "def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))", "def scrub_output(output):\n ansi_escape = re.compile(r'\\x1b[^m]*m')\n return ansi_escape.sub('', output)", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def read_file(filename=\"\"):\n with open(filename, encoding=\"UTF-8\") as f:\n for line in f:\n print(line, end='')", "def strip_newlines(file):\n newline_regex = re.compile(os.linesep + \"$\")\n \n strings = [] \n for line in file:\n string = newline_regex.sub(\"\", line)\n strings.append(string)\n\n return strings", "def 
text_cr(file):\r\n f = open(file, 'r', encoding = 'utf-8-sig')\r\n text = f.read()\r\n f.close()\r\n return text", "def fix_xml_encoding(self, file_path):\n\n with open(file_path + self.infile, 'rb') as original:\n with open(file_path + \"Temp File.txt\", 'wb') as temp:\n [temp.write(row.replace(\"utf-16\", \"utf-8\")) for row in original]\n\n os.remove(file_path + self.infile)\n\n with open(file_path + \"Temp File.txt\", 'rb') as temp:\n with open(file_path + self.infile, 'wb') as new:\n [new.write(row) for row in temp]\n\n os.remove(file_path + \"Temp File.txt\")", "def cleanFilename(filename):\n badChars = {ord('?'): None, ord('*'): None, ord('/'): None,\n ord('\\\\'): None, ord(':'): None, ord('\"'): \"''\",\n ord('<'): None, ord('>'): None, ord('|'): None}\n return filename.translate(badChars)", "def clean(row):\r\n for v in row:\r\n \tv = v.replace(\"\\xef\\xbb\\xbf\",\"\")\r\n return row", "def clean_file(file_contents):\n commentless_file = _strip_comments(file_contents)\n assembly_code = _remove_whitespace(commentless_file)\n return assembly_code", "def strip_raw_codes(self, string):\n return self.ansi_regex.sub(\"\", string)", "def sanitize_filename(f):\n keepchars = (\" \", \".\", \"_\")\n return \"\".join(c for c in f if c.isalnum() or c in keepchars).rstrip()", "def safe(self, string):\n if sys.version_info.major >= 3 and isinstance(string, bytes):\n string = string.decode('utf8')\n elif sys.version_info.major < 3:\n if not isinstance(string, unicode):\n string = unicode(string, encoding='utf8')\n string = string.replace('\\n', '')\n string = string.replace('\\r', '')\n return string", "def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()", "def strip_eps_font(filename):\r\n inf = open(filename)\r\n filecache = []\r\n in_ttf = False\r\n for line in inf:\r\n if \"Bitstream\" in line:\r\n line = line.replace(\"BitstreamVeraSans-Roman\", \"Arial\")\r\n if line.startswith(\"\"\"%%BeginFont\"\"\"):\r\n in_ttf = True\r\n if line.startswith(\"\"\"%%EndFont\"\"\"):\r\n in_ttf = False\r\n continue\r\n if in_ttf:\r\n continue\r\n else:\r\n filecache.append(line)\r\n\r\n inf.close()\r\n ouf = open(filename, \"w+\")\r\n ouf.write(''.join(filecache))\r\n ouf.close()", "def test_file_utf8_write_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(self.unicode_string, unicode_text)", "def sexyStrip(dataFile):\n correct = open('correct.txt', 'w+')\n for line in dataFile:\n stripLines = line.rstrip()\n fixedLines = stripLines + \"\\n\"\n correct.write(fixedLines)\n correct.close()", "def processFilename(filename):\n\n badchars = [\" \", \",\", \"+\", \"$\", \"_\", \"{\", \"}\", \"/\", \"&\"]\n fn = filename\n for bc in badchars:\n fn = fn.replace(bc, \"\")\n return fn", "def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])", "def stripchar(self):\n self.error = 0\n regexobj = re.compile(self.char_to_strip)\n\n def process_file(in_file, out_file):\n reader = csv.reader(in_file)\n writer = csv.writer(out_file, reader.dialect)\n char_strip_count = 0\n curr_line_number = 0\n line_changed = []\n\n for line in reader:\n curr_line_number += 1\n temp = []\n #alt_line = [[new row], replacement count]]\n line_alt_count = 0\n for item in line:\n new_item, count_temp = regexobj.subn(self.char_for_replace, item)\n temp.append(new_item)\n line_alt_count += count_temp\n if line_alt_count:\n self.error = 1\n 
line_changed.append(curr_line_number)\n char_strip_count += line_alt_count\n #keep only one line in memory\n writer.writerow(temp)\n self.print_to_log(\n \"\"\"Processed file: \\\"%s\\\", replaced %s characters on %s lines \\r\\nAltered Lines: %s\"\"\"\n % (str(out_file.name), str(char_strip_count), str(len(line_changed)), str(line_changed)))\n\n for f in self.file_list:\n try:\n shutil.copyfile(f, f + '.backup')\n in_file = open(f + '.backup', 'rU')\n out_file = open(f, 'wb')\n process_file(in_file, out_file)\n in_file.close()\n out_file.close()\n os.remove(f + '.backup')\n except OSError:\n self.print_to_log('Can not make backup of file: %s' % f)\n self.error = 1\n except IOError:\n self.print_to_log('Can not open backup file or write to new file: %s' % f)\n self.error = 1\n except:\n self.print_to_log('Total Failure on file %s' % f)\n self.error = 1", "def cleanupCharEncodingHandlers():\n libxml2mod.xmlCleanupCharEncodingHandlers()", "def normalize_unicode_data(data):\n normalized_data = unicodedata.normalize('NFKD', data).encode('ascii', 'ignore')\n return normalized_data", "def strip_ansi(text: str):\n return _ANSI_SEQUENCE_REGEX.sub('', text)", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text", "def normalize_file(in_file, out_file):\n with open(in_file, \"r\") as book, open(out_file, \"w\") as out:\n for line in book:\n if line.strip() == \"\":\n continue\n\n line = normalize_text(line)\n\n out.write(line.lstrip())", "def __raw(self, lines):\n os.sys.stdout.write(u\"\\n\".join(lines).encode('utf-8', 'ignore'))\n os.sys.stdout.flush()", "def stripEscapes(s):\r\n result = ''\r\n show = 1\r\n i = 0\r\n L = len(s)\r\n while i < L:\r\n if show == 0 and s[i] in ANSI_TERMINATORS:\r\n show = 1\r\n elif show:\r\n n = s.find(ANSI_ESCAPE_BEGIN, i)\r\n if n == -1:\r\n return result + s[i:]\r\n else:\r\n result = result + s[i:n]\r\n i = n\r\n show = 0\r\n i += 1\r\n return result", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def sanitize(instring):\r\n return instring.encode('ascii','replace')", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def test_common_non_ascii_positive(tmp_path):\n d = tmp_path\n some_file = d / \"test_01.txt\"\n some_file.write_text(\"\\\\u00fc! 
, : \\\\u00f6 \\\\u00f6 asdf\")\n assert get_most_common_non_ascii_char(str(some_file)) == \"\\u00f6\"", "def strip_file(f,leave_header=True):\n thefile = open(f)\n if leave_header : n=1\n else : n = 2\n r = map(str.strip,thefile.readlines())\n thefile.close()\n try :\n r = r[r.index(\"\")+n:]\n except :\n print( \"Incorrect headers in %s\" % f)\n \n return(r)" ]
[ "0.73079276", "0.68050337", "0.638918", "0.6264593", "0.6186959", "0.61030376", "0.6031194", "0.59303033", "0.5906998", "0.5861738", "0.58548003", "0.5850855", "0.58266735", "0.58138204", "0.5802653", "0.5782039", "0.57783765", "0.572407", "0.57045716", "0.5688748", "0.56876314", "0.56560564", "0.5633553", "0.56285113", "0.5614045", "0.5593968", "0.55579364", "0.5544863", "0.55385524", "0.55385524", "0.55366766", "0.5520549", "0.55072", "0.5492951", "0.54894173", "0.54840153", "0.54674625", "0.5446879", "0.5445589", "0.543517", "0.5429703", "0.5416508", "0.541636", "0.5411577", "0.5405157", "0.53977644", "0.5392779", "0.5384861", "0.53799725", "0.53799725", "0.53799725", "0.53799725", "0.53799725", "0.53799725", "0.5365892", "0.53574157", "0.5353021", "0.53411305", "0.5332347", "0.532797", "0.53242105", "0.532236", "0.53126365", "0.53126365", "0.53020287", "0.5289385", "0.5280405", "0.5280224", "0.5280129", "0.52762085", "0.5272198", "0.52714676", "0.52460676", "0.52283496", "0.52210075", "0.521991", "0.5215458", "0.52138174", "0.5210064", "0.5205268", "0.5196647", "0.519369", "0.51898026", "0.51856107", "0.5177536", "0.5176958", "0.5170105", "0.5162697", "0.51502895", "0.5149365", "0.5144304", "0.51438713", "0.51421124", "0.51347053", "0.51216996", "0.5115537", "0.5111698", "0.51033497", "0.5095914", "0.5095076" ]
0.6622482
2
get the report template
def _report_template(): current_dir = Path(__file__).parent with open(current_dir / "report_template.html", "r") as f: template = f.read() template = re.sub(r"\s{2,}", " ", template) template = re.sub(r"\n", "", template) template = re.sub(r"> <", "><", template) return template
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_template(self):\n try:\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_code = Path(f'{template_path}/{self._get_template_filename()}').read_text()\n # substitute template parts\n template_code = self._substitute_template_parts(template_code)\n except Exception as err: # noqa: B902; just logging\n current_app.logger.error(err)\n raise err\n return template_code", "def get_template(self):\n return self.template", "def _get_template_filename(self):\n file_name = ReportMeta.reports[self._report_key]['fileName']\n return '{}.html'.format(file_name)", "def get_template(self):\n template_string = self.remgr.render_template(self)\n return self.provider.format_template(template_string)", "def template(self):\n return self._template", "def template(self):\n return self._template", "def template(self):\n return self._template", "def get_template(self):\n if self.get_website:\n return self.get_website.get_template()\n else:\n return default_entity.get_website.get_template()", "def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)", "def template(self):\n return self.conf.get(\"template\", None)", "def GetTemplate(self, _page_data):\n return self.template", "def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)", "def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None", "def get_template(self, templateType, blogid=1):\n return self.execute(\"metaWeblog.getTemplate\", self.appkey, blogid, self.username, self.password, templateType)", "def get_html_report(self) -> str:\n template_contents = dict(\n vendor_bundle_js=self.vendor_bundle,\n app_bundle_js=self.app_bundle,\n # results\n results=self.results,\n # account metadata\n account_id=self.account_id,\n account_name=self.account_name,\n report_generated_time=str(self.report_generated_time),\n cloudsplaining_version=__version__,\n )\n template_path = os.path.dirname(__file__)\n env = Environment(loader=FileSystemLoader(template_path)) # nosec\n template = env.get_template(\"template.html\")\n return template.render(t=template_contents)", "def template(self):\n with open(self.compute.submission_template, \"r\") as f:\n return f.read()", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def get_template(self):\n model = self.get_object()\n template_name = self.model_template_name or 'template'\n try:\n template_string = getattr(model, template_name)\n except AttributeError as e:\n raise ImproperlyConfigured(\n \"%(model)s is missing a template. 
Define \"\n \"%(model)s.template, %(cls)s.model_template_name \"\n \"or override %(cls)s.get_template().\" % {\n 'model': model.__class__.__name__,\n 'cls': self.__class__.__name__\n }\n )\n return template_string", "def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")", "def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. 
Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def template(self) -> 'outputs.PipelineTemplateResponse':\n return pulumi.get(self, \"template\")", "def get_template(self):\n endpoint = \"/isam/wga_templates/dynurl_template\"\n response = self.client.get_json(endpoint)\n response.success = response.status_code == 200\n return response", "def get_string(self):\n self._populate_output()\n string = self._jinja_template.render(\n outp=self._outp, config=self._report_generator.config)\n return string", "def html_template_file(self):\n pass", "def get_template(self, format):\n for pattern, converter in self._patterns:\n if converter.format == format:\n template = pattern.generate('{name}')\n if template:\n return template\n return '{name}' f'.{format}'", "def _get_template(self, template_name):\n if template_name not in self.chached_templates:\n self.chached_templates[template_name] = self.env.get_template(template_name)\n return self.chached_templates[template_name]", "def get_report(self) -> str:\n return self.diagnostics.get_report()", "def _setup_report_data(self):\n # current_app.logger.debug('Setup report data template starting.')\n template = self._get_template()\n current_app.logger.debug('Setup report data template completed, setup data starting.')\n data = {\n 'reportName': self._get_report_filename(),\n 'template': template,\n 'templateVars': self._get_template_data()\n }\n current_app.logger.debug('Setup report data completed.')\n return data", "def get_template(template_file=None, default_template=None):\n if template_file is None:\n template_file = os.path.join(os.path.dirname(__file__), default_template)\n with open(template_file) as f0:\n job_template = Template(f0.read())\n return job_template", "def generate_report(template_filename, report_title, report_dir):\n\n def inner(output_dir: Optional[str] = None):\n output_dir = output_dir or report_dir\n with open(template_filename) as fd:\n template = jinja2.Template(fd.read())\n\n template.globals.update(\n {\"date\": str(datetime.datetime.now()), \"lettergen\": lettergen, \"zip\": zip}\n )\n\n headers = iterfiles(output_dir, \"head.\")\n results = iterfiles(output_dir, \"result.\")\n stream = template.stream(headers=headers, results=results, project=report_title)\n artifact = os.path.join(output_dir, \"index.html\")\n stream.dump(artifact)\n logging.info(f\"Created report: {artifact}\")\n\n return inner", "def get_template(self):\n if not self.cache:\n self.close()\n return self.cache", "def _get_template(specified_template, default_template):\n template_file_path = 
specified_template\n if template_file_path:\n if not (os.path.exists(template_file_path) and os.path.isfile(template_file_path)):\n LOG.error(u\"Template file: %s doesn't exist, using default template\",\n template_file_path)\n template_file_path = None\n\n if not template_file_path:\n # using default template\n template_file_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n default_template\n )\n\n LOG.debug(u\"template file used: %s\", template_file_path)\n with open(template_file_path, \"r\") as definition:\n return definition.read()", "def _get_report(self, entry):\n script = entry.get('@fields').get('script_name', '')\n message = entry.get('@message').encode('utf8')\n error = entry.get('@context').get('error', 'n/a').encode('utf8')\n\n # extract SQL from the error\n (error, sql) = self.extract_error_and_sql(error)\n\n description = self.REPORT_TEMPLATE.format(\n full_message=message,\n error=error,\n sql=sql,\n details=json.dumps(entry, indent=True)\n ).strip()\n\n report = Report(\n summary='{} - {}'.format(script, message),\n description=description,\n label=self.REPORT_LABEL\n )\n\n return report", "def cheetah_template(self, pre=False):\n if self.is_req_output:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_output:\n xml_out = self.xml_out\n xml_out['out_sel_name'] = self.out_sel_name\n cht_tmpl = self.file_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and not pre:\n if self.pname in self.gen_in_fmt:\n if self.gen_in_fmt[self.pname] == 'vcf,vcf_bgzip':\n cht_tmpl = self.vcf_choose\n else:\n cht_tmpl = PercentTemplate(self.reg_arg)\n elif self.pname in self.tool_data[self.tool_name]['input_fmt']:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and pre:\n cht_tmpl = self.vcf_tabix\n return cht_tmpl.substitute(self.xml_out)\n else:\n if self.xml_out['section'] not in ['required']:\n template_string = self.ext_arg\n else:\n template_string = self.reg_arg\n if self.xml_out['type'] == 'boolean':\n cht_tmpl = PercentTemplate(template_string.replace('%argument ', ''))\n else:\n cht_tmpl = PercentTemplate(template_string)\n return cht_tmpl.substitute(self.xml_out)", "def get_template_path(self):\n raise NotImplementedError()", "def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = 
self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def get_base_template(self):\n\n return self._original", "def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")", "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def get_template(self, name):\n with open(name, 'r+') as open_f:\n template_content = open_f.read()\n return template_content", "def _get_mail_template(request, issue, full_diff=False):\n context = {}\n template = 'mails/comment.txt'\n if request.user == issue.owner:\n query = models.Message.query(\n models.Message.sender == request.user.email(), ancestor=issue.key)\n if query.count(1) == 0:\n template = 'mails/review.txt'\n files, patch = _get_affected_files(issue, full_diff)\n context.update({'files': files, 'patch': patch, 'base': issue.base})\n return template, context", "def get_configuration_template(self):\n return CONFIG_TEMPLATE", "def template_data(self) -> Any:\n return pulumi.get(self, \"template_data\")", "def get_template(rootdir = None):\n if rootdir is None:\n rootdir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(rootdir, 'st_index_pyt.html'), 'r')\n index_template = string.Template(f.read())\n f.close()\n return index_template", "def template_data(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"template_data\")", "def get(self):\n return self._template_data()", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def create_report(self, output):\n if output == 'xml':\n report = super(Report, self).create_report()\n return report\n elif output == 'csv':\n return self.statement_detail_csv()", "def get_template(self, name):\n return self.templates.get(name)", "def get_wrapper_template():\n\n return wrapper_templates.template_collection", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = 
os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def generate_report(self):\n if self.submission_metadata:\n return self._submission_allowed()[1]", "def template_path(self):\n return self.get_config(\"templates\")", "def __generate_flowcell_report_text__(self,config,mockdb,report_type=\"subset_report\"):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n dictionary.update({k:str(v)})\n pdf_report = initialize_standard_doc(self.report_pdf)\n pdf_elements = []\n outlier_table = produce_outlier_table(config,mockdb,self.current_report)\n if outlier_table is None:\n template_subject = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_subject'))\n template_body = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_no_outliers_body'))\n else:\n outlier_table += \"\\n\"\n outlier_table_for_pdf(config,mockdb,pdf_elements,self.current_report)\n template_subject = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_subject'))\n template_body = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_body'))\n image_files = []\n image_files.append(self.concordance_jpeg)\n image_files.append(self.hethomratio_jpeg)\n image_files.append(self.dbsnp_jpeg)\n image_files.append(self.greater_than_10x_jpeg)\n image_files.append(self.zero_coverage_jpeg)\n pdf_elements.extend(add_square_images(image_files))\n pdf_report.build(pdf_elements)\n sample_keys = self.sample_keys.split(\";\")\n number_samples = len(sample_keys)\n dictionary.update({'number_samples': str(number_samples)})\n subject = fill_template(template_subject,dictionary)\n body = fill_template(template_body,dictionary)\n return subject, body", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def get_config_template(self) -> cconfig.Config:", "def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n },\n }\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `_get_report_values()` and pass `data` automatically.\n return self.env.ref('base_enh.recap_report').report_action(self, data=data)", "def gen_html_report(self, html_report_name=None, html_report_template=None):\n return render_html_report(\n self.summary,\n html_report_name,\n html_report_template\n )", "def getreport(planid, stamp):\n try:\n plan = Plan.objects.get(pk=planid)\n except:\n return 'error'\n\n filename = '%s_p%d_v%d_%s' % (plan.owner.username, plan.id,\n plan.version, stamp)\n\n return '/reports/%s.html' % filename", "def get_template_object(self):\n return Engine().from_string(self.get_template())", "def create_template(self):\n return '{}/{}.html'.format(self.object_name, 
self.create_endpoint)", "def _get_template(self, template_name, template_file):\n template = os.path.join(self.location, 'templates',\n template_name, template_file)\n return jinja2.Template(open(template).read())", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def read_template(self):\n template_file = open(self._template)\n template = template_file.readlines()\n template_file.close()\n return template", "def generate_report(cls, report_tmpl, ctx=None, objects=[], rep_exec_type=None,\n report_format=None, view=False, tmpl_file_id=None, dlg_args={}):\n # get the report template and source\n if isinstance(report_tmpl, basestring):\n tmpl = XMLReportTemplate.KeywordQuery(cdb_object_id=report_tmpl)\n else:\n tmpl = XMLReportTemplate.KeywordQuery(name=report_tmpl[\"name\"],\n iso_code=report_tmpl[\"iso_code\"],\n report_title=report_tmpl[\"report_title\"])\n if not tmpl:\n raise ue.Exception(\"powerreports_tmpl_not_found_for_keys\", (\"%s\" % report_tmpl))\n tmpl = tmpl[0]\n source = tmpl.XMLSource\n if not source:\n raise RuntimeError(\"powerreports_source_not_found_for_tmpl\", tmpl.GetDescription())\n\n # check if we have a supported Excel file\n try:\n cdbfile = None\n if tmpl_file_id is not None:\n cdbfile = CDB_File.ByKeys(cdb_object_id=tmpl_file_id)\n cdbfile = filter(lambda f: os.path.splitext(f.cdbf_name)[1].lower()\n in SUPPORTED_FILETYPES, [cdbfile])[0]\n else:\n xls_list = filter(lambda f: os.path.splitext(f.cdbf_name)[1].lower()\n in SUPPORTED_FILETYPES, tmpl.Files)\n cdbfile = (filter(lambda f: int(f.cdbf_primary) == 1, xls_list) or xls_list)[0]\n except Exception:\n if cdbfile is not None:\n raise ue.Exception(\"powerreports_tmpl_file_type_not_supported\", cdbfile.cdbf_name)\n else:\n oid = \"\" if not tmpl_file_id else (\" (OID=%s)\" % tmpl_file_id)\n raise ue.Exception(\"powerreports_primary_tmpl_file_not_found\", oid)\n\n # setup args: additional parameters defined by report specific dialogs\n # are passed as kwargs to the XMLSource and the assigned data 
providers\n report = tmpl.XMLReport\n args = report.getParameters() # public and user specific defaults\n\n # Get report type and format from report config when not given via method args (E046596)\n if not rep_exec_type:\n rep_exec_type = report.cdbxml_rep_exec_type\n if not report_format:\n report_format = report.cdbxml_report_format\n\n if ctx is not None:\n for attr in ctx.dialog.get_attribute_names():\n # skip if standard dlg attribute\n if attr in [\"cdbxml_source_name\",\n \"cdbxml_report_title\",\n \"cdbxml_report_tmpl_title\",\n \"cdbxml_context\",\n \"cdbxml_card\"]:\n # we need following values in 'MakeReportURL'\n # \"cdbxml_report_lang\",\n # \"cdbxml_rep_exec_type\",\n # \"cdbxml_report_format\"\n continue\n if (not source.context) or (attr not in cls.KeyNames()):\n args[attr] = ctx.dialog[attr]\n # overwrite with optional parameter\n args.update(dlg_args)\n\n # get context object(s) from ctx if required\n if ctx is not None and source.context and not objects:\n objects = cls.PersistentObjectsFromContext(ctx)\n\n # build report filename(s)\n timestamp = time.strftime(\"%a-%d-%b-%Y-%H-%M-%S\", time.localtime(time.time()))\n fbasename = \"%s_%s_%s%s\" % (tmpl.title,\n timestamp,\n auth.persno,\n os.path.splitext(cdbfile.cdbf_name)[1])\n fbasename = re.sub(\":|\\\\\\|/|<|>|:|\\*|\\?|\\\"|\\|\", \"\", fbasename)\n fname = os.path.join(CADDOK.TMPDIR, fbasename)\n\n # perform actions\n if rep_exec_type == 'Client':\n try:\n cdbfile.checkout_file(fname)\n except Exception as ex:\n raise ue.Exception(\"powerreports_tmpl_file_not_loaded\",\n cdbfile.cdbf_name, unicode(ex))\n\n try:\n if report_format.startswith('Excel'):\n # copy report template file and set for viewing\n ctx.file(fname)\n # can the user update the report later on?\n updateable = \"0\" if (source.context and source.ContextProvider and\n (source.ContextProvider.card() == N)) else \"1\"\n # create the export file(s) and upload it to the client\n result_fname = source.export_ex(objects, fname, tmpl.cdb_object_id,\n cdbfile.cdb_object_id, updateable, **args)\n xml_clntfname = os.path.join(client.viewDir, os.path.basename(result_fname))\n ctx.upload_to_client(result_fname, xml_clntfname, delete_file_after_upload=1)\n return result_fname\n except Exception as ex:\n if os.path.exists(fname):\n os.remove(fname)\n misc.log_traceback(\"\")\n raise ex\n\n elif rep_exec_type.startswith('Server'):\n from cs.tools.powerreports.reportserver.report_client \\\n import ReportClientMQ, ReportClientRemote\n SUPPORTED_RESULT_FORMATS = [\"xls\", \"pdf\"]\n # extend args\n sys_args = {}\n oids = [obj.ID() for obj in objects]\n sys_args[\"objects\"] = oids\n sys_args[\"persno\"] = auth.persno\n sys_args[\"target\"] = os.path.splitext(fbasename)[0]\n sys_args[\"source\"] = source.ID()\n sys_args[\"rep_exec_type\"] = rep_exec_type\n sys_args[\"report_format\"] = report_format\n sys_args[\"rep_lang\"] = (ctx.ue_args.lang if (ctx and hasattr(ctx, \"ue_args\")\n and hasattr(ctx.ue_args, \"lang\"))\n else tmpl.iso_code)\n args[\"__sys_args__\"] = sys_args\n\n if rep_exec_type == 'Server (asynchron)':\n # send it queue-based as mail\n rc = ReportClientMQ(tmpl.cdb_object_id, cdbfile.cdb_object_id, **args)\n rc.create_report()\n\n elif rep_exec_type == 'Server (synchron)':\n ret = {\"status\": \"\",\n \"xls\": None,\n \"pdf\": None}\n try:\n rc = ReportClientRemote(tmpl.cdb_object_id, cdbfile.cdb_object_id, **args)\n ret = rc.create_report(target_path=CADDOK.TMPDIR)\n\n if ret[\"status\"] == \"OK\":\n files = [ret[frmt] for frmt in 
SUPPORTED_RESULT_FORMATS\n if ret[frmt] and os.path.exists(ret[frmt])]\n if not view:\n return files\n\n if report_format == \"Excel\":\n ctx.file(view_filename=ret[\"xls\"], view_extern=1)\n\n elif report_format == \"PDF\":\n ctx.file(view_filename=ret[\"pdf\"], view_extern=1)\n\n elif report_format == \"E-Link\":\n object_id = None\n language = None\n\n # Clean up the path, which we get from CDB Client and\n # make it valid for this platform, in order to avoid\n # mispelled paths (later on) like this one:\n # \"file:///C:\\cdb_view\\/report.pdf\".\n # Might happen when CDBSRV is running on Un*x.\n view_dir = os.path.normpath(client.viewDir.replace('\\\\', os.path.sep))\n\n for f in files:\n client_file = os.path.join(view_dir, os.path.basename(f))\n ctx.upload_to_client(f, client_file, delete_file_after_upload=1)\n excel_file = os.path.join(view_dir, os.path.basename(ret[\"xls\"]))\n pdf_file = os.path.join(view_dir, os.path.basename(ret[\"pdf\"]))\n report_title = urllib.quote(ctx.dialog.cdbxml_report_tmpl_title)\n\n if len(sys_args[\"objects\"]) == 1:\n object_id = sys_args[\"objects\"][0]\n from cdb import objects\n obj = objects.ByID(object_id)\n\n if sys_args[\"rep_lang\"]:\n language = sys_args[\"rep_lang\"]\n\n form_data = {\"filename\": pdf_file,\n \"title\": report_title,\n \"excel\": excel_file,\n \"lang\": language,\n \"subreport\": ctx.ue_args.subreport,\n \"exectype\": rep_exec_type,\n \"format\": report_format\n }\n if object_id:\n form_data[\"oid\"] = object_id\n\n from cs.tools.powerreports.reportElink import renderHtml\n htmlname = \"%s.html\" % os.path.basename(client_file)\n htmlpath = renderHtml(htmlname, **form_data)\n if os.path.exists(htmlpath):\n html_file = os.path.join(view_dir, os.path.basename(htmlpath))\n ctx.upload_to_client(htmlpath, html_file,\n delete_file_after_upload=1)\n ctx.keep(\"elink_client_html\", html_file)\n else:\n ctx.keep(\"elink_client_html\", client_file)\n else:\n raise ue.Exception(\"powerreports_server_error\", ret[\"status\"])\n except Exception as ex:\n for frmt in SUPPORTED_RESULT_FORMATS:\n if ret[frmt] and os.path.exists(ret[frmt]):\n os.remove(ret[frmt])\n misc.log_traceback(\"\")\n raise ex", "def test_get_activity_template(self):\n pass", "def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path", "def name(self):\n return 'Report'", "def _get_template(self, tgt):\n with open(tgt, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n self.template = Template(template_file_content)\n return", "def get_template(self, name, args):\n key = name, len(args)\n template = self.templates.get(key)\n if not template:\n raise mio.MIOException('Undefined template \"%s/%d\"' % (name, len(args)))\n return template", "def get_templates(self, template_name, **kwargs):\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return text", "def get_template():\r\n try:\r\n return CourseEmailTemplate.objects.get()\r\n except CourseEmailTemplate.DoesNotExist:\r\n log.exception(\"Attempting to fetch a non-existent course email template\")\r\n raise", "def storage_get_report_file(self, report_pk):\n return self._get_queryset(pk=report_pk).get()", "def _template_file_default(self):\n return \"index\"", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get 
root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def get_template(type):\n # read model options file from Cloud Storage\n content = storage.read_file('templates/' + type + '.yaml')\n return Response(content, status=200, mimetype='application/text')", "def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))", "def display(reports, template=\"display.html\"):\n with open(template, 'rU') as f:\n t = Template(f.read())\n c = Context(reports)\n return t.render(c)", "def T(request):\n\treturn all_templates[request.param]", "def get_contest_template_file(gt_id, horizon):\n return os.path.join(\"data\", \"fcstrodeo_nctemplates\",\n get_contest_id(gt_id, horizon)+\"_template.nc\")", "def template_path(self) -> str:\n return self._values.get(\"template_path\")", "def get_reports(self):\n return ['auditree/compliance_config.md']", "def template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def _generate_report(self):\n raise NotImplementedError", "def get_templates(self, template_name, **kwargs):\n html = render_template(\"{template}.html\".format(template=template_name), **kwargs)\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return html, text", "def template_body(self) -> str:\n return pulumi.get(self, \"template_body\")", "def template_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"template_id\")", "def _get_template():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/attributes/instance-template',\n 
headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+instanceTemplates/(.+)', r'\\1', r.text)\n else:\n return ''", "def get_template_name(self):\n if self.template_name:\n return '%s' % self.template_name\n\n if self.template_name_prefix:\n return '%s%s.html' % (self.template_name_prefix, self.mode)\n\n for piece_name in reversed(list(self.pieces.keys())):\n piece = getattr(self, piece_name)\n result = piece.get_template_name()\n if result:\n return '%s.html' % result\n\n return None", "def template(self) -> 'outputs.GoogleCloudRunV2ExecutionTemplateResponse':\n return pulumi.get(self, \"template\")", "def _get_report_filename(self):\n report_date = self._get_report_date()\n report_id = self._get_report_id()\n description = ReportMeta.reports[self._report_key]['reportDescription']\n return '{}_{}_{}.pdf'.format(report_id, report_date, description).replace(' ', '_')", "def template():\n return ENVIVIRTUALIZABLEURI('DEFile')", "def test_get_tosca_template(self):\n pass", "def htm(self):\n if self._htm is None:\n try:\n htm_dict = get_report(\n self.idfname,\n self.simulation_dir,\n output_report=\"htm\",\n output_prefix=self.output_prefix,\n )\n except FileNotFoundError:\n return self.simulate().htm()\n else:\n self._htm = htm_dict\n return self._htm" ]
[ "0.76926655", "0.73982304", "0.7120592", "0.70143956", "0.6962399", "0.6962399", "0.6962399", "0.6943154", "0.68456966", "0.6840729", "0.6796202", "0.6777228", "0.67390454", "0.6727346", "0.6701621", "0.6578594", "0.6480725", "0.6409205", "0.6401734", "0.63737816", "0.6356212", "0.63415956", "0.6317046", "0.6300544", "0.6172457", "0.6171188", "0.61453533", "0.6137075", "0.61318797", "0.61235565", "0.6107943", "0.6104046", "0.6102041", "0.6097684", "0.60867256", "0.60817444", "0.6071807", "0.6049427", "0.6042745", "0.60277337", "0.60170203", "0.60154146", "0.60132974", "0.60085964", "0.5989034", "0.5982482", "0.5980017", "0.5978744", "0.59617233", "0.5947741", "0.5946892", "0.59457386", "0.5935225", "0.5926774", "0.5906132", "0.590594", "0.590594", "0.5878556", "0.5877992", "0.58775485", "0.5874067", "0.58684415", "0.58527994", "0.58480144", "0.5838941", "0.5837668", "0.5825627", "0.5823246", "0.58202535", "0.5816055", "0.5806429", "0.57976735", "0.57926124", "0.5778977", "0.57738954", "0.57710373", "0.576361", "0.57590085", "0.57451004", "0.57342064", "0.5730588", "0.57103896", "0.5705098", "0.57023233", "0.56938636", "0.56916445", "0.569129", "0.5689696", "0.5688686", "0.56886315", "0.5687308", "0.56869704", "0.56734085", "0.5671828", "0.56700426", "0.5663336", "0.5646165", "0.5635607", "0.563507", "0.56346565" ]
0.74037963
1
Render exception_data as an html report
def render_exception_html(exception_data, report_template=None):
    report_template = report_template or _report_template()
    jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
    exception_data["repr"] = repr
    return jinja_env.from_string(report_template).render(exception_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_exception(self, exception_trace=''):\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)", "def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):\n exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)\n if data_processor:\n exception_data = data_processor(exception_data)\n\n if output_format == \"html\":\n text = render_exception_html(exception_data)\n elif output_format == \"json\":\n text = render_exception_json(exception_data)\n else:\n raise TypeError(\"Exception report format not correctly specified\")\n\n filename = gen_error_filename(extension=output_format)\n\n report_location = storage_backend.write(filename, text)\n\n return report_location", "def renderHTTP_exception(request, failure):", "def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)", "def get_traceback_html(self):\n\n if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):\n self.template_does_not_exist = True\n self.loader_debug_info = []\n for loader in template_source_loaders:\n try:\n module = import_module(loader.__module__)\n if hasattr(loader, '__class__'):\n source_list_func = loader.get_template_sources\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n source_list_func = module.get_template_sources\n # NOTE: This assumes exc_value is the name of the template that\n # the loader attempted to load.\n template_list = [{'name': t, 'exists': os.path.exists(t)} \\\n for t in source_list_func(str(self.exc_value))]\n except (ImportError, AttributeError):\n template_list = []\n if hasattr(loader, '__class__'):\n loader_name = loader.__module__ + '.' + loader.__class__.__name__\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n loader_name = loader.__module__ + '.' 
+ loader.__name__\n self.loader_debug_info.append({\n 'loader': loader_name,\n 'templates': template_list,\n })\n if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and\n isinstance(self.exc_value, TemplateSyntaxError)):\n self.get_template_exception_info()\n\n frames = self.get_traceback_frames()\n for i, frame in enumerate(frames):\n if 'vars' in frame:\n frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]\n frames[i] = frame\n\n unicode_hint = ''\n if self.exc_type and issubclass(self.exc_type, UnicodeError):\n start = getattr(self.exc_value, 'start', None)\n end = getattr(self.exc_value, 'end', None)\n if start is not None and end is not None:\n unicode_str = self.exc_value.args[1]\n unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')\n t = get_template(\"500_metanas.html\")\n #t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')\n c = Context({\n 'is_email': self.is_email,\n 'unicode_hint': unicode_hint,\n 'frames': frames,\n 'request': self.request,\n 'settings': debug.get_safe_settings(),\n 'sys_executable': sys.executable,\n 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],\n 'server_time': datetime.datetime.now(),\n 'sw_version': get_sw_version(),\n 'sys_path': sys.path,\n 'template_info': self.template_info,\n 'template_does_not_exist': self.template_does_not_exist,\n 'loader_debug_info': self.loader_debug_info,\n })\n # Check whether exception info is available\n if self.exc_type:\n c['exception_type'] = self.exc_type.__name__\n if self.exc_value:\n c['exception_value'] = smart_unicode(self.exc_value, errors='replace')\n if frames:\n c['lastframe'] = frames[-1]\n return t.render(c)", "def render_exception_json(exception_data):\n return json.dumps(exception_data, default=_json_serializer)", "def get_processor_exception_html(exception):\r\n\r\n payment_support_email = settings.PAYMENT_SUPPORT_EMAIL\r\n if isinstance(exception, CCProcessorDataException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! Our payment processor sent us back a payment confirmation that had inconsistent data!\r\n We apologize that we cannot verify whether the charge went through and take further action on your order.\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n elif isinstance(exception, CCProcessorWrongAmountException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! Due to an error your purchase was charged for a different amount than the order total!\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n Your credit card has probably been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n elif isinstance(exception, CCProcessorSignatureException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! 
Our payment processor sent us back a corrupted message regarding your charge, so we are\r\n unable to validate that the message actually came from the payment processor.\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n We apologize that we cannot verify whether the charge went through and take further action on your order.\r\n Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n\r\n # fallthrough case, which basically never happens\r\n return '<p class=\"error_msg\">EXCEPTION!</p>'", "def error(self, environ, start_response):\n \n \"Generate an error report\"\n status = '200 Handle error'\n headers = [('Content-type','text/html')]\n start_response(status, headers)\n trace = traceback.extract_tb(sys.exc_traceback)\n return ['Error<br />[Exception] <i><q>%s</q></i> <br /> [File ] <i><q>%s</q></i> <br /><pre>%s</pre>'\n % (sys.exc_info()[0],trace[-1][0],self.print_file(trace[-1][0], trace[-1][1]))]", "def renderInlineException(request, reason):", "def render(data):\n if data is None:\n return ''\n\n if 'rendered_result' not in data:\n if 'result' not in data:\n data['rendered_result'] = ''\n else:\n make_pretty = True\n data['rendered_result'] = SEP2Renderer.export(data['result'], make_pretty)\n\n return data['rendered_result']", "def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html", "def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()", "def test_get_processor_exception_html(self):\r\n for type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]:\r\n error_msg = \"An exception message of with exception type {0}\".format(str(type))\r\n exception = type(error_msg)\r\n html = get_processor_exception_html(exception)\r\n self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)\r\n self.assertIn('Sorry!', html)\r\n self.assertIn(error_msg, html)\r\n\r\n # test base case\r\n self.assertIn(\"EXCEPTION!\", get_processor_exception_html(CCProcessorException()))", "def test_report_from_json():\n\n class CustomException(Exception):\n pass\n\n def a(foo):\n bar = \"hey there\" # noqa\n # ensure it can handle weird characters\n _fuzz_tokens = [\n \"http\",\n \"https\",\n \":\",\n \"//\",\n \"?\",\n \".\",\n \"aaaaa\",\n \"союз\",\n \"-\",\n \"/\",\n \"@\",\n \"%20\",\n \"🌞\",\n \",\",\n \".com\",\n \"http://\",\n 
\"gov.uk\",\n \"\\udcae\",\n \"%\",\n \"#\",\n \" \",\n \"~\",\n \"\\\\\",\n \"'\",\n \" \" * 180,\n ]\n\n class HardToRender:\n def __repr__(self):\n return \"\".join(_fuzz_tokens)\n\n obj = HardToRender() # noqa\n\n b(foo)\n\n def b(foo):\n c(foo)\n\n def c(foo):\n green = 93 # noqa\n raise CustomException(\"yolo!\")\n\n try:\n a(\"hi\")\n except Exception:\n exception_data = get_exception_data(get_full_tb=False)\n\n frames = exception_data[\"frames\"]\n\n assert exception_data[\"exception_type\"] == \"CustomException\"\n assert exception_data[\"exception_value\"] == \"yolo!\"\n assert len(frames) == 4\n assert exception_data[\"frames\"][-1][\"function\"] == \"c\"\n local_vars = dict(exception_data[\"frames\"][-1][\"vars\"])\n assert local_vars[\"green\"] == \"93\"\n\n html_1 = render_exception_html(exception_data)\n text = render_exception_json(exception_data)\n\n json_based_data = json.loads(text)\n\n html_2 = render_exception_html(json_based_data)\n assert html_1 == html_2", "def formatException(self, exc_info):\n type_, value, trcbk = exc_info\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n row = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n self.writer.writerow(row)\n\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def formatException(self, exc_info):\n keys = [\"type\", \"value\", \"frame\", \"filename\", \"lineno\", \"function\", \"text\"]\n type_, value, trcbk = exc_info\n rows = []\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n values = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n rows.append(dict(zip(keys, values)))\n\n return str(CustomEncoder().encode(rows))", "def custom_500(request, exception=None):\n return render(request, \"500.html\", {\"exception\": exception})", "def xml(self):\n strg = \"<Exception>\\n\"\n strg += \"<Object>\\n\"\n strg += \"%s\\n\" % self.name\n strg += \"</Object>\\n\"\n strg += \"<Message>\\n\"\n strg += self._message\n strg += \"</Message>\\n\"\n strg += \"<DataItems>\\n\"\n for key, value in viewitems(self.data):\n strg += \"<DataItem>\\n\"\n strg += \"<Key>\\n\"\n strg += str(key)\n strg += \"</Key>\\n\"\n strg += \"<Value>\\n\"\n strg += str(value)\n strg += \"</Value>\\n\"\n strg += \"</DataItem>\\n\"\n strg += \"</DataItems>\\n\"\n strg += \"</Exception>\\n\"\n logging.error(strg)\n return strg", "def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '{0}'.\".format(instance.__doc__ and instance.__doc__.strip() or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tConstants.nullObject))\n\tfor i, line in enumerate(str(instance).split(\"\\n\")):\n\t\theader.append(\"Exception message line no. 
'{0}' : '{1}'.\".format(i + 1, line))\n\n\tframes = []\n\tfor frame, locals in extractLocals(trcback):\n\t\tframes.append(\"Frame '{0}' in '{1}' at line '{2}':\".format(*frame))\n\t\targuments, namelessArgs, keywordArgs, locals = locals\n\t\tany((arguments, namelessArgs, keywordArgs)) and frames.append(\"{0:>40}\".format(\"Arguments:\"))\n\t\tfor key, value in arguments.iteritems():\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tfor value in namelessArgs:\n\t\t\tframes.append(\"{0:>40}\".format(value))\n\t\tfor key, value in sorted(keywordArgs.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tlocals and frames.append(\"{0:>40}\".format(\"Locals:\"))\n\t\tfor key, value in sorted(locals.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tframes.append(str())\n\n\ttrcback = formatException(cls, instance, trcback)\n\n\treturn header, frames, trcback", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)", "def html(self, environ):\n body = self.make_body(environ, self.template, html_quote, no_quote)\n\n error_template = TEMPLATE\n template_file = None\n\n try:\n template_file = open(os.path.join(self.dir, str(self.code) + '.html'), 'r')\n except IOError:\n try:\n template_file = open(os.path.join(self.dir, 'error.html'), 'r')\n except IOError:\n pass\n\n if template_file:\n try:\n error_template = template_file.read()\n template_file.close()\n except IOError:\n template_file.close()\n\n return error_template % {\n 'title': self.title,\n 'code': self.code,\n 'server': 'OpenCore WSGI Server',\n 'explanation': self.explanation,\n 'detail': self.detail,\n 'comment': self.comment,\n 'body': body }", "def key_error_page(e):\n return render_template(\"index.html\", error=e), 500", "def error(self, request):\n if self.debug:\n import cgitb\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n cgitb.html(sys.exc_info()))\n else:\n errorpage = \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>Unhandled Exception</title>\n</head><body>\n<h1>Unhandled Exception</h1>\n<p>An unhandled exception was thrown by the application.</p>\n</body></html>\n\"\"\"\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n errorpage)", "def render_error(self, template, *args, **kwargs):\n self._render(template, sys.stderr, *args, **kwargs)", "def formatException(self, exc_info):\n result = super(OneLineExceptionFormatter, self).formatException(exc_info)\n return repr(result) # or format into one line however you want to", "def format_exception(text, status_code):\n return {\"errors\": [{\"status\": str(status_code), \"detail\": text}]}, status_code", "def export(self) -> str:\n return self._collector.get_aggregated_exceptions().to_json() # type: ignore", "def _generate_error_report(self, errno=None):\n # as of now we think this will be the same for every interface\n NIWORKFLOWS_LOG.warn('Report was not 
generated')\n\n errorstr = '<div><span class=\"error\">Failed to generate report!</span>.\\n'\n if errno:\n errorstr += (' <span class=\"error\">Interface returned exit '\n 'code %d</span>.\\n') % errno\n errorstr += '</div>\\n'\n with open(self._out_report, 'w' if PY3 else 'wb') as outfile:\n outfile.write(errorstr)", "def get_html_report(self) -> str:\n template_contents = dict(\n vendor_bundle_js=self.vendor_bundle,\n app_bundle_js=self.app_bundle,\n # results\n results=self.results,\n # account metadata\n account_id=self.account_id,\n account_name=self.account_name,\n report_generated_time=str(self.report_generated_time),\n cloudsplaining_version=__version__,\n )\n template_path = os.path.dirname(__file__)\n env = Environment(loader=FileSystemLoader(template_path)) # nosec\n template = env.get_template(\"template.html\")\n return template.render(t=template_contents)", "def internal_error(e):\n return render_template(\"errors/500.html\"), 500", "def error_view_handler(request, exception, status):\n if status not in [400, 403, 404, 500]:\n status = 500\n\n return render(\n request,\n template_name=\"richie/error.html\",\n status=status,\n context={\n \"error\": exception,\n \"status\": status,\n \"title\": CONTEXT_ERRORS[status][\"title\"],\n \"content\": CONTEXT_ERRORS[status][\"content\"],\n },\n )", "def bad_request(e):\n return render_template(\"400.html\", page_title=400)", "def render_entry_log(self):\n self.render_log(self.selenium_testcase_entry_template)", "def page_error(e):\n\n return render_template('404.html')", "def build_error_output():\n\n error_type, error_value, error_tb = sys.exc_info()\n\n alert_data = dict()\n alert_data['type'] = type(error_value).__name__\n alert_data['value'] = str(error_value)\n alert_data['host'] = platform.node()\n alert_data['os'] = platform.system()\n alert_data['traceback'] = traceback.format_list(traceback.extract_tb(error_tb))\n\n return alert_data", "def render_exit_log(self):\n self.render_log(self.selenium_testcase_exit_template)", "def create_from_exception(self, exc_info=None, **kwargs):\n if not exc_info:\n exc_info = sys.exc_info()\n\n exc_type, exc_value, exc_traceback = exc_info\n\n def shorten(var):\n var = transform(var)\n if isinstance(var, basestring) and len(var) > 200:\n var = var[:200] + '...'\n return var\n\n reporter = ExceptionReporter(None, exc_type, exc_value, exc_traceback)\n frames = varmap(shorten, reporter.get_traceback_frames())\n\n if not kwargs.get('view'):\n # This should be cached\n modules = get_installed_apps()\n if conf.INCLUDE_PATHS:\n modules = set(list(modules) + conf.INCLUDE_PATHS)\n\n def iter_tb_frames(tb):\n while tb:\n yield tb.tb_frame\n tb = tb.tb_next\n \n def contains(iterator, value):\n for k in iterator:\n if value.startswith(k):\n return True\n return False\n \n # We iterate through each frame looking for an app in INSTALLED_APPS\n # When one is found, we mark it as last \"best guess\" (best_guess) and then\n # check it against SENTRY_EXCLUDE_PATHS. If it isnt listed, then we\n # use this option. 
If nothing is found, we use the \"best guess\".\n best_guess = None\n view = None\n for frame in iter_tb_frames(exc_traceback):\n try:\n view = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])\n except:\n continue\n if contains(modules, view):\n if not (contains(conf.EXCLUDE_PATHS, view) and best_guess):\n best_guess = view\n elif best_guess:\n break\n if best_guess:\n view = best_guess\n \n if view:\n kwargs['view'] = view\n\n data = kwargs.pop('data', {}) or {}\n if hasattr(exc_type, '__class__'):\n exc_module = exc_type.__class__.__module__\n else:\n exc_module = None\n data['__sentry__'] = {\n 'exc': map(transform, [exc_module, exc_value.args, frames]),\n }\n\n if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):\n origin, (start, end) = exc_value.source\n data['__sentry__'].update({\n 'template': (origin.reload(), start, end, origin.name),\n })\n kwargs['view'] = origin.loadname\n \n tb_message = '\\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback))\n\n kwargs.setdefault('message', transform(force_unicode(exc_value)))\n\n return self.process(\n class_name=exc_type.__name__,\n traceback=tb_message,\n data=data,\n **kwargs\n )", "def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)", "def precond_failed(e):\n envs = environments()\n return render_template('412.html', envs=envs), 412", "def error(self, **data):\n template_specification = dict(mainContent=\"../error\", title=\"Error page\", data=data)\n template_specification = self._fill_user_specific_attributes(template_specification)\n return self.fill_default_attributes(template_specification)", "def error():\n return render_template(\"error.html\", **locals())", "def exception_output(self, exception: ExceptionValue) -> ExceptionValue:\n return exception", "def display_report(request, **kwargs):\n\n #Getting the report of the tests \n try:\n outputStr = sidecar.events.test_logs(project_id=kwargs['project_id'])\n outputStr = outputStr.results\n except Exception, e:\n outputStr = \"Updating the logs...\"\n \n #Making the output\n context = {\n \"page_title\": _(\"Test Report\"),\n \"test_report\": outputStr\n }\n return render(request, 'rally_dashboard/events/view_report.html', context)", "def report_preparation(data):\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n fd = open(f\"{report_file_path}/mail_report.html\", \"w\")\n fd.write(\n \"\"\"\n <html>\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html charset=UTF-8\" />\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n th {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n td {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n </style>\n </head>\n\n <body>\n <p><font color=\"black\"> Hi All </font></p>\n \"\"\"\n )\n fd.write(\n \"\"\"\n <p><font color=\"black\">{}\n </font></p>\n <table>\n <thead>\n <tr>\n <th> Job Category </th>\n <th> Highlighted information/Test Failure</th>\n <th> Job URL </th>\n <th> Bugzilla </th>\n <th> Job Status </th>\n </tr></thead> \"\"\".format(\n data[\"body\"]\n )\n )\n data.pop(\"body\")\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n\n if os.path.isfile(f\"{report_file_path}/subject\"):\n os.remove(f\"{report_file_path}/subject\")\n if 
os.path.isfile(f\"{report_file_path}/recipient\"):\n os.remove(f\"{report_file_path}/recipient\")\n with open(f\"{report_file_path}/subject\", \"wb\") as handler:\n pickle.dump(data[\"subject\"], handler)\n data.pop(\"subject\")\n\n with open(f\"{report_file_path}/recipient\", \"wb\") as handler:\n pickle.dump(data[\"recipient\"], handler)\n data.pop(\"recipient\")\n for _ in data:\n fd.write(\"<tr><td>{}</td>\".format(_, data[_]))\n fd.write(\"<td>\")\n for content in data[_][\"highlighted_information\"]:\n if (content.lstrip()).rstrip():\n if re.search(r\"tests.\", f\"{content}\"):\n fd.write(\n f'<font color=red><li align=\"left\">{(content.lstrip()).rstrip()}</li></font>'\n )\n else:\n fd.write(f'<li align=\"left\">{(content.lstrip()).rstrip()}</li>')\n fd.write(\"</td>\")\n fd.write(f\"<td><a href={data[_]['Build Url']}>Job Link</a></td>\")\n fd.write(\"<td>\")\n for bz in data[_][\"bugzilla\"].split(\".\"):\n if bz.lstrip().rstrip():\n fd.write(\n f\" <a href=https://bugzilla.xyz.com/show_bug.cgi?id={bz}>{bz}</a> \"\n )\n else:\n fd.write(f\"{bz}\")\n fd.write(\"</td>\")\n if data[_][\"Build_Status\"] == \"SUCCESS\":\n color = \"green\"\n fd.write(f\"<td><font color={color}>PASSED</font></td>\")\n else:\n color = \"red\"\n fd.write(f\"<td><font color={color}>FAILED</font></td>\")\n fd.write(\n \"\"\"\n </table>\n </body>\n <p><font color=\"black\">Note: For more details</font>\n <form action=\"https://wikipage></form></p>\n <p><font color=\"black\">Thanks</font><br>\n <font color=\"black\">xyz</font><p>\n </html>\"\"\"\n )\n fd.close()\n Common.logger.info(\"Report prepared for the selected job and their type\")", "def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response", "def application_error(e):\n return render_template('500.html', error=e), 500", "def error_page(e):\n \n return render_template('error-page.html'), 404", "def debug_error_handler(environ, start_response):\n exc_info = environ.get('com.xythian.shotweb.exception')\n write = start_response('500 Internal server error',\n [('Content-type', 'text/html')],\n exc_info)\n et, v, tb = exc_info\n import traceback\n traceback.print_exception(et, v, tb, file=sys.stderr)\n return cgitb.html(exc_info)", "def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")", "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! 
Something went wrong.\")", "def err500():\n return render_template('404.html', year=datetime.now().year)", "def format_mapping_html_data(header,\r\n mapping_data,\r\n errors,\r\n warnings):\r\n\r\n html_lines = HTML_LINES_INIT\r\n\r\n if not errors and not warnings:\r\n html_lines += \"<h1>No errors or warnings detected.<br></h1>\"\r\n # Find errors/warnings that are not in particular cells\r\n general_errors = \"\"\r\n for curr_err in errors:\r\n loc = curr_err.split('\\t')[1].strip()\r\n if loc == \"-1,-1\":\r\n general_errors += '<td bgcolor=\"red\"><font color=\"white\">' +\\\r\n curr_err.split('\\t')[0] + '<font color=\"black\"></td>'\r\n general_warnings = \"\"\r\n for curr_warning in warnings:\r\n loc = curr_warning.split('\\t')[1].strip()\r\n if loc == \"-1,-1\":\r\n general_warnings += '<td bgcolor=\"yellow\">' +\\\r\n curr_err.split('\\t')[0] + \"</td>\"\r\n\r\n html_lines += HTML_LINES_MSG % (\"+-%./ :,;_\",\r\n general_errors, general_warnings)\r\n\r\n # Check header fields, color and add popup messages if errors/warnings\r\n # are present\r\n formatted_header = \"\"\r\n for curr_field in range(len(header)):\r\n all_errs_warnings = \"\"\r\n curr_pos = \"%s,%s\" % (0, curr_field)\r\n for curr_warning in warnings:\r\n loc = curr_warning.split('\\t')[1].strip()\r\n if loc == curr_pos:\r\n bg_color = \"yellow\"\r\n font_color = \"black\"\r\n all_errs_warnings += curr_warning.split('\\t')[0] + \"<br>\"\r\n\r\n for curr_err in errors:\r\n loc = curr_err.split('\\t')[1].strip()\r\n if loc == curr_pos:\r\n bg_color = \"red\"\r\n font_color = \"white\"\r\n all_errs_warnings += curr_err.split('\\t')[0] + \"<br>\"\r\n if not all_errs_warnings:\r\n formatted_header += \"<th>%s</th>\" % header[curr_field]\r\n elif not header[curr_field]:\r\n formatted_header += \"\"\"<th bgcolor=%s><a href=\"javascript:void(0);\" onmouseover=\"return overlib('%s');\" onmouseout=\"return nd();\"><font color=%s>%s</a></th>\"\"\" % (\r\n bg_color,\r\n all_errs_warnings.replace(\r\n '\"',\r\n ''),\r\n font_color,\r\n \"missing data\")\r\n\r\n else:\r\n formatted_header += \"\"\"<th bgcolor=%s><a href=\"javascript:void(0);\" onmouseover=\"return overlib('%s');\" onmouseout=\"return nd();\"><font color=%s>%s</a></th>\"\"\" % (\r\n bg_color,\r\n all_errs_warnings.replace('\"',\r\n ''),\r\n font_color,\r\n header[curr_field])\r\n\r\n html_lines += HTML_LINES_HEADER % formatted_header\r\n\r\n formatted_data = \"\"\r\n correction_ix = 1\r\n\r\n for curr_row in range(len(mapping_data)):\r\n formatted_data += \"<tr>\"\r\n for curr_cell in range(len(mapping_data[curr_row])):\r\n all_errs_warnings = \"\"\r\n append_location = False\r\n curr_pos = \"%s,%s\" % (curr_row + correction_ix, curr_cell)\r\n for curr_warning in warnings:\r\n loc = curr_warning.split('\\t')[1].strip()\r\n if loc == curr_pos:\r\n append_location = True\r\n bg_color = \"yellow\"\r\n font_color = \"black\"\r\n all_errs_warnings += curr_warning.split('\\t')[0] + \"<br>\"\r\n\r\n for curr_err in errors:\r\n loc = curr_err.split('\\t')[1].strip()\r\n if loc == curr_pos:\r\n append_location = True\r\n bg_color = \"red\"\r\n font_color = \"white\"\r\n all_errs_warnings += curr_err.split('\\t')[0] + \"<br>\"\r\n if append_location:\r\n if len(mapping_data[curr_row][0]) == 0:\r\n sample_id_name = \"missing sample id\"\r\n else:\r\n sample_id_name = mapping_data[curr_row][0]\r\n try:\r\n header_location = header[curr_cell]\r\n except IndexError:\r\n header_location = \"no header\"\r\n location_desc = \"Location (SampleID,Header Field)<br>%s,%s\" %\\\r\n 
(sample_id_name, header_location)\r\n if not all_errs_warnings:\r\n formatted_data += \"<th><tt>%s</tt></th>\" %\\\r\n mapping_data[curr_row][curr_cell]\r\n elif len(mapping_data[curr_row][curr_cell].replace('\\n', '')) == 0:\r\n formatted_data += \"\"\"<th bgcolor=%s><a href=\"javascript:void(0);\" onmouseover=\"return overlib('%s');\" onmouseout=\"return nd();\"><font color=%s><tt>%s</tt></a></th>\"\"\" % (\r\n bg_color,\r\n all_errs_warnings.replace(\r\n '\"',\r\n '').replace(\r\n \"'\",\r\n \"\") + location_desc,\r\n font_color,\r\n \"missing data\")\r\n else:\r\n formatted_data += \"\"\"<th bgcolor=%s><a href=\"javascript:void(0);\" onmouseover=\"return overlib('%s');\" onmouseout=\"return nd();\"><font color=%s><tt>%s</tt></a></th>\"\"\" % (\r\n bg_color,\r\n all_errs_warnings.replace('\"',\r\n '').replace(\"'\",\r\n \"\") + location_desc,\r\n font_color,\r\n mapping_data[curr_row][curr_cell])\r\n\r\n formatted_data += \"</tr>\"\r\n html_lines += HTML_LINES_DATA % formatted_data\r\n\r\n return html_lines", "def _report_body(*, image: str, repo: str, run: str, stacktrace: str) -> str:\n return (\n f\"Repo: {repo}\\n\"\n f\"Run URL: {run}\\n\"\n f\"Image ID: {image}\\n\"\n f\"Stacktrace:\\n```py\\n{stacktrace}\\n```\\n\"\n )", "def handle_failed_plot(htmlfile, header, qatype):\n import sys\n import traceback\n lines = traceback.format_exception(*sys.exc_info())\n msg = f'ERROR generating {htmlfile}\\n' + ''.join(lines)\n print(msg)\n print('Proceeding with making other plots')\n pc = write_placeholder_html(\n htmlfile, header, \"PER_CAMFIBER\", message=msg)\n return pc", "def get_problem(self, _data):\r\n return {'html': self.get_problem_html(encapsulate=False)}", "def handle_error(context, inference_exception, trace):\n context.set_response_status(\n code=inference_exception.status_code,\n phrase=utils.remove_crlf(inference_exception.phrase),\n )\n return [\"{}\\n{}\".format(inference_exception.message, trace)]", "def render_expression(ex):\r\n try:\r\n return _render_to_html(_get_final_tree(ex))\r\n except ParseException:\r\n return err(ex)", "def _ExceptionResponse(args_dict=None):\n if args_dict is None:\n args_dict = {}\n args_dict[\"code\"] = \"Exception\"\n return CGateway._DumpResponse(args_dict)", "def handle_exception(self, exception, debug):\n\n # build our error report\n error_report = {\n 'method': self.request.method,\n 'url': self.request.path_url,\n 'query_string': self.request.query_string,\n # 'data': environ.get('wsgi.input'),\n 'headers': dict(self.request.headers),\n 'env': dict((\n ('REMOTE_ADDR', self.request.environ['REMOTE_ADDR']),\n ('SERVER_NAME', self.request.environ['SERVER_NAME']),\n ('SERVER_PORT', self.request.environ['SERVER_PORT']),\n )),\n }\n interface = 'sentry.interfaces.Http'\n\n try:\n client.captureException(data={interface: error_report})\n except HTTPException:\n logging.warning('Unable to contact sentry server')\n\n # Log the exception\n logging.exception(exception)\n\n # If the exception is a HTTPException, use its error code.\n # Otherwise use a generic 500 error code.\n if isinstance(exception, webapp2.HTTPException):\n self.response.set_status(exception.code)\n status_code = exception.code\n else:\n self.response.set_status(500)\n status_code = 500\n\n # collect our error data\n exc_info = sys.exc_info()\n\n # Set a custom message.\n if status_code == 500:\n self.render_response({'error': 'A server error has occurred'})\n # otherwise return the error message's value\n else:\n self.render_response({'error': str(exc_info[1])})", "def 
__as_unicode(self):\n # WARNING: Do not change this string - it is used to extract error from log\n strg = WMEXCEPTION_START_STR\n strg += \"\\nException Class: %s\\n\" % self.name\n strg += \"Message: %s\\n\" % self._message\n for key, value in viewitems(self.data):\n strg += \"\\t%s : %s\\n\" % (key, value,)\n strg += \"\\nTraceback: \\n\"\n strg += self.traceback\n strg += '\\n'\n strg += WMEXCEPTION_END_STR\n return strg", "def html_render(qry_domains, search_domains=None):\n reports = dict(filter(None, map(tools.analyse, qry_domains)))\n\n # Handle no valid domains by redirecting to GET page.\n if len(reports) == 0:\n app.logger.info(\n 'No valid domains found in {}'.format(qry_domains)\n )\n return flask.redirect('/error/0')\n\n return flask.render_template(\n 'www/report.html',\n reports=reports,\n atoms=dict(zip(qry_domains, map(binascii.hexlify, qry_domains))),\n exports={'json': 'json', 'csv': 'csv'},\n search=search_domains,\n )", "def render(self, data, *args, **kwargs):\n pass # pragma: nocover", "def write_error(self, status_code, **kwargs):\n reason = \"Unknown Error\"\n\n # Get information about the triggered exception\n self.application.gs_globals[\"exception_fulltext\"] = repr(sys.exc_info())\n\n # Get the status code and error reason\n if status_code in list(ERROR_CODES):\n reason = ERROR_CODES[status_code]\n try:\n if \"exc_info\" in kwargs:\n _, error, _ = kwargs[\"exc_info\"]\n reason = error.reason\n except AttributeError:\n pass\n\n # Return JSON if this is an API call\n if \"/api/v1/\" in self.request.uri:\n jsondict = {\n \"page_title\": \"Error {}: {}\".format(status_code, reason),\n \"error_status\": status_code,\n \"error_reason\": reason,\n \"error_exception\": self.application.gs_globals[\"exception_fulltext\"],\n }\n self.set_header(\"Content-type\", \"application/json\")\n self.write(json.dumps(jsondict))\n\n # Render the error template\n else:\n t = self.application.loader.load(\"error_page.html\")\n self.write(\n t.generate(\n gs_globals=self.application.gs_globals,\n status=status_code,\n reason=reason,\n user=self.get_current_user(),\n )\n )", "def data():\n return render_template(\n 'data.html',\n title='World Happiness Report',\n year=datetime.now().year,\n message='Main Data Model'\n )", "def handle_error(request, response, template):\n response.write(\n loader.render_to_string(\n template,\n None,\n RequestContext(\n request,\n dict = {\"blogs\": Blog.objects.select_related(),\n \"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()}\n )\n )\n )\n return response", "def test_02_add_exception_document(self):\n # get applications\n r = requests.get('%s/settings/applications' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n application_key = re.search('<td>Key</td>.*<td>(.*)</td>', r.content).group(1)\n application_id = re.search('<form id=\"application(\\d+)\"', r.content).group(1)\n\n # add an exception\n title = 'handled exception %f' % random.random()\n r = requests.post('%s/api/v1/exception/%s' % (self.url, application_key), data={\n 'name': 'Kelp',\n 'title': title,\n \"description\": \"log description\",\n \"email\": \"[email protected]\",\n \"version\": \"1.0 (1.0.012)\",\n \"device\": \"iPhone5,1\",\n \"os_version\": \"6.1\",\n \"access_token\": \"oauth access token\",\n \"method\": \"method of http request\",\n \"url\": \"http://victory-demo.appspot.com\",\n \"parameters\": \"parameters of http request\",\n \"status\": \"500\"\n })\n 
self.assertEqual(r.status_code, 200)\n result = json.loads(r.content)\n self.assertTrue(result['success'])\n\n # get exception groups\n r = requests.get('%s/exception_groups' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n group_tag = re.search('<tr href=\"#document_(.+)\" ', r.content).group(1)\n\n # get the exception group\n r = requests.get('%s/exception_groups/%s/%s' % (self.url, application_id, group_tag), cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n self.assertRegexpMatches(r.content, '<tr><td>Email</td><td>[email protected]</td></tr>')", "def _htmlify(self, data):\n resource = json.dumps(data, indent=4)\n title, explanation = self._get_title_and_explanation()\n return flask.make_response(\n flask.render_template('resource.html', title=title,\n explanation=explanation,\n resource=resource),\n 200, {'Content-Type': 'text/html'})", "def write_error_report(self):\n\n with open('runReport.txt', 'a') as report:\n report.write(\"Number of Hits: \" + str(self.num_hits) + '\\n')\n report.write(\"Number of Requests: \" + str(self.num_requests) + '\\n')\n report.write(\"Hit Rate: \" + str((self.num_hits / self.num_requests)))\n report.write(\"Datafiles downloaded: \" + str(self.num_datafiles))\n now = datetime.now()\n dt_string = now.strftime(\"%H:%M %m/%d/%Y\")\n report.write(\"Run finished \" + dt_string)", "def format_exception_only(exc):\r\n exc_type = type(exc)\r\n\r\n stype = exc_type.__qualname__\r\n smod = exc_type.__module__\r\n if smod not in (\"__main__\", \"builtins\"):\r\n stype = smod + '.' + stype\r\n try:\r\n _str = str(exc)\r\n except:\r\n _str = \"<unprintable {} object>\".format(exc_type.__name__)\r\n\r\n if _str == \"None\" or not _str:\r\n line = \"{}\\n\".format(stype)\r\n else:\r\n line = \"{}: {}\\n\".format(stype, _str)\r\n return line", "def formatException(self, exc_info):\n traces = traceback.format_exception(*exc_info)\n return \"\\n\".join(traces)", "def default_render_failure(request,\r\n message,\r\n status=403,\r\n template_name='extauth_failure.html',\r\n exception=None):\r\n\r\n log.debug(\"In openid_failure \" + message)\r\n\r\n data = render_to_string(template_name,\r\n dict(message=message, exception=exception))\r\n\r\n return HttpResponse(data, status=status)", "def render_report(blocks):\n\tfor block in blocks:\n\t render_block(\n\t\t(\n\t\t block[0],\t\t# signature \n\t\t block[1], # line number\n\t\t block[2],\t\t# line\n\t\t)\n\t )", "def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n exception = kwargs.get(\"exception\")\n\n context.update({\n \"exception\": exception,\n \"exception_type\": exception.__class__.__name__ if exception else None,\n \"exception_msg\": exception.message if exception and hasattr(exception, 'message') else str(exception) if exception else None,\n \"extra_message\": kwargs.get(\"extra_message\"),\n })\n return context", "def test_format_mapping_html_data_errors(self):\r\n\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['Sample1', 'AACCGGTT', 'ACATATT', 'Desc_1'],\r\n ['Sample2', 'CCAATTGG', 'ACATATT', 'Desc_2']\r\n ]\r\n errors = ['problem1\\t1,2']\r\n warnings = []\r\n\r\n # Should create a an error popup in the right location\r\n\r\n actual_formatted_html_data = format_mapping_html_data(header,\r\n mapping_data, errors, warnings)\r\n\r\n self.assertEqual(actual_formatted_html_data,\r\n self.expected_formatted_html_errors)", "def report_with_context(self, exception: Exception, 
http_context: HTTPContext):\n self._add_exception(exception, http_context)", "def _render_result(self, errno, errmsg, data=None):\n self.set_header(\"Content-Type\", \"application/json; charset=utf-8\")\n if self._finished:\n return\n self.write(tornado.escape.json_encode({\n \"errno\": errno,\n \"errmsg\": errmsg,\n \"logid\": self.logid,\n \"data\": data,\n }))", "def error():\n return render_template(\"404.html\")", "def render_diagnostics(request, diagnostics_dict, status=200):\n return HttpResponse(json.dumps(diagnostics_dict), status=status)", "def server_error(e):\n return render_template('500.html'), 500", "def render_log(self, template):\n\n # only write to the log file if it exists\n if self._selenium_log_file:\n\n id = self.id()\n description = self.shortDescription()\n\n # grab the stack frame info from test_* method\n (obj, filename, lineno, function, code_context, index) \\\n = self.get_test_frame()\n\n # render the test case debug\n html = render_to_string(\n template, {\n 'id': id,\n 'description': description,\n 'filename': filename,\n 'lineno': lineno,\n 'function': function,\n 'code_context': code_context,\n 'index': index,\n 'png': self.get_image_uri(),\n 'text': self.get_visible_text()})\n\n # write it to the file\n self._selenium_log_file.write(html.encode('utf8'))", "def render_report(cr, uid, ids, name, data, context=None):\n registry = yuancloud.modules.registry.RegistryManager.get(cr.dbname)\n return registry['ir.actions.report.xml'].render_report(cr, uid, ids, name, data, context)", "def test_handle_print_rich_exception(self):\n\n with io.StringIO() as buf:\n # Capture stdout logs (rich logs to stdout)\n with contextlib.redirect_stdout(buf):\n _print_rich_exception(Exception(\"boom!\"))\n # Capture the stdout output\n captured_output = buf.getvalue()\n\n assert \"Exception:\" in captured_output\n assert \"boom!\" in captured_output", "def baseExceptionHandler(*args):\n\n\theader, frames, trcback = formatReport(*extractException(*args))\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), header)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), frames)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tsys.stderr.write(\"\\n\".join(trcback))\n\n\treturn True", "def internal_server_error(e):\n return render_template('500.html', error=repr(e)), 500", "def _processGETErr(self, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httplib.BAD_REQUEST\r\n elif e.check(UnauthorizedLogin):\r\n msg = e.getErrorMessage()\r\n code = httplib.UNAUTHORIZED\r\n elif e.check(InternalError):\r\n e.printTraceback()\r\n msg = 'Internal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n\r\n self._render_GET(request, code, 'text/plain; charset=utf-8', msg)", "def experimentError(request, run_uuid):\n subject_data = get_object_or_404(SubjectData, pk=run_uuid)\n experiment = get_object_or_404(Experiment, pk=subject_data.experiment.pk)\n t = Template(experiment.error_page_tpl)\n c = RequestContext(request, {})\n return HttpResponse(t.render(c))", "def _render_report_form(start_str, end_str, start_letter, end_letter, report_type, total_count_error=False, date_fmt_error=False):\r\n context = {\r\n 'total_count_error': total_count_error,\r\n 'date_fmt_error': date_fmt_error,\r\n 'start_date': start_str,\r\n 
'end_date': end_str,\r\n 'start_letter': start_letter,\r\n 'end_letter': end_letter,\r\n 'requested_report': report_type,\r\n }\r\n return render_to_response('shoppingcart/download_report.html', context)", "def print_error_data(error_data):\n\n print('\\nDays when there were more than 1% errors in HTTP :\\n')\n for day in error_data:\n print(str(day[0]) + '\\t-\\t' + str(day[1]) + '% \\n')\n print('-------------------------------------------------------\\n')", "def internal_server_error(e):\n return render_template(\"error/500.html\"), 500", "def page_not_found(e):\n return render_template(\"500.html\"), 500", "def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n print(response.data)\n response.content_type = \"application/json\"\n return response", "def generate_report(self, result):\n # Jinja template dictionary.\n template_dict = {}\n\n template_dict['title'] = jinja2.escape(self.title)\n template_dict['description'] = jinja2.escape(self.description)\n template_dict['sagenb_version'] = SAGENB_VERSION\n template_dict['environment'] = self.extra_args.get('environment')\n template_dict['start_time'] = str(self.start_time)[:19]\n template_dict['stop_time'] = str(self.stop_time)[:19]\n template_dict['elapsed_time'] = self.elapsed_time\n template_dict['pass_total'] = result.success_count\n template_dict['fail_total'] = result.failure_count\n template_dict['error_total'] = result.error_count\n template_dict['count_total'] = result.total_count\n\n rows = []\n sorted_result = 
self.sort_result(result.result)\n\n # Iterate over cases.\n for i, (case_type, case_results) in enumerate(sorted_result):\n # Stats for this case.\n passes = 0\n failures = 0\n errors = 0\n for status, test_case, output, trace in case_results:\n if status == _TestResult.PASS:\n passes += 1\n elif status == _TestResult.FAIL:\n failures += 1\n else:\n errors += 1\n\n # Case description.\n if case_type.__module__ == '__main__':\n name = case_type.__name__\n else:\n name = '%s.%s' % (case_type.__module__, case_type.__name__)\n doc = case_type.__doc__ and case_type.__doc__.split('\\n')[0] or ''\n desc = jinja2.escape(doc and '%s: %s' % (name, doc) or name)\n\n case_id = name.replace('.', '-') + '_%d' % i\n case_class = failures > 0 and 'case_fail' or errors > 0 and 'case_error' or 'case_pass'\n count = passes + failures + errors\n\n rows.append(REPORT_CASE_TMPL.render(locals()))\n\n # Iterate over this case's tests.\n for j, (status, test_case, output, trace) in enumerate(case_results):\n self.report_for_one_test(rows, case_id, j, status,\n test_case, output, trace)\n\n template_dict['test_cases_and_tests'] = '\\n'.join(rows)\n\n # Make the report self-contained.\n stylesheet = template(os.path.join('css', 'test_report.css'))\n template_dict['stylesheet'] = '<style type=\"text/css\"><!--\\n' + stylesheet + '\\n--></style>'\n template_dict['stylesheet'] += IE_STYLE_FIX.render()\n\n jquery = open(os.path.join(DATA,\n 'jquery/jquery-1.3.2.min.js'), 'r').read()\n template_dict['javascript'] = '<script type=\"text/javascript\">\\n' + jquery + '\\n</script>'\n return template(os.path.join('html', 'test_report.html'),\n **template_dict)", "def report(self) -> Any:", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n self._RawWrite(\"%d %s\" % (exception.code, exception.title))\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n self._RawWrite(\"500 Server Error\")\n self.response.set_status(500)", "def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if '(e.g:bbox=xmin,ymin,xmax,ymax)' in str(data):\n rendered = {'error': str(data)}\n return json.dumps(rendered)\n if data is None:\n return ''\n\n if 'error' in data:\n rendered = data\n elif isinstance(data, dict):\n rendered = self.render_single(data)\n else:\n rendered = self.render_many(data)\n\n return json.dumps(rendered, separators=self.separators)", "def make_report(self):\n self.report = \"---\\ntitle: Broken Link Report\"\n self.report += \"\\nchecked: \" + str(len(self.visited))\n self.report += \"\\nnumber of email links: \" + str(len(self.mailto_links))\n self.report += \"\\nemails: \" + \", \".join(\n [str(m) for m in set(self.mailto_links)]\n )\n self.report += \"\\nbroken: \" + str(len(self.broken))\n self.report += \"\\n---\\n\"\n sorted_list = sorted(self.broken, key=lambda k: k[\"code\"], reverse=True)\n for link in sorted_list:\n self.report += f\"\\n- code: {link['code']}\\n url: {link['link']}\\n parent: {link['parent']}\\n error: {link['err']}\\n\"\n return self.report", "def internal_error_handler(error):\r\n return render_template('error.500.html')" ]
[ "0.6812304", "0.6662681", "0.6421623", "0.6406252", "0.6321645", "0.63113713", "0.61840075", "0.6142265", "0.6085713", "0.6048839", "0.59619063", "0.5947996", "0.5916094", "0.5889431", "0.58686614", "0.5866581", "0.5849284", "0.58273417", "0.5797272", "0.57954353", "0.5734677", "0.5728617", "0.5714531", "0.5712052", "0.57115763", "0.5706719", "0.56628513", "0.56088376", "0.5578029", "0.55709815", "0.55536276", "0.555277", "0.5548844", "0.5535286", "0.5532922", "0.5523181", "0.5519342", "0.5509976", "0.5491403", "0.54879063", "0.54851943", "0.5472248", "0.54713064", "0.54615176", "0.54573154", "0.5455028", "0.54536027", "0.54513925", "0.5449895", "0.54453963", "0.5432958", "0.5432958", "0.5429771", "0.5405931", "0.53953534", "0.5389762", "0.5382418", "0.5381788", "0.5376476", "0.53723854", "0.53671664", "0.5355493", "0.53528535", "0.53427964", "0.5342738", "0.53385955", "0.53309596", "0.5330444", "0.5324038", "0.53170276", "0.53136325", "0.53133833", "0.53126276", "0.5285755", "0.52787113", "0.52641904", "0.52614164", "0.5259131", "0.5258387", "0.52581674", "0.52483773", "0.5241061", "0.5239864", "0.52394366", "0.52340853", "0.5219849", "0.5213628", "0.52091104", "0.5206465", "0.5195971", "0.5189736", "0.5182952", "0.5180924", "0.5175002", "0.5170076", "0.5157631", "0.51547545", "0.51425266", "0.5136402", "0.5134586" ]
0.7821494
0
Render exception_data as a JSON object
def render_exception_json(exception_data): return json.dumps(exception_data, default=_json_serializer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response", "def jsonify_http_exception(exception: HTTPException):\n return jsonify(exception.description, exception.code)", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n print(response.data)\n response.content_type = \"application/json\"\n return response", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response", "def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }", "def exception_data(self) -> typing.Optional[dict]:\n return self._exception_data", "def jsonify_unknown_exception(exception: Exception):\n current_app.logger.exception('Unhandled exception has been raised!')\n return jsonify(DEFAULT_MESSAGE, 500)", "def jsonify_exception(error: HTTPException) -> Response:\n exc_resp = error.get_response()\n response: Response = jsonify(reason=error.description)\n response.status_code = exc_resp.status_code\n return response", "def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if '(e.g:bbox=xmin,ymin,xmax,ymax)' in str(data):\n rendered = {'error': str(data)}\n return json.dumps(rendered)\n if data is None:\n return ''\n\n if 'error' in data:\n rendered = data\n elif isinstance(data, dict):\n rendered = self.render_single(data)\n else:\n rendered = self.render_many(data)\n\n return json.dumps(rendered, separators=self.separators)", "def handle_missing_objects(exc):\n return jsonify(dict(\n message=str(exc)\n )), exc.code", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)", "def format_exception(text, status_code):\n return {\"errors\": [{\"status\": str(status_code), \"detail\": text}]}, status_code", "def format_exception(self):\n if isinstance(self.message, dict):\n 
return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)", "def make_json_error(ex):\n if isinstance(ex, HTTPException):\n return ex;\n elif isinstance(ex, ResourceException):\n info = ex.to_dict()\n status_code = ex.http_status\n info[\"type\"] = \"exception\"\n else:\n message = \"There was an internal server error. Please try again later.\"\n info = {\"code\": \"internal_server_error\", \"message\": message, \"type\": \"exception\"}\n status_code = 500\n # generally we should log these 500 errors with the stacktrace somewhere -- we used splunk at Box.\n\n response = jsonify(**info)\n response.status_code = status_code\n return response", "def get_er_exceptions():\n express_route_exceptions_lst = []\n try:\n for i in get_data():\n if i['expressRoute'] is False:\n express_route_exceptions_lst.append(i)\n express_route_exceptions_dic = {'expressRoutesExceptions': express_route_exceptions_lst}\n return get_json(express_route_exceptions_dic)\n except ValueError as e:\n print(e)", "def AsJson(self):\n\n return json.dumps(self._errors)", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')", "def render_exception_html(exception_data, report_template=None):\n report_template = report_template or _report_template()\n jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=[\"jinja2.ext.autoescape\"])\n exception_data[\"repr\"] = repr\n return jinja_env.from_string(report_template).render(exception_data)", "def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n exception = kwargs.get(\"exception\")\n\n context.update({\n \"exception\": exception,\n \"exception_type\": exception.__class__.__name__ if exception else None,\n \"exception_msg\": exception.message if exception and hasattr(exception, 'message') else str(exception) if exception else None,\n \"extra_message\": kwargs.get(\"extra_message\"),\n })\n return context", "def _ExceptionResponse(args_dict=None):\n if args_dict is None:\n args_dict = {}\n args_dict[\"code\"] = \"Exception\"\n return CGateway._DumpResponse(args_dict)", "def render(self, data):\n logging.info(\"render (start)\")\n\n seria = json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria", "def _format_data(self, data):\n return json.dumps(data)", "def write_error(self, status_code, exc_info, **kwargs):\n response = {\n \"data\": None,\n \"errors\": [ str(exc_info[1]) ]\n }\n\n self.set_status(status_code)\n self.write(json.dumps(response))", "def export(self) -> str:\n return self._collector.get_aggregated_exceptions().to_json() # type: ignore", "def test_report_from_json():\n\n class CustomException(Exception):\n pass\n\n def a(foo):\n bar = \"hey there\" # noqa\n # ensure it can handle weird characters\n _fuzz_tokens = [\n \"http\",\n \"https\",\n \":\",\n \"//\",\n \"?\",\n \".\",\n \"aaaaa\",\n \"союз\",\n \"-\",\n \"/\",\n \"@\",\n \"%20\",\n \"🌞\",\n \",\",\n \".com\",\n \"http://\",\n \"gov.uk\",\n \"\\udcae\",\n \"%\",\n \"#\",\n \" \",\n \"~\",\n \"\\\\\",\n \"'\",\n \" \" * 180,\n ]\n\n class HardToRender:\n def __repr__(self):\n return \"\".join(_fuzz_tokens)\n\n obj = HardToRender() # noqa\n\n b(foo)\n\n def b(foo):\n c(foo)\n\n def c(foo):\n green = 93 # noqa\n raise 
CustomException(\"yolo!\")\n\n try:\n a(\"hi\")\n except Exception:\n exception_data = get_exception_data(get_full_tb=False)\n\n frames = exception_data[\"frames\"]\n\n assert exception_data[\"exception_type\"] == \"CustomException\"\n assert exception_data[\"exception_value\"] == \"yolo!\"\n assert len(frames) == 4\n assert exception_data[\"frames\"][-1][\"function\"] == \"c\"\n local_vars = dict(exception_data[\"frames\"][-1][\"vars\"])\n assert local_vars[\"green\"] == \"93\"\n\n html_1 = render_exception_html(exception_data)\n text = render_exception_json(exception_data)\n\n json_based_data = json.loads(text)\n\n html_2 = render_exception_html(json_based_data)\n assert html_1 == html_2", "def _render_result(self, errno, errmsg, data=None):\n self.set_header(\"Content-Type\", \"application/json; charset=utf-8\")\n if self._finished:\n return\n self.write(tornado.escape.json_encode({\n \"errno\": errno,\n \"errmsg\": errmsg,\n \"logid\": self.logid,\n \"data\": data,\n }))", "def json(self):\n d = [err.json for err in self.errors]\n return d", "def handle_unknown_errors(exc):\n return jsonify(dict(\n traceback=traceback.format_exc(),\n message=str(exc),\n )), 500", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n return JsonResponse(response, status=response[\"http_code\"])", "def render_diagnostics(request, diagnostics_dict, status=200):\n return HttpResponse(json.dumps(diagnostics_dict), status=status)", "def formatException(self, exc_info):\n keys = [\"type\", \"value\", \"frame\", \"filename\", \"lineno\", \"function\", \"text\"]\n type_, value, trcbk = exc_info\n rows = []\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n values = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n rows.append(dict(zip(keys, values)))\n\n return str(CustomEncoder().encode(rows))", "def renderHTTP_exception(request, failure):", "def render_to_json_response(self, context, **response_kwargs):\n response_kwargs.update(dict(json_dumps_params=dict(ensure_ascii=False)))\n return JsonResponse(self.safe_json(context), **response_kwargs)", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))", "def to_response_data(self) -> typing.Any:\n v = self.value or {}\n error_code = v.get(\"code\", \"GenericLobotomyError\")\n error_message = v.get(\"message\", \"There was an error.\")\n return {\"Error\": {\"Code\": error_code, \"Message\": error_message}}", "def format(self, record):\n message = {\n \"time\": datetime.utcfromtimestamp(record.created).isoformat(),\n \"level\": record.levelname,\n \"name\": record.name,\n \"message\": record.getMessage(),\n \"process\": record.process,\n \"thread\": record.threadName,\n \"hostname\": self.hostname,\n \"filename\": record.filename,\n \"function\": record.funcName,\n \"lineNo\": record.lineno,\n }\n\n if record.exc_info:\n message[\n \"exception\"\n ] = f\"{record.exc_info[0].__name__}: {record.exc_info[1]}\"\n message[\"traceback\"] = traceback.format_exc()\n\n return json.dumps(message, ensure_ascii=False)", "def render_to_json_response(self, data: Optional[Dict] = {}, meta: Optional[Dict] = {},\n error: Optional[str] = '', status=HTTPStatus.OK, **response_kwargs):\n response_data = {\"body\": data, \"meta\": meta, \"error\": error}\n return JsonResponse(response_data, status=status, **response_kwargs)", "def 
application_error(e):\n message = {\n 'status': 500,\n 'message': 'Sorry, unexpected error: ' + format(e)\n }\n resp = jsonify(message)\n resp.status_code = 500\n\n return resp", "def render(data, template=None, content_type=b'application/json', i18n=None, **kw):\n\t\n\treturn content_type, dumps(data, **kw)", "def _make_batch_response_dict(response, exception):\n if exception is not None:\n return {\n \"data\": str(exception)\n }\n return {\n \"status_code\": response.status_code,\n \"data\": generate_mailgun_response_json(response),\n }", "def wsgi_tool_error_handler(e):\n status_code = e.code\n result = {\n \"error_message\": e.description,\n \"error_code\": e.name.upper().replace(\" \", \"_\")\n }\n return jsonify(result), status_code", "def error_json(self, number=None, payload=None):\n try:\n spayload = json.dumps(payload)\n # spayload = payload.replace('\\\"','').replace('\\'','')\n except Exception:\n spayload = '\"\"'\n\n vals = (error_codes[number], str(number), spayload)\n self.debug(\"ERROR %s - %s - payload: %s\", *vals)\n\n return json.loads('{ \"Error\":\"%s\", \"Err\":\"%s\", \"Payload\":%s }' % vals)", "def not_found(e):\n\n return json.dumps({\"error\": \"Endpoint not found\"})", "def raise_critical_error(\n message: str,\n data: dict = None,\n exception_type: object = Exception\n):\n critical_error_message = json.dumps(\n {\n \"event_object\": data,\n \"message\": message,\n }\n )\n print(critical_error_message)\n raise exception_type(critical_error_message)", "def render(self, data, media_type=None, renderer_context=None):\n errors = data.get('errors', None)\n\n if errors:\n \"\"\"\n We will let the default JSONRenderer handle\n rendering errors.\n \"\"\"\n return super(NotificationJSONRenderer, self).render(data)\n\n # Finally, we can render our data under the \"profile\" namespace.\n return json.dumps({\n 'notification': data\n })", "def serialize_error(success, object, reason):\n\n return json.dumps({\"success\": success, \"object\": object, \"status\": reason}, indent=2, sort_keys=True)", "def build_error_output():\n\n error_type, error_value, error_tb = sys.exc_info()\n\n alert_data = dict()\n alert_data['type'] = type(error_value).__name__\n alert_data['value'] = str(error_value)\n alert_data['host'] = platform.node()\n alert_data['os'] = platform.system()\n alert_data['traceback'] = traceback.format_list(traceback.extract_tb(error_tb))\n\n return alert_data", "def process_exception(self, request, exception):\r\n if isinstance(exception, CommentClientRequestError) and request.is_ajax():\r\n try:\r\n return JsonError(json.loads(exception.message), exception.status_code)\r\n except ValueError:\r\n return JsonError(exception.message, exception.status_code)\r\n return None", "def _Error(message):\n return json.dumps({\n 'success': False,\n 'error': message,\n })", "def jsonable_server_error(request, template_name='500.html'):\r\n if request.is_ajax():\r\n msg = {\"error\": \"The edX servers encountered an error\"}\r\n return HttpResponseServerError(json.dumps(msg))\r\n else:\r\n return server_error(request, template_name=template_name)", "def output_json(data, code, headers=None):\n #data[\"timestamp\"] = datetime.now()\n return jsonify(data)", "def create_output_for_failure(exception, traceback_string):\r\n tag = '...'\r\n task_progress = {'exception': type(exception).__name__, 'message': unicode(exception.message)}\r\n if traceback_string is not None:\r\n # truncate any traceback that goes into the InstructorTask model:\r\n task_progress['traceback'] = 
traceback_string\r\n json_output = json.dumps(task_progress)\r\n # if the resulting output is too long, then first shorten the\r\n # traceback, and then the message, until it fits.\r\n too_long = len(json_output) - 1023\r\n if too_long > 0:\r\n if traceback_string is not None:\r\n if too_long >= len(traceback_string) - len(tag):\r\n # remove the traceback entry entirely (so no key or value)\r\n del task_progress['traceback']\r\n too_long -= (len(traceback_string) + len('traceback'))\r\n else:\r\n # truncate the traceback:\r\n task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag\r\n too_long = 0\r\n if too_long > 0:\r\n # we need to shorten the message:\r\n task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag\r\n json_output = json.dumps(task_progress)\r\n return json_output", "def xml(self):\n strg = \"<Exception>\\n\"\n strg += \"<Object>\\n\"\n strg += \"%s\\n\" % self.name\n strg += \"</Object>\\n\"\n strg += \"<Message>\\n\"\n strg += self._message\n strg += \"</Message>\\n\"\n strg += \"<DataItems>\\n\"\n for key, value in viewitems(self.data):\n strg += \"<DataItem>\\n\"\n strg += \"<Key>\\n\"\n strg += str(key)\n strg += \"</Key>\\n\"\n strg += \"<Value>\\n\"\n strg += str(value)\n strg += \"</Value>\\n\"\n strg += \"</DataItem>\\n\"\n strg += \"</DataItems>\\n\"\n strg += \"</Exception>\\n\"\n logging.error(strg)\n return strg", "def handle_uncaught_error(e):\n status_code = 500\n\n result = {\n \"error_message\": \"Unknown or unexpected error.\",\n \"error_code\": \"INTERNAL_SERVER_ERROR\"\n }\n return jsonify(result), status_code", "def get_error_message(self, data, response=None):\n return str(data)", "def response_json_error_info(func):\n def wrapper(request):\n try:\n return func(request)\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def _serialize_event_error(event):\n if not event.error_code:\n return None\n\n return {\n \"code\": event.error_code,\n \"message\": MessagingEvent.ERROR_MESSAGES.get(event.error_code, None),\n \"message_detail\": event.additional_error_text\n }", "def _writeJSONErrorResponse(f, request):\n code = getattr(f.value, 'code', CODE.UNKNOWN)\n _writeJSONResponse(\n result=f.getErrorMessage().decode('ascii'),\n request=request,\n code=code,\n status=_mapErrorCodeToStatus(code))\n raise f", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(**context), **response_kwargs)", "def json_err(msg: str) -> Response:\n return jsonify({\"success\": False, \"error\": msg})", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_json(self, obj):\n self.response.content_type = \"application/json\"\n self.response.out.write(json.encode(obj))", "def json_error(message):\n return 
json_response(isError=True, message=message)", "def display_error():\n return flask.jsonify(flask.request.args)", "def as_json(self):", "def handle_api_exception(error):\n response = flask.jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def generic_errors(error, code):\n errors = {}\n errors[\"error\"] = error\n response = jsonify(errors)\n response.status_code = code\n return response", "def generate_error_json(exception=None,\n mydetic_error_code=None,\n short_message=None, long_message=None):\n rval = {\n 'error_code': errorcodes.UNEXPLAINED_FAILURE,\n 'short_message': errorcodes.error_descs[errorcodes.UNEXPLAINED_FAILURE],\n 'long_message': ''\n }\n if exception is not None:\n if not isinstance(exception, MyDeticException):\n raise ValueError(\"Exception is not of type MyDeticException\")\n rval['error_code'] = exception.error_code\n rval['long_message'] = str(exception)\n\n if mydetic_error_code is not None:\n rval['error_code'] = mydetic_error_code\n if short_message is not None:\n rval['short_message'] = short_message\n else:\n rval['short_message'] = errorcodes.error_descs[rval['error_code']]\n if long_message is not None:\n rval['long_message'] = long_message\n\n return rval", "def gateway_error_response(self, exc):\n if hasattr(exc, \"get_stacks\"):\n # Process potentially multiple stacks.\n full_error, exc_stacks = \"\", exc.get_stacks()\n for i in range(len(exc_stacks)):\n full_error += exc_stacks[i][0] + \"\\n\"\n if i == 0:\n full_error += \"\".join(traceback.format_exception(*sys.exc_info()))\n else:\n entry = ApplicationException.format_stack(exc_stacks[i][1])\n full_error += entry + \"\\n\"\n\n exec_name = exc.__class__.__name__\n else:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n exec_name = exc_type.__name__\n full_error = \"\".join(traceback.format_exception(*sys.exc_info()))\n\n status_code = getattr(exc, \"status_code\", 400)\n if self.log_errors:\n if self.develop_mode:\n if status_code == 401:\n log.warn(\"%s: %s\", exec_name, exc)\n else:\n log.error(full_error)\n else:\n if status_code == 401:\n log.info(\"%s: %s\", exec_name, exc)\n else:\n log.info(full_error)\n\n result = {\n GATEWAY_ERROR_EXCEPTION: exec_name,\n GATEWAY_ERROR_MESSAGE: str(exc.message),\n GATEWAY_ERROR_EXCID: getattr(exc, \"exc_id\", \"\") or \"\"\n }\n if self.develop_mode:\n result[GATEWAY_ERROR_TRACE] = full_error\n\n if RETURN_MIMETYPE_PARAM in request.args:\n return_mimetype = str(request.args[RETURN_MIMETYPE_PARAM])\n return self.response_class(result, mimetype=return_mimetype)\n\n self._log_request_error(result, status_code)\n\n resp = self.json_response({GATEWAY_ERROR: result, GATEWAY_STATUS: status_code})\n # Q: Should HTTP status be the error code of the exception?\n resp.status_code = status_code\n return resp", "def http_exception(error):\n data = {'error': str(error)}\n return app.response_class(\n response=json.dumps(data),\n status=error.code,\n mimetype='application/json'\n )", "def internal_error(error):\n return jsonify({'error': \"Internal Server Error. 
\"\n \"Bitte die Logdatei für Details anschauen.\"}), 500", "def vue_exception_handler(exc, context):\n if isinstance(exc, exceptions.APIException) and isinstance(exc.detail, (list, dict)):\n exc.detail = _flatten_vue_validation(exc.detail)\n\n return drf_exception_handler(exc, context)", "def _serialize_event_data_as_json(event_data):\n return json.dumps(event_data)", "def formatException(self, exc_info):\n result = super(OneLineExceptionFormatter, self).formatException(exc_info)\n return repr(result) # or format into one line however you want to", "def render_json(object):\r\n return HttpResponse(jsonify(object), content_type='application/json')", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def renderInlineException(request, reason):", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if isinstance(exc, Http404):\n response.data = {\n 'message': 'No data available' # custom exception message\n }\n return response\n try:\n print(\"Exception\", exc.get_codes())\n if 'email' in exc.get_codes() and 'unique' in exc.get_codes()['email']:\n response.data = {\n 'message': 'This email already exists.' # custom exception message\n }\n return response\n if 'mobile_number' in exc.get_codes() and 'unique' in exc.get_codes()['mobile_number']:\n response.data = {\n 'message': 'This mobile number already exists.' # custom exception message\n }\n return response\n if 'dev_id' in exc.get_codes() and 'unique' in exc.get_codes()['dev_id']:\n response.data = {\n 'message': 'This device already registered with other account.' # custom exception message\n }\n return response\n return response\n except:\n return response", "def __as_unicode(self):\n # WARNING: Do not change this string - it is used to extract error from log\n strg = WMEXCEPTION_START_STR\n strg += \"\\nException Class: %s\\n\" % self.name\n strg += \"Message: %s\\n\" % self._message\n for key, value in viewitems(self.data):\n strg += \"\\t%s : %s\\n\" % (key, value,)\n strg += \"\\nTraceback: \\n\"\n strg += self.traceback\n strg += '\\n'\n strg += WMEXCEPTION_END_STR\n return strg", "def display_exception(self, exception_trace=''):\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)", "def render_response(self, context):\n\n # if object is a string just return as is\n if isinstance(context, basestring):\n self.response.write(context)\n # else attempt to serialise and return\n else:\n context = json.dumps(context)\n self.response.write(context)\n # set the right content-type header\n self.response.headers['Content-Type'] = 'application/json'", "def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type == \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = 
data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n return [data]", "def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)", "def serialize(self):\n\t\treturn { 'client_id': self.client_id, 'is_in_error' : self.is_in_error, 'error_status' : self.error_status }", "def _rest_error(self, status_code, error_code, message):\n return {\"status_code\": status_code, \"error_code\": error_code, \"message\": message}", "def response_json(func):\n def wrapper(request):\n try:\n return get_json_response(func(request))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def error_response(msg='Unknown'):\n return \"\"\"{{\"InternalServerError\":\"{}\"}}\"\"\".format(msg)", "def _construct_error_response_body(error_type, error_message):\n # OrderedDict is used to make testing in Py2 and Py3 consistent\n return json.dumps(OrderedDict([(\"Type\", error_type), (\"Message\", error_message)]))", "def create_error_response(data: Dict[str, str], status_code: int) -> Response:\n resp = jsonify(data)\n resp.status_code = status_code\n return resp", "def to_json(self, data):\n return json.dumps(data)", "def exception_output(self, exception: ExceptionValue) -> ExceptionValue:\n return exception", "def handle_not_found(exception):\n return jsonify({\n 'message': 'Resource not found'\n }), 404", "def render(data):\n if data is None:\n return ''\n\n if 'rendered_result' not in data:\n if 'result' not in data:\n data['rendered_result'] = ''\n else:\n make_pretty = True\n data['rendered_result'] = SEP2Renderer.export(data['result'], make_pretty)\n\n return data['rendered_result']" ]
[ "0.72541064", "0.67585784", "0.6713111", "0.6690408", "0.66427636", "0.66312206", "0.65327764", "0.65234625", "0.6517418", "0.6510992", "0.64126235", "0.6402634", "0.63840944", "0.63827544", "0.63217765", "0.6246733", "0.62432253", "0.61892617", "0.6140482", "0.6116676", "0.60697544", "0.60659206", "0.6048546", "0.60113007", "0.6007583", "0.5992608", "0.5970451", "0.5962027", "0.59452707", "0.5929636", "0.589944", "0.589422", "0.5893212", "0.58928365", "0.5882278", "0.5879313", "0.58505213", "0.58197725", "0.580697", "0.5786965", "0.57848525", "0.5784425", "0.57772934", "0.57642394", "0.5758104", "0.57463247", "0.5738363", "0.5721176", "0.5710569", "0.5709654", "0.57075506", "0.56652564", "0.56592184", "0.5659054", "0.56580657", "0.56572455", "0.5645947", "0.56427443", "0.5642144", "0.5638788", "0.5636679", "0.56312704", "0.56010467", "0.56010467", "0.56010467", "0.56010467", "0.56010467", "0.55772674", "0.5576388", "0.55722356", "0.5563505", "0.55546695", "0.55543387", "0.5549572", "0.55448115", "0.5540431", "0.5511979", "0.5510433", "0.5510061", "0.55069923", "0.5505624", "0.5502217", "0.54984343", "0.54851514", "0.54807746", "0.5476109", "0.54640883", "0.54578274", "0.5454841", "0.54514545", "0.5446789", "0.54408526", "0.5434627", "0.5427528", "0.54266226", "0.54200965", "0.5419033", "0.5417665", "0.5408593", "0.5405971" ]
0.9000337
0
JSON serializer for objects not serializable by the default json encoder
def _json_serializer(obj): if isinstance(obj, (datetime, date)): return obj.isoformat(sep=" ") if isinstance(obj, (types.TracebackType, TracebackFrameProxy)): return "<Traceback object>" return saferepr(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def jsonify(obj):\n raise NotImplementedError", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def serialize(self, obj):\n return json.dumps(obj)", "def json_friendly(self):", "def json_serial(obj):\n if isinstance(obj, LegipyModel):\n return obj.to_json()\n elif isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n raise TypeError(\"Type {0} not serializable\".format(repr(type(obj))))", "def default_encoder(obj): # pylint: disable=method-hidden\n if isinstance(obj, Decimal):\n return str(float(obj))\n\n if hasattr(obj, \"isoformat\"): # handles both date and datetime objects\n return str(obj)\n\n if isinstance(obj, set):\n return list(obj)\n\n if isinstance(obj, enum.Enum):\n return obj.name\n\n try:\n # attrs objects can appear when logging stuff\n return attr.asdict(obj, dict_factory=masked_dict)\n except attr.exceptions.NotAnAttrsClassError:\n pass\n\n try:\n obj_dict = obj.asdict()\n except AttributeError:\n pass\n else:\n return masked_dict(list(obj_dict.items()))\n\n raise TypeError(repr(obj) + \" is not JSON serializable\")", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return str(obj) #.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\r\n\r\n\t\tif isinstance(obj,(datetime, date)):\r\n\t\t\treturn obj.isoformat()\r\n\t\traise TypeError (\"Type %s not serializable\" % type(obj))", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(primitive=True), *args, **kwargs)", "def serialize(self, obj):\n pass", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n if isinstance(obj, complex):\n return str(obj)\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date,date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\r\n\r\n if isinstance(obj, (datetime, date)):\r\n return obj.isoformat()\r\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n\tif isinstance(obj, (dt.datetime, dt.date)):\n\t\treturn obj.isoformat()\n\traise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise 
TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (dt.datetime, dt.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def toJSON(cls, obj):\n return json.dumps(obj)", "def jsonSerial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n if isinstance(obj, enum.Enum):\n return obj.value\n\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def default(self, o):\r\n raise TypeError(repr(o) + \" is not JSON serializable\")", "def toJSON(self):\n raise NotImplementedError()", "def serialize(self, obj):\n return obj", "def default(self, obj):\r\n if hasattr(obj, 'to_python'):\r\n return obj.to_python()\r\n return super(CLIJSONEncoder, self).default(obj)", "def default_serializer(_cls: Type[Any], obj: Any) -> Any:", "def default(self, o):\n raise TypeError(\"%r is not JSON serializable\" % (o,))", "def json_serial(obj):\n\n if isinstance(obj, (datetime)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def default(self, o):\n raise TypeError(repr(o) + \" is not JSON serializable\")", "def json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError (\"Type not serializable\")", "def json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def jsonify(obj):\n d = model_to_dict(obj)\n return json.dumps(d, cls=LazyEncoder)", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(), *args, **kwargs)", "def json_serializer(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n serial = obj.isoformat()\n return serial", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n elif isinstance(obj, decimal.Decimal):\n if obj % 1 == 0:\n return int(obj)\n else:\n return float(obj)\n\n elif isinstance(obj, bytes):\n try:\n s = obj.decode()\n return s\n except Exception:\n return str(obj)\n\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError (\"Type not serializable\")", "def json_serial2(self, obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def dumps(obj):\n return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)", "def to_serializable(o: Any) -> Any:\n if isinstance(o, UUID):\n return str(o)\n if isinstance(o, datetime):\n return isoformat(o)\n if is_dataclass(o):\n return asdict(o)\n if hasattr(o, \"__json__\"):\n return 
o.__json__()\n if hasattr(o, \"to_dict\"):\n # api_client models all have a to_dict function\n return o.to_dict()\n if isinstance(o, BaseModel):\n return o.dict()\n raise TypeError(f\"Could not serialize object of type {o.__class__.__name__} to JSON\")", "def serialize(self):\n return json.dumps(self.as_dict())", "def json_datetime_serializer(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"{} is not JSON serializable.\".format(obj))", "def SerializeObject(self, data):\n\n if isinstance(data,dict):\n serializad_data = json.dumps(data)\n else:\n serializad_data = json.dumps(data.__dict__)\n\n return serializad_data", "def _json_serialize(obj: Any) -> str:\n if isinstance(obj, bytes):\n if len(obj) < 256:\n try:\n return obj.hex()\n except Exception:\n pass\n else:\n try:\n return obj.decode()\n except Exception:\n pass\n return '<not serializable>'", "def serialize_instance(instance):\n ret = dict([(k, v)\n for k, v in instance.__dict__.items()\n if not k.startswith('_')]) if instance else None\n return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))", "def json_serializer(obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n if hasattr(obj, '_asdict'):\n return obj._asdict()", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def getSerializer():", "def jsonify(obj):\n return json.loads(json.dumps(obj, default=default_encoder))", "def to_json(self) :\n return jsonpickle.encode(self)", "def serialize_instance(instance):\n ret = dict([(k, v)\n for k, v in instance.__dict__.items()\n if not k.startswith('_')])\n return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))", "def to_json(self):\n return json.dumps(self, default=lambda i: i.__dict__)", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def json(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)", "def _toJSON(self):\n\n return json.encode(self.__toJSON())", "def as_json(self):", "def jsonify(object):\n # note: ng provides a \"json\" filter that can do this too\n # note: but Django doesn't [https://code.djangoproject.com/ticket/17419]\n if isinstance(object, QuerySet):\n return serialize('json', object)\n return json.dumps(object)", "def write(obj):\n import warnings\n warnings.warn(\"simplejson.dumps(s) should be used instead of write(s)\",\n DeprecationWarning)\n return dumps(obj)", "def default(self, obj):\n if isinstance(obj, tuple(TYPES.values())):\n key = '__%s__' % obj.__class__.__name__\n return {key: obj.__dict__}\n return json.JSONEncoder.default(self, obj)", "def jsonDefault(object):\n return object.__dict__", "def default(self, obj):\n \n if isinstance(obj, np.ndarray):\n return list(obj)\n\n if isinstance(obj, uuid.UUID):\n return str(obj)\n\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n \n if isinstance(obj,TPC):\n return obj._so()\n \n # No special handling called for; pass through\n return json.JSONEncoder.default(self, obj)", "def encode_json(obj):\n\treturn json.dumps(obj)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def json_encode_default(obj: Any) -> Dict[str, Any]:\n if isinstance(obj, Decimal):\n return {'type(Decimal)': str(obj)}\n elif isinstance(obj, pd.Timestamp):\n return {'type(pd.Timestamp)': str(obj)}\n elif isinstance(obj, datetime):\n return {'type(datetime)': obj.isoformat()}\n else:\n raise TypeError(f\"{repr(obj)} is not JSON serializable\")", "def 
toJSON(object):\n\treturn json.dumps(object, ensure_ascii=False)", "def to_json(self):\n return json.dumps(self._asdict())", "def __str__(self):\n return json.dumps(self.to_dict())", "def serialize(self) -> str:\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def to_json(obj):\n\n return json.dumps(obj, indent=2, default=_generated_class_serializer)", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def serialise(obj):\n if isinstance(obj, datetime.datetime):\n # maybe assume UTC (as deserialise does the reverse)\n return obj.replace(tzinfo=du_tz.tzutc()).isoformat()\n\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n\n if isinstance(obj, queue.Queue):\n return {}\n\n if isinstance(obj, (pagination.PaginatedResponse, BaseObject)):\n return obj.to_dict()\n\n try:\n return obj.to_dict()\n except AttributeError:\n pass\n\n raise TypeError(\"Object of type '%s' is not JSON serializable\" % obj.__class__.__name__)", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def __json__(self):\n filtered_dict = dict()\n\n for k, item in six.iteritems(self.__dict__):\n if k.startswith('_'):\n continue\n\n if hasattr(item, '__json__'):\n filtered_dict[k] = item.__json__\n else:\n filtered_dict[k] = serialize_obj(item)\n\n return filtered_dict", "def to_json(self, **kwargs):\n return dumps(self, **kwargs)", "def JsonComplexEncoder(obj):\n if isinstance(obj, bytes):\n return str(obj)\n else:\n return obj", "def json_encode(obj):\n return json.dumps(obj)", "def serialize(self):\n pass", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()" ]
[ "0.8023734", "0.79166824", "0.7667077", "0.7573799", "0.7536935", "0.7529485", "0.74354947", "0.73595595", "0.73340595", "0.73321056", "0.7332048", "0.7322426", "0.7223231", "0.722149", "0.72154963", "0.72111374", "0.72008944", "0.72008944", "0.7184688", "0.7184688", "0.7184688", "0.7184688", "0.7177854", "0.7154486", "0.71408004", "0.71402466", "0.7131789", "0.70774966", "0.7062999", "0.7055049", "0.70524067", "0.70463574", "0.701805", "0.70084244", "0.6995666", "0.69900304", "0.69900304", "0.6972928", "0.6942593", "0.6942576", "0.6932405", "0.69289875", "0.6928538", "0.69193953", "0.6911861", "0.6911861", "0.6911564", "0.69036394", "0.68940395", "0.6883568", "0.68810374", "0.68718183", "0.6870925", "0.6863694", "0.68512684", "0.68356484", "0.6832378", "0.6819346", "0.68172073", "0.6810763", "0.6808212", "0.6807762", "0.6807301", "0.6795658", "0.678895", "0.6772008", "0.67715156", "0.67713356", "0.6755903", "0.6753638", "0.67440325", "0.66958463", "0.6686825", "0.66852754", "0.6679763", "0.667865", "0.6677099", "0.6676931", "0.6674433", "0.66736645", "0.66736645", "0.66679394", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.66669375", "0.6656594", "0.66521776", "0.6650218", "0.66500014", "0.6647391", "0.6647109", "0.66432506", "0.6642718" ]
0.6736821
71
Return a dictionary containing exception information. If exc_type, exc_value, and tb are not provided, they will be supplied by sys.exc_info().
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048): head_var_length = int(max_var_length / 2) tail_var_length = max_var_length - head_var_length if not tb: exc_type, exc_value, tb = sys.exc_info() frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb) for i, frame in enumerate(frames): if "vars" in frame: frame_vars = [] for k, v in frame["vars"]: try: v = pformat(v) except Exception as e: try: v = saferepr(e) except Exception: v = "An error occurred rendering the exception of type: " + repr(e.__class__) # The force_escape filter assume unicode, make sure that works if isinstance(v, bytes): v = v.decode("utf-8", "replace") # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > max_var_length: v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}" frame_vars.append((k, escape(v))) frame["vars"] = frame_vars frames[i] = frame unicode_hint = "" if exc_type and issubclass(exc_type, UnicodeError): start = getattr(exc_value, "start", None) end = getattr(exc_value, "end", None) if start is not None and end is not None: unicode_str = exc_value.args[1] unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace") try: unicode_hint.encode("utf8") except UnicodeEncodeError: unicode_hint = unicode_hint.encode("utf8", "surrogateescape") c = { "unicode_hint": unicode_hint, "frames": frames, "sys_executable": sys.executable, "sys_version_info": "%d.%d.%d" % sys.version_info[0:3], "server_time": datetime.now(timezone.utc), "sys_path": sys.path, "platform": platform.uname()._asdict(), } # Check whether exception info is available if exc_type: c["exception_type"] = exc_type.__name__ if exc_value: c["exception_value"] = force_text(exc_value, errors="replace") if frames: c["lastframe"] = frames[-1] return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exc_info(self):\n return self._exc_info", "def __exc_info(self):\n exctype, excvalue, tb = sys.exc_info()\n if sys.platform[:4] == 'java': ## tracebacks look different in Jython\n return (exctype, excvalue, tb)\n return (exctype, excvalue, tb)", "def exc_info(self):\n ei = self._exc_info\n if ei is not None and ei[0] is not None:\n return (\n ei[0],\n ei[1],\n # The pickled traceback may be None if we couldn't pickle it.\n load_traceback(ei[2]) if ei[2] else None\n )", "def exc_info(): # real signature unknown; restored from __doc__\n pass", "def sys_exc_info(self, for_hidden=False):\n return self.gettopframe()._exc_info_unroll(self.space, for_hidden)", "def formatException(self, exc_info):\n keys = [\"type\", \"value\", \"frame\", \"filename\", \"lineno\", \"function\", \"text\"]\n type_, value, trcbk = exc_info\n rows = []\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n values = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n rows.append(dict(zip(keys, values)))\n\n return str(CustomEncoder().encode(rows))", "def _parse_traceback(self):\n exc_type, exc_obj, tb = sys.exc_info()\n assert tb is not None, \"must be called in an exception context\"\n\n self.data[\"exception\"] = str(exc_type)\n self.data[\"error\"] = str(exc_obj).strip()\n self.data[\"filename\"] = os.path.basename(tb.tb_frame.f_code.co_filename)\n self.data[\"lineno\"] = tb.tb_lineno", "def get_err_source_info(original_traceback=None) -> dict:\n try: # carefully try to get the actual place where the error happened\n if not original_traceback:\n original_traceback = sys.exc_info()[2] # class, exc, traceback\n first_call = traceback.extract_tb(original_traceback)[-1]\n return dict(\n src_module=first_call[0],\n src_linenr=first_call[1],\n src_func=first_call[2],\n src_code=first_call[3],\n )\n except Exception as e:\n current_app.warning(\n \"I was unable to retrieve error source information: %s.\" % str(e)\n )\n return dict(module=\"\", linenr=0, method=\"\", src_code=\"\")", "def get_error(self):\n return self.exc_info", "def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)", "def exception(self):\n return self._exc_info[1] if self._exc_info is not None else None", "def _get_locals(exception: Exception) -> dict:\n\n tb = exception.__traceback__\n\n if not tb: # pragma: no cover\n return {}\n\n while tb.tb_next is not None:\n tb = tb.tb_next\n\n return tb.tb_frame.f_locals", "def exception(self):\n if self._exc_info is not None:\n return self._exc_info[1]\n else:\n self._check_done()\n return None", "def build_error_output():\n\n error_type, error_value, error_tb = sys.exc_info()\n\n alert_data = dict()\n alert_data['type'] = type(error_value).__name__\n alert_data['value'] = str(error_value)\n alert_data['host'] = platform.node()\n alert_data['os'] = platform.system()\n alert_data['traceback'] = traceback.format_list(traceback.extract_tb(error_tb))\n\n return alert_data", "def format_locals(sys_exc_info):\n\n current_tb = sys_exc_info[-1]\n while current_tb:\n next_tb = current_tb.tb_next\n if not next_tb:\n frame_locals = current_tb.tb_frame.f_locals\n return pprint.pformat(frame_locals)\n current_tb = next_tb", "def _report_exc_info(exc_info, request, extra_data, payload_data, level=None):\n\n if not _check_config():\n return\n\n filtered_level = _filtered_level(exc_info[1])\n if level is None:\n level = 
filtered_level\n\n filtered_exc_info = events.on_exception_info(exc_info,\n request=request,\n extra_data=extra_data,\n payload_data=payload_data,\n level=level)\n\n if filtered_exc_info is False:\n return\n\n cls, exc, trace = filtered_exc_info\n\n data = _build_base_data(request)\n if level is not None:\n data['level'] = level\n\n # walk the trace chain to collect cause and context exceptions\n trace_chain = _walk_trace_chain(cls, exc, trace)\n\n extra_trace_data = None\n if len(trace_chain) > 1:\n data['body'] = {\n 'trace_chain': trace_chain\n }\n if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']):\n extra_trace_data = payload_data['body']['trace']\n del payload_data['body']['trace']\n else:\n data['body'] = {\n 'trace': trace_chain[0]\n }\n\n if extra_data:\n extra_data = extra_data\n if not isinstance(extra_data, dict):\n extra_data = {'value': extra_data}\n if extra_trace_data:\n extra_data = dict_merge(extra_data, extra_trace_data, silence_errors=True)\n data['custom'] = extra_data\n if extra_trace_data and not extra_data:\n data['custom'] = extra_trace_data\n\n request = _get_actual_request(request)\n _add_request_data(data, request)\n _add_person_data(data, request)\n _add_lambda_context_data(data)\n data['server'] = _build_server_data()\n\n if payload_data:\n data = dict_merge(data, payload_data, silence_errors=True)\n\n payload = _build_payload(data)\n send_payload(payload, payload.get('access_token'))\n\n return data['uuid']", "def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n exception = kwargs.get(\"exception\")\n\n context.update({\n \"exception\": exception,\n \"exception_type\": exception.__class__.__name__ if exception else None,\n \"exception_msg\": exception.message if exception and hasattr(exception, 'message') else str(exception) if exception else None,\n \"extra_message\": kwargs.get(\"extra_message\"),\n })\n return context", "def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def exception(self):\n exc_type, exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())", "def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):\n if exc_info is None:\n exc_info = sys.exc_info()\n\n try:\n return _report_exc_info(exc_info, request, extra_data, payload_data, level=level)\n except Exception as e:\n log.exception(\"Exception while reporting exc_info to Rollbar. 
%r\", e)", "def raise_exc_info(exc_info):\n # 2to3 isn't smart enough to convert three-argument raise\n # statements correctly in some cases.\n if isinstance(exc_info[1], exc_info[0]):\n # raise exc_info[1], None, exc_info[2]\n # # After 2to3: \n raise exc_info[1].with_traceback(exc_info[2])\n else:\n # I think this branch is only taken for string exceptions,\n # which were removed in Python 2.6.\n # raise exc_info[0], exc_info[1], exc_info[2]\n # # After 2to3: \n raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])", "def _elasticsearch_info(self) -> Dict[str, Any]:\n try:\n return self.__es__.info()\n except ElasticsearchException as ex:\n return {\"error\": ex}", "def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )", "def _get_exception(self):\r\n \r\n return self._exception", "def format_exc(etype, evalue, etb, context=5, tb_offset=0):\r\n # some locals\r\n try:\r\n etype = etype.__name__\r\n except AttributeError:\r\n pass\r\n\r\n # Header with the exception type, python version, and date\r\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\r\n date = time.ctime(time.time())\r\n pid = 'PID: %i' % os.getpid()\r\n\r\n head = '%s%s%s\\n%s%s%s' % (etype, ' ' * (75 - len(str(etype)) - len(date)),\r\n date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),\r\n pyver)\r\n\r\n # Flush cache before calling inspect. This helps alleviate some of the\r\n # problems with python 2.3's inspect.py.\r\n linecache.checkcache()\r\n # Drop topmost frames if requested\r\n try:\r\n records = _fixed_getframes(etb, context, tb_offset)\r\n except:\r\n raise\r\n print('\\nUnfortunately, your original traceback can not be '\r\n 'constructed.\\n')\r\n return ''\r\n\r\n # Get (safely) a string form of the exception info\r\n try:\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n except:\r\n # User exception is improperly defined.\r\n etype, evalue = str, sys.exc_info()[:2]\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n # ... 
and format it\r\n exception = ['%s: %s' % (etype_str, evalue_str)]\r\n frames = format_records(records)\r\n return '%s\\n%s\\n%s' % (head, '\\n'.join(frames), ''.join(exception[0]))", "def exc_info_to_str(exc_info):\r\n return ''.join(traceback.format_exception(*exc_info))", "def _catch_exceptions(self, exctype, value, tb):\n\n # Now we log it.\n self.error(\"Uncaught exception\", exc_info=(exctype, value, tb))\n\n # First, we print to stdout with some colouring.\n print_exception_formatted(exctype, value, tb)", "def capture_exception():\n\n pm_logger.exception()\n exc_type, exc_value, exc_tb = sys.exc_info()\n exc_type_string = \"%s.%s\" % (exc_type.__module__, exc_type.__name__)\n exc_message = traceback.format_exception_only(exc_type, exc_value)[-1].strip()\n error = {\"type\": exc_type_string,\n \"message\": exc_message}\n try:\n BSON.encode({'args': exc_value.args})\n except InvalidDocument:\n pass\n else:\n error[\"args\"] = exc_value.args\n return error", "def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')", "def formatException(self, exc_info):\n traces = traceback.format_exception(*exc_info)\n return \"\\n\".join(traces)", "def formatException(self, exc_info):\n type_, value, trcbk = exc_info\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n row = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n self.writer.writerow(row)\n\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def get_frame_info(tb, context_lines=7):\n # line numbers / function / variables\n lineno = tb.tb_lineno\n function = tb.tb_frame.f_code.co_name\n variables = tb.tb_frame.f_locals\n\n # get filename\n fn = tb.tb_frame.f_globals.get('__file__')\n if not fn:\n fn = _os.path.realpath(\n _inspect.getsourcefile(tb) or _inspect.getfile(tb)\n )\n if fn[-4:] in ('.pyc', '.pyo'):\n fn = fn[:-1]\n\n # module name\n modname = tb.tb_frame.f_globals.get('__name__')\n\n # get loader\n loader = tb.tb_frame.f_globals.get('__loader__')\n\n # sourcecode\n try:\n if not loader is None:\n source = loader.get_source(modname)\n else:\n source = file(fn).read()\n except (SystemExit, KeyboardInterrupt):\n raise\n except:\n source = ''\n pre_context, post_context = [], []\n context_line, context_lineno = None, None\n else:\n parser = PythonParser(source)\n parser.parse()\n parsed_source = parser.get_html_output()\n lbound = max(0, lineno - context_lines - 1)\n ubound = lineno + context_lines\n try:\n context_line = parsed_source[lineno - 1]\n pre_context = parsed_source[lbound:lineno - 1]\n post_context = parsed_source[lineno:ubound]\n except IndexError:\n context_line = None\n pre_context = post_context = [], []\n context_lineno = lbound\n\n return {\n 'tb': tb,\n 'filename': fn,\n 'loader': loader,\n 'function': function,\n 'lineno': lineno,\n 'vars': variables,\n 'pre_context': pre_context,\n 'context_line': context_line,\n 'post_context': post_context,\n 'context_lineno': context_lineno,\n 'source': source\n }", "def exc_message(exc_info):\n exc = exc_info[1]\n if exc is None:\n # str exception\n result = exc_info[0]\n else:\n try:\n result = str(exc)\n except UnicodeEncodeError:\n try:\n result = unicode(exc) # flake8: noqa\n except UnicodeError:\n # Fallback to args as neither str nor\n # unicode(Exception(u'\\xe6')) work in Python < 2.6\n result = exc.args[0]\n return result", "def get_exception():\n trace = ''\n exception = ''\n exc_list = 
traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)", "def get_traceback_stxt():\n #/\n exc_cls, exc_obj, tb_obj = sys.exc_info()\n\n #/\n txt_s = traceback.format_exception(exc_cls, exc_obj, tb_obj)\n\n #/\n res = ''.join(txt_s)\n\n return res", "def create_from_exception(self, exc_info=None, **kwargs):\n if not exc_info:\n exc_info = sys.exc_info()\n\n exc_type, exc_value, exc_traceback = exc_info\n\n def shorten(var):\n var = transform(var)\n if isinstance(var, basestring) and len(var) > 200:\n var = var[:200] + '...'\n return var\n\n reporter = ExceptionReporter(None, exc_type, exc_value, exc_traceback)\n frames = varmap(shorten, reporter.get_traceback_frames())\n\n if not kwargs.get('view'):\n # This should be cached\n modules = get_installed_apps()\n if conf.INCLUDE_PATHS:\n modules = set(list(modules) + conf.INCLUDE_PATHS)\n\n def iter_tb_frames(tb):\n while tb:\n yield tb.tb_frame\n tb = tb.tb_next\n \n def contains(iterator, value):\n for k in iterator:\n if value.startswith(k):\n return True\n return False\n \n # We iterate through each frame looking for an app in INSTALLED_APPS\n # When one is found, we mark it as last \"best guess\" (best_guess) and then\n # check it against SENTRY_EXCLUDE_PATHS. If it isnt listed, then we\n # use this option. If nothing is found, we use the \"best guess\".\n best_guess = None\n view = None\n for frame in iter_tb_frames(exc_traceback):\n try:\n view = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])\n except:\n continue\n if contains(modules, view):\n if not (contains(conf.EXCLUDE_PATHS, view) and best_guess):\n best_guess = view\n elif best_guess:\n break\n if best_guess:\n view = best_guess\n \n if view:\n kwargs['view'] = view\n\n data = kwargs.pop('data', {}) or {}\n if hasattr(exc_type, '__class__'):\n exc_module = exc_type.__class__.__module__\n else:\n exc_module = None\n data['__sentry__'] = {\n 'exc': map(transform, [exc_module, exc_value.args, frames]),\n }\n\n if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):\n origin, (start, end) = exc_value.source\n data['__sentry__'].update({\n 'template': (origin.reload(), start, end, origin.name),\n })\n kwargs['view'] = origin.loadname\n \n tb_message = '\\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback))\n\n kwargs.setdefault('message', transform(force_unicode(exc_value)))\n\n return self.process(\n class_name=exc_type.__name__,\n traceback=tb_message,\n data=data,\n **kwargs\n )", "def print_exc_plus():\n tb = sys.exc_info()[2]\n while tb.tb_next:\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print \"Locals by frame, innermost last\"\n for frame in stack:\n print\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)\n for key, value in frame.f_locals.items():\n print \"\\t%20s = \" % key,\n try: print value\n except: print \"<ERROR WHILE PRINT VALUE>\"", "def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)", "def format_exc(exc=None):\r\n if exc is None:\r\n exc = _exc_info()\r\n if exc == (None, None, None):\r\n return \"\"\r\n import traceback\r\n return 
\"\".join(traceback.format_exception(*exc))", "def _exc_info_to_string(self, err, test):\n\t\texctype, value, tb = err\n\t\t# Skip test runner traceback levels\n\t\twhile tb and self._is_relevant_tb_level(tb):\n\t\t\ttb = tb.tb_next\n\n\t\tif exctype is test.failureException:\n\t\t\t# Skip assert*() traceback levels\n\t\t\tlength = self._count_relevant_tb_levels(tb)\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb, length)\n\t\telse:\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb)\t\t\n\t\treturn ''.join(msgLines)", "def exception_data(self) -> typing.Optional[dict]:\n return self._exception_data", "def graphical_exception_handler(self, exc_type, exc_value, exc_tb):\n bugdialog.ShowEI(exc_type, exc_value, exc_tb)\n if compat.PYTHON2: sys.exc_clear()", "def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self.log_kv({\n 'python.exception.type': exc_type,\n 'python.exception.val': exc_val,\n 'python.exception.tb': exc_tb,\n })\n self.finish()", "def get_traceback_html(self):\n\n if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):\n self.template_does_not_exist = True\n self.loader_debug_info = []\n for loader in template_source_loaders:\n try:\n module = import_module(loader.__module__)\n if hasattr(loader, '__class__'):\n source_list_func = loader.get_template_sources\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n source_list_func = module.get_template_sources\n # NOTE: This assumes exc_value is the name of the template that\n # the loader attempted to load.\n template_list = [{'name': t, 'exists': os.path.exists(t)} \\\n for t in source_list_func(str(self.exc_value))]\n except (ImportError, AttributeError):\n template_list = []\n if hasattr(loader, '__class__'):\n loader_name = loader.__module__ + '.' + loader.__class__.__name__\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n loader_name = loader.__module__ + '.' 
+ loader.__name__\n self.loader_debug_info.append({\n 'loader': loader_name,\n 'templates': template_list,\n })\n if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and\n isinstance(self.exc_value, TemplateSyntaxError)):\n self.get_template_exception_info()\n\n frames = self.get_traceback_frames()\n for i, frame in enumerate(frames):\n if 'vars' in frame:\n frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]\n frames[i] = frame\n\n unicode_hint = ''\n if self.exc_type and issubclass(self.exc_type, UnicodeError):\n start = getattr(self.exc_value, 'start', None)\n end = getattr(self.exc_value, 'end', None)\n if start is not None and end is not None:\n unicode_str = self.exc_value.args[1]\n unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')\n t = get_template(\"500_metanas.html\")\n #t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')\n c = Context({\n 'is_email': self.is_email,\n 'unicode_hint': unicode_hint,\n 'frames': frames,\n 'request': self.request,\n 'settings': debug.get_safe_settings(),\n 'sys_executable': sys.executable,\n 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],\n 'server_time': datetime.datetime.now(),\n 'sw_version': get_sw_version(),\n 'sys_path': sys.path,\n 'template_info': self.template_info,\n 'template_does_not_exist': self.template_does_not_exist,\n 'loader_debug_info': self.loader_debug_info,\n })\n # Check whether exception info is available\n if self.exc_type:\n c['exception_type'] = self.exc_type.__name__\n if self.exc_value:\n c['exception_value'] = smart_unicode(self.exc_value, errors='replace')\n if frames:\n c['lastframe'] = frames[-1]\n return t.render(c)", "def exc_message(exc_info):\n exc = exc_info[1]\n if exc is None:\n # str exception\n result = exc_info[0]\n else:\n try:\n result = str(exc)\n except UnicodeEncodeError:\n try:\n result = str(exc)\n except UnicodeError:\n # Fallback to args as neither str nor\n # unicode(Exception(u'\\xe6')) work in Python < 2.6\n result = exc.args[0]\n return xml_safe(result)", "def print_exc_plus(tb):\n while 1:\n if not tb.tb_next:\n break\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print(\"Locals by frame, innermost last\")\n for frame in stack:\n print()\n print(\"Frame %s in %s at line %s\" % (frame.f_code.co_name,\n frame.f_code.co_filename,\n frame.f_lineno))\n for key, value in frame.f_locals.items():\n print(\"\\t%20s = \" % key,)\n #We have to be careful not to cause a new error in our error\n #printer! 
Calling str() on an unknown object could cause an\n #error we don't want.\n try:\n print(value)\n except:\n print(\"<ERROR WHILE PRINTING VALUE>\")", "def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))", "def note_exception(self,exc_info = None, cause = None):\n if not exc_info:\n exc_info = sys.exc_info()\n\n exception_type = exc_info[0]\n\n # If no cause was specified, use an appropriate message.\n if not cause:\n cause = \"An exception occurred.\"\n\n self.set_outcome(Result.ERROR, cause)\n exc = exc_info[1]\n msg = StringIO()\n print(exc.__class__.__name__ + ':',file=msg)\n for arg in exc.args:\n print(arg,file=msg)\n self[Result.EXCEPTION] \\\n = msg.getvalue()\n self[Result.TRACEBACK] \\\n = '\\n'.join(traceback.format_tb(exc_info[2]))", "def get_cur_info():\n try:\n raise Exception\n except:\n f = sys.exc_info()[2].tb_frame.f_back\n # return (f.f_code.co_name, f.f_lineno)\n return f.f_code.co_name", "def user_exception(self, frame, exc_info):\n pass", "def handle_exception(e):\n maps = {\n exp.ServiceExp: api_exceptions.ServiceException,\n exp.PermissionExp: api_exceptions.ForbiddenException,\n exp.NotFoundExp: api_exceptions.NotFoundException,\n exp.ValueExp: api_exceptions.BadRequestException,\n exp.BadRequestExp: api_exceptions.BadRequestException,\n }\n raise maps[e.__class__](e.message)", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def hook_server_inspect_exception(self, request_event, reply_event, exc_infos):\r\n task_context = self.hook_get_task_context()\r\n for functor in self._hooks['server_inspect_exception']:\r\n functor(request_event, reply_event, task_context, exc_infos)", "def exception_stacktrace(self):\n # type: () -> list[string_types]\n return self._exception_stacktrace", "def handle_exception(exc_type, exc_value, exc_traceback):\n exc_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)\n exc_msg.insert(0, 'Uncaught exception on processor {}\\n'.format(mpiops.chunk_index))\n exc_msg = \"\".join(exc_msg)\n print(exc_msg, file=sys.stderr)", "def exception_hook(exc_type, exc_value, exc_traceback) -> None:\n log.error(\n \"exception\",\n exception_type=exc_type.__name__,\n exc_info=(exc_type, exc_value, exc_traceback),\n )", "def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. 
Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback", "def help_with_exception():\n global previous_traceback\n if 'last_traceback' in dir(sys):\n if sys.last_traceback != previous_traceback:\n previous_traceback = sys.last_traceback\n parse_last_exception(sys.last_value)", "def exception_data(code):\n try:\n exec(code)\n except Exception, detail:\n return (detail, detail.args,\n '%s: %s' % (detail.__class__.__name__, detail))", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def log_exception(*args, **kwds):\n cls, err = sys.exc_info()[:2]\n logging.exception('Exception in request: %s: %s', cls.__name__, err)", "def info() -> Dict[str, Any]:", "def dump_exception(exception: Exception, with_traceback: types.TracebackType = None, remove_lines: tuple = ()):\n formatted_traceback = traceback.format_exception(\n type(exception),\n exception,\n exception.__traceback__ if not with_traceback else with_traceback\n )\n\n return {\n 'type': type(exception).__name__,\n 'traceback': [line for i, line in enumerate(formatted_traceback) if i not in remove_lines]\n }", "def full_details_exception_handler(exc, context):\n if isinstance(exc, APIException):\n exc.detail = exc.get_full_details()\n\n return exception_handler(exc, context)", "def format_debug(e):\n _, _, tb = sys.exc_info()\n return '1: {doc} \\n2: {exec_info} \\n3: {exec_0} \\n 4: {exec_1} \\n5: {lineno} \\n6: {stack}'.format(\n doc=e.__doc__,\n exec_info=sys.exc_info(),\n exec_0=sys.exc_info()[0],\n exec_1=sys.exc_info()[1],\n lineno=traceback.tb_lineno(sys.exc_info()[2]),\n stack=traceback.print_tb(tb))", "def func_on_exception(*args, **keys):\n try:\n yield\n except Exception as exc:\n reraise = func(*args + (\":\", str(exc)), **keys)\n if not CRDS_EXCEPTION_TRAP:\n # In python-2, distinction between raise and \"raise something\". 
raise doesn't\n # wreck the traceback, raising a new improved exception does.\n raise\n # Augmented, the traceback is trashed from here down but the message is better when caught higher up.\n elif reraise:\n exc_class = keys.pop(\"exception_class\", exc.__class__)\n keys[\"end\"] = \"\"\n raise exc_class(format(*args + (\":\", str(exc)), **keys)) from exc\n else:\n pass # snuff the exception, func() probably issued a log message.", "def exception_hook(self, exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n # ignore keyboard interrupt to support console applications\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n else:\n exc_info = (exc_type, exc_value, exc_traceback)\n # log_msg = '\\n'.join([''.join(traceback.format_tb(exc_traceback)),\n # '{0}: {1}'.format(exc_type.__name__, exc_value)])\n log_msg=\"{} : {}\".format(exc_type.__name__,exc_value)\n log.critical(\"Uncaught exception:\\n {0}\".format(log_msg), exc_info=exc_info)\n\n # trigger message box show\n self._exception_caught.emit(log_msg)", "def exc_to_catch(request):\n return request.param", "def werkzeug_debug_traceback(self, exc_type, exc_value, tb):\n\n orig_type, orig_value, orig_tb = self.einfo\n translated = Traceback(orig_type, orig_value, tb)\n\n # Drop the \"raise\" frame from the traceback.\n translated.frames.pop()\n\n def orig_frames():\n cur = orig_tb\n while cur:\n yield cur\n cur = cur.tb_next\n\n # Append our original frames, overwriting previous source information\n # with the translated Mako line locators.\n for tb, record in zip(orig_frames(), self.records):\n name, line = record[4:6]\n if name:\n new_frame = MakoFrame(orig_type, orig_value, tb, name, line)\n else:\n new_frame = Frame(orig_type, orig_value, tb)\n\n translated.frames.append(new_frame)\n\n return translated", "def debug_info(environ, exc_info):\n context = debug_context(exc_info)\n context.req_vars = sorted(environ.iteritems())\n return DebugRender(context).render()", "def exception(msg='', details={}, exc_info=True):\n logger = logging.getLogger(settings.LOGGER_EXCEPTION)\n logger.exception(msg=msg or sys.exc_info(), extra=details, exc_info=exc_info)", "def exceptionType(self):\n return ExceptionType.GeneralException", "def exception_hook(cls, etype, value, tb):\n import traceback\n\n # Print exception\n traceback.print_exception(etype, value, tb)\n\n # Log exception\n stacktrace_msg = ''.join(traceback.format_tb(tb))\n if etype:\n exception_msg = '{0}: {1}'.format(etype, value)\n else:\n exception_msg = 'Exception: {}'.format(value)\n\n LOGGER.critical(stacktrace_msg)\n LOGGER.critical(exception_msg)\n\n # Write to exception log file\n exception_file_name = datetime.now().strftime('RenderKnecht_Exception_%Y-%m-%d_%H%M%S.log')\n exception_file = Path(get_settings_dir()) / exception_file_name\n\n with open(exception_file, 'w') as f:\n traceback.print_exception(etype, value, tb, file=f)\n\n # Inform GUI of exception if QApplication set\n if cls.app:\n gui_msg = f'{stacktrace_msg}\\n{exception_msg}'\n cls.send_exception_signal(gui_msg)", "def exception(self):\n return self._exception", "def aboutException(self, ID=None, exception=None, nowForget=False):\n if ID:\n pastInfo = self.getInfo(ID, 'aboutException', nowForget)\n if pastInfo:\n return pastInfo\n if exception:\n lineList = [\"Exception '{}'\".format(repr(exception))]\n else:\n stuff = sys.exc_info()\n lineList = [\"Exception '{}'\".format(stuff[1])]\n callInfo = self.aboutCall()\n if callInfo:\n lineList.append(\n \" doing call 
'{}':\".format(callInfo))\n self._divider(lineList)\n if not exception:\n lineList.append(\"\".join(traceback.format_tb(stuff[2])))\n del stuff\n text = self._formatList(lineList)\n return self.saveInfo('aboutException', text, ID)", "def format_exception(exception_type, class_name = 'No classname', message = 'Formated exception', debug_info = {}):\n\tcheck_class(exception_type, Exception)\n\tcheck_type(class_name, StringType)\n\tcheck_type(message, StringType)\n\tcheck_type(debug_info, DictType)\n\n\tdebug = []\n\tfor k in debug_info:\n\t\tdebug.append('{0}: {1}'.format(k, debug_info[k]))\n\texc = exception_type('{0}, \"{1}\" - debug: ({2})'.format(class_name, message, ', '.join(debug)))\n\treturn exc", "def _thread_except_hook(exceptions, args):\n if issubclass(args.exc_type, SystemExit):\n return\n # cannot store the exception, it references the thread's stack\n exceptions.append((\n args.exc_type,\n str(args.exc_value),\n ''.join(\n traceback.format_exception(\n args.exc_type, args.exc_value, args.exc_traceback,\n ),\n ),\n ))", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. 
Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def handle_exception(*exc_info):\n import logging\n import traceback\n\n logging.critical(\"\".join(traceback.format_exception(*exc_info)))", "def exception_handler(exception_type, value, tb_obj):\n tb = '\\n'.join(traceback.format_tb(tb_obj))\n txt = 'Traceback (most recent call last):\\n' + tb + '\\n' + exception_type.__name__ + ': ' + str(value)\n print(txt)\n logger.error(_(\"Uncaught exception: \") + txt)\n QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), txt)", "def format_stack_trace(exc_info):\n if exc_info[0] is None:\n return ''\n lines = traceback.format_exception(*exc_info)\n return ''.join(line for line in lines)", "def exc_info(self) -> Optional[str]:\n warnings.warn(\"job.exc_info is deprecated, use job.latest_result() instead.\", DeprecationWarning)\n\n from .results import Result\n\n if self.supports_redis_streams:\n if not self._cached_result:\n self._cached_result = self.latest_result()\n\n if self._cached_result and self._cached_result.type == Result.Type.FAILED:\n return self._cached_result.exc_string\n\n return self._exc_info", "def get_exceptions_results(self, results):\n agent_exceptions = {}\n for result in results:\n # strip leading/trailing quotes\n exc_tag = result.get(3).strip(\"'\")\n if exc_tag not in agent_exceptions:\n agent_exceptions[exc_tag] = {}\n\n ts_date = result.get(1)\n if AGENT_ERROR_KEY_BY_TIME:\n # use hours and minutes only\n ts_time = re.compile(r'(\\d+:\\d+).+').search(result.get(2))[1]\n key = \"{}_{}\".format(ts_date, ts_time)\n else:\n key = str(ts_date)\n\n if key not in agent_exceptions[exc_tag]:\n agent_exceptions[exc_tag][key] = 0\n\n agent_exceptions[exc_tag][key] += 1\n\n if not agent_exceptions:\n return\n\n for exc_type in agent_exceptions:\n agent_exceptions_sorted = {}\n for k, v in sorted(agent_exceptions[exc_type].items(),\n key=lambda x: x[0]):\n agent_exceptions_sorted[k] = v\n\n agent_exceptions[exc_type] = agent_exceptions_sorted\n\n return agent_exceptions", "def set_error(self, exc_info):\n self.exc_info = exc_info\n if exc_info is None:\n self.meta_classes = {}\n self.meta_functions = {}", "def _exc_info_to_string(self, err, test):\r\n exctype, value, tb = err\r\n # Skip test runner traceback levels\r\n while tb and self._is_relevant_tb_level(tb):\r\n tb = tb.tb_next\r\n if exctype is test.failureException:\r\n # Skip assert*() traceback levels\r\n length = self._count_relevant_tb_levels(tb)\r\n msgLines = traceback.format_exception(exctype, value, tb, length)\r\n else:\r\n msgLines = traceback.format_exception(exctype, value, tb)\r\n \r\n if self.buffer:\r\n output = sys.stdout.getvalue()\r\n error = sys.stderr.getvalue() \r\n if output:\r\n if not output.endswith('\\n'):\r\n output += '\\n'\r\n msgLines.append(STDOUT_LINE % output)\r\n if error:\r\n if not error.endswith('\\n'):\r\n error += '\\n'\r\n msgLines.append(STDERR_LINE % error)\r\n return ''.join(msgLines)", "def exception(self, *args, **kwargs):", "def exception_handler(exception_type, value, tb_obj):\n tb = '\\n'.join(traceback.format_tb(tb_obj))\n text = 'Traceback (most recent call last):\\n' + tb + '\\n' + exception_type.__name__ + ': ' + str(value)\n print(text)\n logger.error(_(\"Uncaught exception: \") + text)\n QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the 
call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def formatException(cls, instance, trcback, context=1):\n\n\tstack = extractStack(getInnerMostFrame(trcback), context=context)\n\toutput = []\n\toutput.append(\"Traceback (most recent call last):\")\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\toutput.append(\" File \\\"{0}\\\", line {1}, in {2}\".format(fileName, lineNumber, name))\n\t\tfor line in context:\n\t\t\toutput.append(\" {0}\".format(line.strip()))\n\tfor line in traceback.format_exception_only(cls, instance):\n\t\toutput.append(\"{0}\".format(line))\n\treturn output", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "def exception_message(self):\n # type: () -> string_types\n return self._exception_message", "def get_info(self): \n return {\n \"ident\": self.ident,\n \"interval\": self._interval,\n \"exception\": self._exception,\n \"execute\": self._execute,\n \"args\": self._args,\n \"kwargs\": self._kwargs}", "def format_exception_only(exc):\r\n exc_type = type(exc)\r\n\r\n stype = exc_type.__qualname__\r\n smod = exc_type.__module__\r\n if smod not in (\"__main__\", \"builtins\"):\r\n stype = smod + '.' 
+ stype\r\n try:\r\n _str = str(exc)\r\n except:\r\n _str = \"<unprintable {} object>\".format(exc_type.__name__)\r\n\r\n if _str == \"None\" or not _str:\r\n line = \"{}\\n\".format(stype)\r\n else:\r\n line = \"{}: {}\\n\".format(stype, _str)\r\n return line", "def traceback(self):", "def print_exception_formatted(type, value, tb):\n\n tbtext = \"\".join(traceback.format_exception(type, value, tb))\n lexer = get_lexer_by_name(\"pytb\", stripall=True)\n formatter = TerminalFormatter()\n sys.stderr.write(highlight(tbtext, lexer, formatter))", "def ips_excepthook(excType, excValue, traceback, frame_upcount=0):\n\n assert isinstance(frame_upcount, int)\n\n # first: print the traceback:\n tb_printer = TBPrinter(excType, excValue, traceback)\n\n # go down the stack\n tb = traceback\n tb_frame_list = []\n while tb.tb_next is not None:\n tb_frame_list.append(tb.tb_frame)\n tb = tb.tb_next\n\n critical_frame = tb.tb_frame\n tb_frame_list.append(critical_frame)\n\n tb_frame_list.reverse()\n # now the first frame in the list is the critical frame where the exception occured\n index = 0\n diff_index = frame_upcount\n\n # this allows to repeat the traceback inside the interactive function\n\n def __ips_print_tb(**kwargs):\n return tb_printer.printout(end_offset=index, **kwargs)\n\n while diff_index is not None:\n index += diff_index\n tb_printer.printout(end_offset=index)\n print(\"\\n\")\n current_frame = tb_frame_list[index]\n diff_index = IPS(frame=current_frame, ns_extension={\"__ips_print_tb\": __ips_print_tb}, print_tb=False)", "def process_exception(self, request, exc):\n return None", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = format_exc()\n app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return jsonify(message=\"Internal Server Error\"), 500" ]
[ "0.77512074", "0.7694684", "0.76201236", "0.72803456", "0.70208013", "0.668976", "0.66021264", "0.64593", "0.64557695", "0.6422902", "0.6302152", "0.6271954", "0.61457765", "0.60565907", "0.60373074", "0.60152197", "0.5867774", "0.5857275", "0.58364946", "0.58230966", "0.5801351", "0.5797989", "0.57829416", "0.5780941", "0.57725173", "0.57637465", "0.57137674", "0.57106507", "0.5644071", "0.5600948", "0.5591652", "0.5590398", "0.558358", "0.55731213", "0.55497354", "0.5527209", "0.5485745", "0.5475005", "0.5465063", "0.5448223", "0.54192924", "0.5416888", "0.5395312", "0.5389752", "0.53877425", "0.5360314", "0.5348661", "0.53300065", "0.5327883", "0.53264856", "0.53112924", "0.5281348", "0.5261831", "0.5251697", "0.5250081", "0.5247469", "0.52471066", "0.5228829", "0.5227795", "0.5226032", "0.5207134", "0.5203467", "0.5187047", "0.51762253", "0.51736695", "0.51668674", "0.5160874", "0.515857", "0.51585376", "0.51558834", "0.5154255", "0.51445377", "0.51423967", "0.51366735", "0.5135989", "0.51348495", "0.5134651", "0.51340675", "0.51260483", "0.5123384", "0.5107271", "0.5099031", "0.5096079", "0.5095987", "0.5069628", "0.5045337", "0.5039354", "0.5032114", "0.5030288", "0.50222534", "0.50141126", "0.5005597", "0.49994853", "0.4998435", "0.49967647", "0.49690193", "0.49655345", "0.49627548", "0.4962161", "0.49596268" ]
0.6381562
10
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        with suppress(ImportError):
            source = loader.get_source(module_name)
    if source is not None:
        source = source.splitlines()
    if source is None:
        with suppress(OSError, IOError):
            with open(filename, "rb") as fp:
                source = fp.read().splitlines()
    if source is None:
        return None, [], None, []

    try:
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], bytes):
            encoding = "ascii"
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br"coding[:=]\s*([-\w.]+)", line)
                if match:
                    encoding = match.group(1).decode("ascii")
                    break
            source = [str(sline, encoding, "replace") for sline in source]

        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines

        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1 : upper_bound]

        return lower_bound, pre_context, context_line, post_context
    except Exception as e:
        try:
            context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
        except Exception:
            context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
        return lineno, [], context_line, []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_lines_from_file(filename, lineno, context_lines):\n\n try:\n source = open(filename).readlines()\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = \\\n [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = \\\n [line.strip('\\n') for line in source[lineno + 1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context\n except (OSError, IOError):\n return None, [], None, []", "def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n source = loader.get_source(module_name)\n if source is not None:\n source = source.splitlines()\n if source is None:\n try:\n f = open(filename)\n try:\n source = f.readlines()\n finally:\n f.close()\n except (OSError, IOError):\n pass\n if source is None:\n return None, [], None, []\n\n encoding = 'ascii'\n for line in source[:2]:\n # File coding may be specified. Match pattern from PEP-263\n # (http://www.python.org/dev/peps/pep-0263/)\n match = re.search(r'coding[:=]\\s*([-\\w.]+)', line)\n if match:\n encoding = match.group(1)\n break\n source = [unicode(sline, encoding, 'replace') for sline in source]\n\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = [line.strip('\\n') for line in source[lineno+1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context", "def get_source_lines(self, filename, lineno, context=0):\n if not filename or not lineno:\n return ''\n\n return ''.join([' ' + linecache.getline(filename, line) for line in range(lineno - context, lineno + context + 1)])", "def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset", "def _diffContext(diff, n=3):\n nlines = len(diff)\n clines = set() # set of lines to include\n for i, line in enumerate(diff):\n if line[0] != ' ':\n clines |= set(range(max(0, i-n), min(i+n+1, nlines)))\n context = []\n clines = list(clines)\n clines.sort()\n last = -1\n for i in clines:\n if i != last+1:\n context.append(\" ...\\n\")\n context.append((\"%4d: \"%i) + diff[i])\n last = i\n if clines[-1] != nlines-1:\n context.append(\" ...\\n\")\n return context", "def line_range(self) -> Tuple[int, int]:\n if self._line_range is None:\n node_extent = self.node.extent\n comment_extent = self.node.comment_extent\n if comment_extent.start.file is None:\n comment_extent = node_extent\n\n self._line_range = (\n min(node_extent.start.line, comment_extent.start.line),\n max(node_extent.end.line, comment_extent.end.line),\n )\n\n return self._line_range", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n result = []\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n result.append((addr, lineno))\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n result.append((addr, lineno))\n return result", "def outerLineno2():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_back.f_lineno", "def 
getlineno(frame):\r\n # FrameType.f_lineno is now a descriptor that grovels co_lnotab\r\n return frame.f_lineno", "def get_linepos(self, pos):\n lnum, cnum = self._get_linepos(pos)\n return lnum + self.LINE_NUM_BASE, cnum", "def linenum(self):\n return self.source_frame_stack.linenum()", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n yield (addr, lineno)\n lastlineno = lineno\n addr += byte_incr\n if line_incr >= 0x80:\n # line_increments is an array of 8-bit signed integers\n line_incr -= 0x100\n lineno += line_incr\n if lineno != lastlineno:\n yield (addr, lineno)", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def currentLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_lineno", "def get_lineno(self):\n return self.lexer.get_lineno()", "def lineno():\n\treturn inspect.currentframe().f_back.f_lineno", "def lineno():\r\n\treturn inspect.currentframe().f_back.f_lineno", "def GetLineno():\n return inspect.currentframe().f_back.f_lineno", "def _get_linepos(self, pos):\n t = self.input\n if pos < 0 or pos > len(t):\n raise IndexError(\"position %d not in 0..%d\" % (pos, len(t)))\n\n lpc = self.__linepos\n\n # Locate the smallest known line index whose end is at or after p.\n def locate(p):\n self._update_linetab(p)\n lo = 0\n hi = len(lpc) - 1\n if lpc[hi] < p:\n return hi\n\n # Invariant: lpc[lo] < p; lpc[hi] >= p\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if lpc[mid] > p: hi = mid\n elif lpc[mid] < p: lo = mid\n else: return mid - 1\n return hi - 1\n\n lnum = locate(pos)\n start, end = self._get_linespan(lnum)\n cnum = pos - start\n return lnum, cnum", "def extract_lines(infile):\n with open(infile, 'r') as src:\n return read_on(get_line, src)", "def get_frame_info(tb, context_lines=7):\n # line numbers / function / variables\n lineno = tb.tb_lineno\n function = tb.tb_frame.f_code.co_name\n variables = tb.tb_frame.f_locals\n\n # get filename\n fn = tb.tb_frame.f_globals.get('__file__')\n if not fn:\n fn = _os.path.realpath(\n _inspect.getsourcefile(tb) or _inspect.getfile(tb)\n )\n if fn[-4:] in ('.pyc', '.pyo'):\n fn = fn[:-1]\n\n # module name\n modname = tb.tb_frame.f_globals.get('__name__')\n\n # get loader\n loader = tb.tb_frame.f_globals.get('__loader__')\n\n # sourcecode\n try:\n if not loader is None:\n source = loader.get_source(modname)\n else:\n source = file(fn).read()\n except (SystemExit, KeyboardInterrupt):\n raise\n except:\n source = ''\n pre_context, post_context = [], []\n context_line, context_lineno = None, None\n else:\n parser = PythonParser(source)\n parser.parse()\n parsed_source = parser.get_html_output()\n lbound = max(0, lineno - context_lines - 1)\n ubound = lineno + context_lines\n try:\n context_line = 
parsed_source[lineno - 1]\n pre_context = parsed_source[lbound:lineno - 1]\n post_context = parsed_source[lineno:ubound]\n except IndexError:\n context_line = None\n pre_context = post_context = [], []\n context_lineno = lbound\n\n return {\n 'tb': tb,\n 'filename': fn,\n 'loader': loader,\n 'function': function,\n 'lineno': lineno,\n 'vars': variables,\n 'pre_context': pre_context,\n 'context_line': context_line,\n 'post_context': post_context,\n 'context_lineno': context_lineno,\n 'source': source\n }", "def lineno():\n\n return inspect.currentframe().f_back.f_lineno", "def get_lines_in_file(config_file):\n lines = []\n\n line = config_file.readline()\n lines.append([1, line])\n\n line_counter = 1\n while line:\n line = config_file.readline()\n if not (line.lstrip().startswith(\"#\")):\n lines.append([line_counter, line])\n\n line_counter += 1\n\n return lines", "def outerLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_lineno", "def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"", "def get_annotated_lines(self):\n lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]\n\n try:\n lines[self.lineno - 1].current = True\n except IndexError:\n pass\n\n return lines", "def scope_start_line_nos(scope: Scope,\n from_line_no: Optional[int] = None) -> List[int]:\n candidates: List[int] = []\n\n # Consider the first line for this scope's root expression, if any\n events = cast(Iterable[ExprStart], scope.iter_events(filter=ExprStart))\n try:\n line_no = next(iter(events)).line_no\n except StopIteration:\n # If there is no root expression, just use the first scope line. It's\n # degraded mode because users are interested in expressions rather than\n # scopes.\n candidates.append(scope.line_range.first_line)\n else:\n candidates.append(line_no)\n\n # Consider memoization lookup points for properties\n if isinstance(scope, Property):\n lookup_scope = scope.memoization_lookup\n if lookup_scope:\n candidates.append(lookup_scope.line_range.first_line)\n\n # Filter candidates if needed with `from_line_no`\n if from_line_no:\n candidates = [l for l in candidates if from_line_no < l]\n\n return candidates", "def _expected_lines_and_line_numbers(path, check_prefix):\n with open(path) as f:\n for index, line in enumerate(f):\n if 'RUN:' in line:\n # Ignore lit directives, which may include a call to\n # xctest_checker that specifies a check prefix.\n continue\n\n # Note that line numbers are not zero-indexed; we must add one to\n # the loop index.\n line_number = index + 1\n\n components = line.split(check_prefix)\n if len(components) == 2:\n yield (replace_offsets(components[1].strip(), line_number),\n line_number)\n elif len(components) > 2:\n # Include a newline, then the file name and line number in the\n # exception in order to have it appear as an inline failure in\n # Xcode.\n raise XCTestCheckerError(\n path, line_number,\n 'Usage violation: prefix \"{}\" appears twice in the same '\n 'line.'.format(check_prefix))", "def filename_line(skip: int = 2) -> Tuple[str, int]:\n stack = inspect.stack()\n start = skip\n parentframe = stack[start][0]\n\n filename = 'N/A'\n module = inspect.getmodule(parentframe)\n if module:\n filename = os.path.basename(os.path.realpath(module.__file__))\n\n return filename, parentframe.f_lineno", "def _line_offsets(self, snapshot: Bug, filepath: str) -> List[int]:\n logger.debug(\"Fetching line offsets for file, '%s', in snapshot, '%s'\", # noqa: pycodestyle\n filepath,\n snapshot.name)\n key_cache = 
(snapshot.name, filepath)\n if key_cache in self.__cache_offsets:\n logger.debug(\"Retrieving line offsets for file, '%s', in snapshot, '%s', from cache.\", # noqa: pycodestyle\n filepath,\n snapshot.name)\n return self.__cache_offsets[key_cache]\n\n logger.debug(\"Computing line offsets for file, '%s', in snapshot, '%s'\", # noqa: pycodestyle\n filepath,\n snapshot.name)\n contents = self.read_file(snapshot, filepath)\n\n # find all indices of newline characters\n offsets = [0]\n last_offset = 0\n while True:\n next_line_break = contents.find('\\n', last_offset)\n if next_line_break == -1:\n break\n last_offset = next_line_break + 1\n offsets.append(last_offset)\n\n logger.debug(\"Saving line offsets for file, '%s', in snapshot, '%s', to cache.\", # noqa: pycodestyle\n filepath,\n snapshot.name)\n self.__cache_offsets[key_cache] = offsets\n return offsets", "def lineno(self):\n return str('line: ' + str(inspect.currentframe().f_back.f_lineno))", "def revision_vs_lines(\n self, path_in_repo: Path, content: TextDocument, context_lines: int\n ) -> List[int]:\n old = git_get_content_at_revision(\n path_in_repo, self.revrange.rev1, self.git_root\n )\n edited_opcodes = diff_and_get_opcodes(old, content)\n return list(opcodes_to_edit_linenums(edited_opcodes, context_lines))", "def position_before_code(filename=None):\n\n if filename is None:\n raise ValueError\n\n with open(filename, 'r') as f:\n content = f.read()\n\n # find \"import\" lines - if they exist\n pattern = r'[\\n\\r]\\s*import[^\\n\\r]*;'\n it = re.finditer(pattern, content, re.DOTALL)\n\n last = None\n # next \"empty\" for loop is intended to advance iterator to last match\n for match in it:\n last = match\n\n if last is None:\n # no imports in file\n return position_before_class(content)\n else:\n # found import lines - last holds last match in file\n return last.end()", "def get_corresponding_lineno(self, lineno: int) -> int:\n for template_line, code_line in reversed(self.debug_info):\n if code_line <= lineno:\n return template_line\n return 1", "def ReadOffsets(infile):\n\n map_old2new = {}\n\n for line in infile:\n if line[0] == \"#\":\n continue\n chr, res_from, res_to, offset = line[:-1].split(\"\\t\")[:4]\n res_from, res_to, offset = map(int, (res_from, res_to, offset))\n if chr not in map_old2new:\n map_old2new[chr] = []\n map_old2new[chr].append((res_from, res_to, offset))\n\n breakpoints = {}\n endpoints = {}\n offsets = {}\n\n for chr in map_old2new.keys():\n r = map_old2new[chr]\n r.sort()\n breakpoints[chr] = map(lambda x: x[0], r)\n endpoints[chr] = map(lambda x: x[1], r)\n offsets[chr] = map(lambda x: x[2], r)\n\n return breakpoints, endpoints, offsets", "def get_previous_lines(self, num):\n (ending_daf, ending_line, foo) = self._get_from_history(1)\n joined_lines = \" \".join([self._get_from_history(i)[2].strip() for i in range(num, 0, -1)])\n return ending_daf, ending_line, joined_lines", "def lineno(self):\n return str(' Line: ' + str(inspect.currentframe().f_back.f_lineno))", "def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]", "def lineno(self):\n return self._lineno", "def compute_linemap(self, filename):\n points = self.point_symbol_info.get(filename, dict())\n\n line_to_points = dict()\n for fn, points in points.items():\n for point, loc in points.items():\n line = int(loc.split(\":\")[0])\n line_to_points.setdefault(line, []).append(point)\n\n result = dict()\n for line, points in 
line_to_points.items():\n status = \"covered\"\n covered_points = self.covered_points & set(points)\n if not len(covered_points):\n status = \"not-covered\"\n elif len(covered_points) != len(points):\n status = \"partially-covered\"\n result[line] = status\n return result", "def get_lines( self ):\n return self._line_info", "def get_lines( self ):\n return self._line_info", "def context(self) -> Optional[str]:\n displayed_line = self.editor_line\n displayed_column = self.editor_column\n # we want to avoid displaying a blank line for context. If we're on a blank line\n # find the nearest line above us that isn't blank.\n while displayed_line >= 1 and not len(self._lines[displayed_line - 1].strip()):\n displayed_line -= 1\n displayed_column = len(self._lines[displayed_line - 1])\n\n # only show context if we managed to find a non-empty line\n if len(self._lines[displayed_line - 1].strip()):\n formatted_source_line = expand_tabs(self._lines[displayed_line - 1]).rstrip(\n _NEWLINE_CHARS\n )\n # fmt: off\n return (\n f\"{formatted_source_line}\\n\"\n + f\"{' ' * (displayed_column - 1)}^\"\n )\n # fmt: on\n else:\n return None", "def contextPosition(self):\n ret = libxml2mod.xmlXPathGetContextPosition(self._o)\n return ret", "def lineno():\n return str(' - Statement - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno", "def get_selinux_context(path):\n ret_code, context = selinux.lgetfilecon(path)\n if ret_code != -1:\n return context.split(':', 3)\n else:\n return False", "def _context(self, tokens, i):\n # Left context\n j = i - 1\n while j >= 0 and not self._CONTEXT_RE.match(tokens[j]):\n j -= 1\n left = tokens[j] if j != 0 else \"*START*\"\n\n # Right context\n j = i + 1\n while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]):\n j += 1\n right = tokens[j] if j != len(tokens) else \"*END*\"\n\n return (left, right)", "def get_lines_with_start(cls, frame, radius=5, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\t\n\t\tif not code: \n\t\t\treturn []\n\t\telse:\n\t\t\tcode_lines = code.splitlines()\n\t\n\t\tif not radius:\n\t\t\treturn code_lines, 1\n\t\telse:\n\t\t\tblock_slice = cls._calc_block_ends(frame.f_lineno, len(code_lines), radius)\n\t\t\treturn code_lines[block_slice], block_slice.start", "def get_lines_from_file(fname, context=None):\n content = []\n if context and context.ddboost:\n contents = get_lines_from_dd_file(fname, context.ddboost_storage_unit)\n return contents\n else:\n with open(fname) as fd:\n for line in fd:\n content.append(line.strip('\\n'))\n return content", "def lineOffset(self):\n if self.__lineOffset is None:\n self.__lineOffset = self.__offset - self.__source.rfind(\"\\n\", 0, self.__offset) - 1\n\n return self.__lineOffset", "def _get_linespan(self, lnum):\n lcount = self.get_linecount()\n _, q, _ = slice(lnum).indices(lcount)\n if q < 0 or q >= lcount:\n raise IndexError(\"line number %d not in 0..%d\" % (q, lcount))\n\n start = self.__linepos[q] + 1\n if q < lcount - 1:\n end = self.__linepos[q + 1]\n else:\n end = len(self.input) - 1\n\n return start, end + 1", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos 
- i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def getsourcelines(object):\r\n lines, lnum = findsource(object)\r\n\r\n if ismodule(object): return lines, 0\r\n else: return getblock(lines[lnum:]), lnum + 1", "def line_number(self):\n return self._line_number", "def find_line_offsets(self):\n # line 0 doesn't exist; line 1 starts at char offset 0.\n self.line_offsets = [None, 0]\n # Find all newlines in `text`, and add an entry to\n # line_offsets for each one.\n pos = self.text.find('\\n')\n while pos != -1:\n self.line_offsets.append(pos+1)\n pos = self.text.find('\\n', pos+1)\n # Add a final entry, marking the end of the string.\n self.line_offsets.append(len(self.text))", "def pred_pos_contexts(pred_contexts_path):\n with open(os.path.join(pred_contexts_path, 'contexts.json'), 'r', errors='ignore') as infile:\n contexts = json.load(infile)\n logger.info('The number of contexts: %d', len(contexts))\n pred_pos = []\n with open(os.path.join(pred_contexts_path, 'folds.json'), 'r', errors='ignore') as json_file:\n folds = json.load(json_file)\n for fold_id in folds:\n pred_pos.extend([contexts[context] for context in folds[fold_id]['vot_pred_pos']])\n logger.debug(pred_pos)\n return pred_pos", "def _get_line_regions_from_xml_file(filename: str) -> List[Dict[str, int]]:\n xml_root_element = ElementTree.parse(filename).getroot() # nosec\n xml_line_elements = xml_root_element.findall(\"handwritten-part/line\")\n return [_get_line_region_from_xml_element(el) for el in xml_line_elements]", "def get_source_line (self):\n\n # pylint: disable=no-member\n if 'meta' in self:\n return os.path.join (self.meta.path, self.meta.filename), self.meta.lineno\n if self.parent:\n return self.parent.get_source_line ()\n return '<unknown>', 0", "def set_encapsulated_attribs_line_numbers(self) -> Tuple[Any]:\n\n line_numbers = []\n if not isinstance(self.definition, _ast.FunctionDef):\n return tuple(line_numbers)\n\n if self.definition.name != '__init__':\n\n for expressions in self.definition.body:\n if isinstance(expressions, _ast.Assign):\n for target in expressions.targets:\n # check for keyword 'self'\n try:\n value = target.value.id\n if value == 'self':\n line_numbers.append(target.lineno)\n except AttributeError:\n continue\n return tuple(line_numbers)", "def vasp_file_lines(vasp_file, line_continuation=False):\n vasp_file_stripped = (line.rstrip(\"\\n\") for line in vasp_file)\n line_nr = 0\n for line in vasp_file_stripped:\n line_nr += 1\n while line_continuation and line.endswith(\"\\\\\"):\n line = line[:-1] + next(vasp_file_stripped)\n line_nr += 1\n yield line_nr, line", "def line(self):\n loc = self.caller.location\n if loc.ndb.event_line is None:\n loc.ndb.event_line = []\n else: # cleanup line\n for ob in loc.ndb.event_line[:]:\n if hasattr(ob, \"location\") and ob.location != self.caller.location:\n loc.ndb.event_line.remove(ob)\n return loc.ndb.event_line", "def scanpatch(fp):\n lr = patch.linereader(fp)\n\n def scanwhile(first, p):\n \"\"\"scan lr while predicate holds\"\"\"\n lines = [first]\n while True:\n line = lr.readline()\n if not line:\n break\n if p(line):\n lines.append(line)\n else:\n lr.push(line)\n break\n return lines\n\n while True:\n line = lr.readline()\n if not line:\n break\n if line.startswith('diff --git a/'):\n def notheader(line):\n s = line.split(None, 1)\n return not s or s[0] not in ('---', 'diff')\n header = scanwhile(line, notheader)\n fromfile = lr.readline()\n if fromfile.startswith('---'):\n tofile = lr.readline()\n header += [fromfile, 
tofile]\n else:\n lr.push(fromfile)\n yield 'file', header\n elif line[0] == ' ':\n yield 'context', scanwhile(line, lambda l: l[0] in ' \\\\')\n elif line[0] in '-+':\n yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\\\')\n else:\n m = lines_re.match(line)\n if m:\n yield 'range', m.groups()\n else:\n raise patch.PatchError('unknown patch content: %r' % line)", "def getsourcelines(object):\n lines, lnum = findsource(object)\n\n if inspect.ismodule(object): return lines, 0\n else: return inspect.getblock(lines[lnum:]), lnum + 1", "def readMultipleFileLinesAndPositions(filePath,startPosition=None, bytesToRead=1): \n \n f = open(filePath, 'rb') \n \n if not startPosition is None: \n f.seek(startPosition) \n \n lines = f.readlines(bytesToRead) \n position = f.tell() \n \n f.close() \n \n return lines, position", "def _readline_ins(self):\n if self._ins_filehandle is None:\n if not os.path.exists(self._ins_filename):\n raise Exception(\n \"instruction file '{0}' not found\".format(self._ins_filename)\n )\n self._ins_filehandle = open(self._ins_filename, \"r\")\n line = self._ins_filehandle.readline()\n self._ins_linecount += 1\n if line == \"\":\n return None\n self._last_line = line\n # check for spaces in between the markers - this gets ugly\n line = line.lower()\n if self._marker is not None and self._marker in line:\n\n # def find_all(a_str, sub):\n # start = 0\n # while True:\n # start = a_str.find(sub, start)\n # if start == -1:\n # return\n # yield start\n # start += len(sub)\n # poss speedup using regex\n midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]\n # midx = list(find_all(line, self._marker))\n midx.append(len(line))\n first = line[: midx[0]].strip()\n tokens = []\n if len(first) > 0:\n # tokens.append(first)\n tokens.extend([f.strip() for f in first.split()])\n for idx in range(1, len(midx) - 1, 2):\n mstr = line[midx[idx - 1] : midx[idx] + 1]\n ostr = line[midx[idx] + 1 : midx[idx + 1]]\n tokens.append(mstr)\n tokens.extend(ostr.split())\n else:\n tokens = line.strip().split()\n return tokens", "def CountLineNumber(filename):\n\n fp = open(os.path.abspath(filename), \"r\");\n lines = 0\n for line in fp.readlines():\n lines = lines + 1\n fp.close()\n return lines", "def _current_line_nr_gen(self):\n line_nr = -1\n while True:\n line_nr += 1\n yield line_nr", "def get_breakpoints(self, filename=None, lineno=None):\r\n if filename is None:\r\n return self.bpoints.items()\r\n if lineno is None:\r\n return self.bpoints.filter( keys=('filename',),\r\n values=(filename,) )\r\n else:\r\n return self.bpoints.filter( keys=('filename','lineno'), \r\n values=(filename, lineno) )", "def trace_context(n):\n # Drop the first four lines: the clock speed, the threshold, and the first\n # active/inactive pair.\n for line in runcmd([TRACE, n]).splitlines()[2:]:\n if len(line.strip()) is 0:\n continue\n m = re.search(r'^PID: (\\d+).*start at (\\d+).*\\(([\\d\\.]+) ms\\)', line)\n if not m:\n print line\n yield (int(m.group(1)), int(m.group(2)), float(m.group(3)))", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber", "def current_buffer_lines(self):\n # noinspection PyProtectedMember\n return self.current_buffer._working_lines", "def diff2_skipped_lines(request, ps_left_id, ps_right_id, patch_id,\n id_before, id_after, where, column_width,\n tab_spaces=None):\n column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,\n 
django_settings.MIN_COLUMN_WIDTH,\n django_settings.MAX_COLUMN_WIDTH)\n tab_spaces = _clean_int(tab_spaces, django_settings.DEFAULT_TAB_SPACES,\n django_settings.MIN_TAB_SPACES,\n django_settings.MAX_TAB_SPACES)\n\n if where == 'a':\n context = None\n else:\n context = _get_context_for_user(request) or 100\n\n data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, 10000,\n column_width, tab_spaces)\n if isinstance(data, HttpResponse) and data.status_code != 302:\n return data\n return _get_skipped_lines_response(data[\"rows\"], id_before, id_after,\n where, context)", "def get_node_loc(node):\n lineno = node.lineno\n end_lineno = get_last_deep_child(node).lineno\n return end_lineno - lineno", "def get_file_lines(filename):\n if not os.path.isfile(filename):\n logging.error(\"[get_file_lines] %s not found\", filename)\n return -1\n\n if not os.access(filename, os.R_OK):\n logging.error(\"[get_file_lines] %s cannot be read\", filename)\n return -1\n\n i = -1\n with open(filename) as f:\n try:\n for i, l in enumerate(f):\n pass\n except UnicodeDecodeError:\n return -1\n return i + 1", "def outerLinenoN(N):\n frame = inspect.currentframe()\n for i in range(N):\n frame = frame.f_back\n return frame.f_lineno", "def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:\n with open(file_path) as text_file:\n # Highlight code\n code = text_file.read()\n code_lines = code.splitlines()\n # Prepend line number\n code_lines = [\n f\">{lno:3} | {line}\" if line_no == lno else f\"{lno:4} | {line}\"\n for lno, line in enumerate(code_lines, 1)\n ]\n # # Cut out the snippet\n start_line_no = max(0, line_no - context_lines_count - 1)\n end_line_no = line_no + context_lines_count\n code_lines = code_lines[start_line_no:end_line_no]\n # Join lines\n code = \"\\n\".join(code_lines)\n return code", "def get_lines():\n buf = vim.current.buffer\n return buf", "def tokenize_context(context):\n context_sents = sent_tokenize(context)\n start_idx = 0\n result = []\n for sent in context_sents:\n end_idx = start_idx + len(sent)\n result.append([(start_idx, end_idx), sent.strip()])\n start_idx = end_idx + 1\n return result", "def diff2_skipped_lines(request, ps_left_id, ps_right_id, patch_id,\n id_before, id_after, where, column_width):\n column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,\n django_settings.MIN_COLUMN_WIDTH,\n django_settings.MAX_COLUMN_WIDTH)\n\n if where == 'a':\n context = None\n else:\n context = _get_context_for_user(request) or 100\n\n data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, 10000,\n column_width)\n if isinstance(data, HttpResponse) and data.status_code != 302:\n return data\n return _get_skipped_lines_response(data[\"rows\"], id_before, id_after,\n where, context)", "def get_formatted_partial_source(self, filename, linenumber, offset):\n lines = self.get_source(filename)\n if not lines:\n return \"\", \"\"\n\n begin = max(0, linenumber - self.context)\n partial_source, bad_line = highlight_source(\n linenumber,\n linenumber - begin - 1,\n lines[begin : linenumber + 1],\n offset=offset,\n )\n return partial_source, bad_line", "def _recompute_line_offsets(self):\n # TODO: Consider \\r\\n?\n self._line_offsets = [0] + [m.start() + 1 for m in re.finditer('\\n', self._text)]", "def GetParserLineNumber(self):\n ret = libxml2mod.xmlTextReaderGetParserLineNumber(self._o)\n return ret", "def _get_line_number(vcf):\n with open(vcf) as vcf_input_file:\n i = -1\n for line in vcf_input_file:\n i += 1\n return 
i", "def get_current_line(self, document):\r\n return document.get_iter_at_mark(document.get_insert()).get_line() + 1", "def compare_revisions(self, path_in_repo: Path, context_lines: int) -> List[int]:\n content = TextDocument.from_file(self.git_root / path_in_repo)\n return self.revision_vs_lines(path_in_repo, content, context_lines)", "def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines", "def lineno():\n return str(' - IpAddr - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def _get_file_source(self, filename):\n try:\n with open(filename, encoding=\"utf8\") as f:\n lines = f.readlines()\n source = \"\".join(lines)\n except Exception:\n lines = []\n source = None\n return source, lines", "def lines_of_code(project: Project) -> int:\n ret = sh.cloc(\"--quiet\", \"--include-lang=Python\", \"--yaml\", str(project.root))\n ret_obj = list(yaml.safe_load_all(str(ret)))\n return ret_obj[0][\"Python\"][\"code\"]", "def line_at_cursor(code: str, cursor_pos: int = 0):\n offset = 0\n lines = code.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if not line.endswith('\\n'):\n # If the last line doesn't have a trailing newline, treat it as if\n # it does so that the cursor at the end of the line still counts\n # as being on that line.\n next_offset += 1\n if next_offset > cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)", "def line_styles (self):\n return self._line_styles", "def original_line_offsets(code, codes, codes_offsets, codes_lines):\n codes[id(code)] = code # necessary to keep reference to code objects\n offsets = codes_offsets[id(code)] = []\n lines = codes_lines[id(code)] = []\n # ToDo: submit a patch to Python to optimize findlinestarts with iters\n for addr, lineno in findlinestarts(code):\n offsets.append(addr)\n lines.append(lineno)\n\n for const in code.co_consts:\n if isinstance(const, CodeType):\n original_line_offsets(const, codes, codes_offsets, codes_lines)", "def get_line_changed_regions(self, old_line_num, old_line,\n new_line_num, new_line):\n return \\\n super(MarkdownDiffChunkGenerator, self).get_line_changed_regions(\n old_line_num,\n strip_tags(old_line),\n new_line_num,\n strip_tags(new_line))" ]
[ "0.7550956", "0.66815025", "0.635524", "0.60288763", "0.56448764", "0.5592623", "0.55494016", "0.55332536", "0.5494302", "0.5492887", "0.5485184", "0.5472898", "0.5435706", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.5400774", "0.5390808", "0.53847116", "0.5373249", "0.5370138", "0.5334083", "0.53263456", "0.53189445", "0.52991045", "0.5291174", "0.5286108", "0.5258734", "0.52134264", "0.5212952", "0.5208559", "0.5194789", "0.5184049", "0.514881", "0.5135331", "0.51238984", "0.51216", "0.511345", "0.50832397", "0.506809", "0.50510705", "0.5048433", "0.5043223", "0.5031037", "0.5031037", "0.5026478", "0.50241095", "0.5020989", "0.50203025", "0.5019797", "0.500149", "0.49714288", "0.4970207", "0.49600187", "0.49553266", "0.49507618", "0.49455878", "0.49449944", "0.49428484", "0.49182227", "0.49155438", "0.49068785", "0.49054757", "0.48907506", "0.48879516", "0.48875478", "0.48874876", "0.4882896", "0.48704275", "0.48641062", "0.48584455", "0.4852376", "0.48466974", "0.48392713", "0.48303768", "0.48251042", "0.48245093", "0.48236132", "0.48166195", "0.48084864", "0.48042452", "0.4802363", "0.47837895", "0.47691193", "0.47520977", "0.47442988", "0.47402132", "0.47344464", "0.47255337", "0.4725445", "0.4711603", "0.47099185", "0.4707128", "0.4696335", "0.46901068", "0.46737632", "0.46724036" ]
0.68244785
1
Create an exception report and return its location
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
    exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
    if data_processor:
        exception_data = data_processor(exception_data)
    if output_format == "html":
        text = render_exception_html(exception_data)
    elif output_format == "json":
        text = render_exception_json(exception_data)
    else:
        raise TypeError("Exception report format not correctly specified")
    filename = gen_error_filename(extension=output_format)
    report_location = storage_backend.write(filename, text)
    return report_location
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_exception(self, msg: str):", "def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '{0}'.\".format(instance.__doc__ and instance.__doc__.strip() or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tConstants.nullObject))\n\tfor i, line in enumerate(str(instance).split(\"\\n\")):\n\t\theader.append(\"Exception message line no. '{0}' : '{1}'.\".format(i + 1, line))\n\n\tframes = []\n\tfor frame, locals in extractLocals(trcback):\n\t\tframes.append(\"Frame '{0}' in '{1}' at line '{2}':\".format(*frame))\n\t\targuments, namelessArgs, keywordArgs, locals = locals\n\t\tany((arguments, namelessArgs, keywordArgs)) and frames.append(\"{0:>40}\".format(\"Arguments:\"))\n\t\tfor key, value in arguments.iteritems():\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tfor value in namelessArgs:\n\t\t\tframes.append(\"{0:>40}\".format(value))\n\t\tfor key, value in sorted(keywordArgs.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tlocals and frames.append(\"{0:>40}\".format(\"Locals:\"))\n\t\tfor key, value in sorted(locals.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tframes.append(str())\n\n\ttrcback = formatException(cls, instance, trcback)\n\n\treturn header, frames, trcback", "def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()", "def report_exception(logger=None, report_details=None, cleanup_details=None):\n\n if logger is None:\n raise Exception(\"A logger must be defined!\")\n\n logger.debug(report_details)\n logger.error(report_details[\"error_message\"])\n\n if report_details[\"ujs_job_id\"] is not None:\n ujs = report_details[\"ujs_client\"]\n \n job_status = ujs.get_job_status(report_details[\"ujs_job_id\"])\n\n if job_status[-2] == 0:\n ujs.complete_job(report_details[\"ujs_job_id\"], \n report_details[\"token\"], \n report_details[\"status\"][:UJS_STATUS_MAX], \n report_details[\"error_message\"], \n None)\n else:\n raise Exception(\"No report details included!\") \n \n if cleanup_details is not None: \n if not cleanup_details[\"keep_working_directory\"]:\n try:\n cleanup(logger=logger, directory=cleanup_details[\"working_directory\"]) \n except Exception, e:\n logger.exception(e)\n else:\n raise Exception(\"Unable to cleanup working directory without cleanup info!\")", "def create_log(self, exc):\n return self.formatter.formatException(exc)", "def _generate_error_report(self, errno=None):\n # as of now we think this will be the same for every interface\n NIWORKFLOWS_LOG.warn('Report was not generated')\n\n errorstr = '<div><span class=\"error\">Failed to generate report!</span>.\\n'\n if errno:\n errorstr += (' <span class=\"error\">Interface returned exit '\n 'code %d</span>.\\n') % errno\n errorstr += '</div>\\n'\n with open(self._out_report, 'w' if PY3 else 'wb') as outfile:\n outfile.write(errorstr)", "def _create_issue(*, image: str, repo: str, run: str, stacktrace: str) -> Issue:\n title = f\"Automatic error report from {repo}\"\n body = _report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)\n 
return TAGBOT_ISSUES_REPO.create_issue(title, body)", "def __init__(self):\r\n try:\r\n self.file = open(REPORT_FILE, 'w')\r\n except OSError:\r\n print('Problem opening log file')\r\n exit(1)", "def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)", "def catch(self, report_name):\n return Catch(report_name, self)", "def write_error_report(self):\n\n with open('runReport.txt', 'a') as report:\n report.write(\"Number of Hits: \" + str(self.num_hits) + '\\n')\n report.write(\"Number of Requests: \" + str(self.num_requests) + '\\n')\n report.write(\"Hit Rate: \" + str((self.num_hits / self.num_requests)))\n report.write(\"Datafiles downloaded: \" + str(self.num_datafiles))\n now = datetime.now()\n dt_string = now.strftime(\"%H:%M %m/%d/%Y\")\n report.write(\"Run finished \" + dt_string)", "def reportinfo(self):\n return self.fspath, 0, f\"usecase: {self.name}\"", "def getErrorReport(self):\n return self.sError;", "def getErrorReport(self):\n return self.sError;", "def __create_failure_report(self, classname, failure_desc):\n match = FAILURE_LOC_RE.match(failure_desc[0])\n if not match:\n raise ValueError(\"Unexpected failure description format.\\n\"\n \"Expected the first line to contain details \"\n \"of the location of the error.\\n\"\n \"Found '%s'\" % failure_desc[0])\n name = match.group(3)\n return TestCaseReport(classname, name, \"\\n\".join(failure_desc))", "def _get_report(self, entry):\n script = entry.get('@fields').get('script_name', '')\n message = entry.get('@message').encode('utf8')\n error = entry.get('@context').get('error', 'n/a').encode('utf8')\n\n # extract SQL from the error\n (error, sql) = self.extract_error_and_sql(error)\n\n description = self.REPORT_TEMPLATE.format(\n full_message=message,\n error=error,\n sql=sql,\n details=json.dumps(entry, indent=True)\n ).strip()\n\n report = Report(\n summary='{} - {}'.format(script, message),\n description=description,\n label=self.REPORT_LABEL\n )\n\n return report", "def ReportError(text):\n raise IOError(text)", "def get_report_path_hash(report) -> str:\n report_path_hash = ''\n events = [i for i in report.bug_path if i.get('kind') == 'event']\n for event in events:\n file_name = \\\n os.path.basename(report.files.get(event['location']['file']))\n line = str(event['location']['line'] if 'location' in event else 0)\n col = str(event['location']['col'] if 'location' in event else 0)\n\n report_path_hash += line + '|' + col + '|' + event['message'] + \\\n file_name\n\n report_path_hash += report.check_name\n\n if not report_path_hash:\n LOG.error('Failed to generate report path hash!')\n LOG.error(report.bug_path)\n\n LOG.debug(report_path_hash)\n return __str_to_hash(report_path_hash)", "def get_report_path(self):\n report_path = os.path.join(logPath, 
\"report.html\")\n return report_path", "def get_error_file(self):\n pass", "def report(self) -> Any:", "def create_from_exception(self, exc_info=None, **kwargs):\n if not exc_info:\n exc_info = sys.exc_info()\n\n exc_type, exc_value, exc_traceback = exc_info\n\n def shorten(var):\n var = transform(var)\n if isinstance(var, basestring) and len(var) > 200:\n var = var[:200] + '...'\n return var\n\n reporter = ExceptionReporter(None, exc_type, exc_value, exc_traceback)\n frames = varmap(shorten, reporter.get_traceback_frames())\n\n if not kwargs.get('view'):\n # This should be cached\n modules = get_installed_apps()\n if conf.INCLUDE_PATHS:\n modules = set(list(modules) + conf.INCLUDE_PATHS)\n\n def iter_tb_frames(tb):\n while tb:\n yield tb.tb_frame\n tb = tb.tb_next\n \n def contains(iterator, value):\n for k in iterator:\n if value.startswith(k):\n return True\n return False\n \n # We iterate through each frame looking for an app in INSTALLED_APPS\n # When one is found, we mark it as last \"best guess\" (best_guess) and then\n # check it against SENTRY_EXCLUDE_PATHS. If it isnt listed, then we\n # use this option. If nothing is found, we use the \"best guess\".\n best_guess = None\n view = None\n for frame in iter_tb_frames(exc_traceback):\n try:\n view = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])\n except:\n continue\n if contains(modules, view):\n if not (contains(conf.EXCLUDE_PATHS, view) and best_guess):\n best_guess = view\n elif best_guess:\n break\n if best_guess:\n view = best_guess\n \n if view:\n kwargs['view'] = view\n\n data = kwargs.pop('data', {}) or {}\n if hasattr(exc_type, '__class__'):\n exc_module = exc_type.__class__.__module__\n else:\n exc_module = None\n data['__sentry__'] = {\n 'exc': map(transform, [exc_module, exc_value.args, frames]),\n }\n\n if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):\n origin, (start, end) = exc_value.source\n data['__sentry__'].update({\n 'template': (origin.reload(), start, end, origin.name),\n })\n kwargs['view'] = origin.loadname\n \n tb_message = '\\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback))\n\n kwargs.setdefault('message', transform(force_unicode(exc_value)))\n\n return self.process(\n class_name=exc_type.__name__,\n traceback=tb_message,\n data=data,\n **kwargs\n )", "def error(self, environ, start_response):\n \n \"Generate an error report\"\n status = '200 Handle error'\n headers = [('Content-type','text/html')]\n start_response(status, headers)\n trace = traceback.extract_tb(sys.exc_traceback)\n return ['Error<br />[Exception] <i><q>%s</q></i> <br /> [File ] <i><q>%s</q></i> <br /><pre>%s</pre>'\n % (sys.exc_info()[0],trace[-1][0],self.print_file(trace[-1][0], trace[-1][1]))]", "def report(self, output_dir):", "def _generate_report(self):\n raise NotImplementedError", "def create_report(self, report_job: dict):\n try:\n # Run the report and wait for it to finish\n report_job_id = self.report_downloader.WaitForReport(report_job)\n return report_job_id\n except errors.AdManagerReportError as e:\n print('[INFO]: Failed to generate report. 
Error: %s' % e)\n sys.exit()", "def report():\n pass", "def pytest_runtest_makereport(item, call):\n if \"incremental\" in item.keywords:\n if call.excinfo is not None:\n parent = item.parent\n parent._previousfailed = item", "def get_exception():\n raise Exception(\"example\")", "def save_exception(exc):\n LOG.error(\"Error - %s\", str(exc))\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n data = (str(exc)+traceback.format_exc())\n\n file = open(\"./logs/ERROR_\"+threading.currentThread().getName()+today+\".log\",'a+') #Replace to fix OSError\n file.write(\"\\n==\"+hour+\"==\\n\")\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def add_error(self, reference_id, error):\n\n with open('runReport.txt', 'a') as report:\n try:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id + \": \" + error)\n except Exception:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id)", "def report_path(self, *relative_path):\n report_path = pathlib.Path(self.strategy.service.report_path)\n if report_path.exists():\n # only create the relative path (not the report if not exists, it might from config setup else its an issue)\n report_path = report_path.joinpath(*relative_path)\n if not report_path.exists():\n try:\n report_path.mkdir(parents=True)\n except Exception as e:\n error_logger(repr(e))\n return None\n\n return report_path\n else:\n return report_path\n else:\n return None", "def report(self, line: int, where: str, message: str):\n output = f'[line {line}] Error{where}: {message}'\n print(output, file=sys.stderr)\n self.had_error = True", "def errpath(self):\n return None", "def get_exception():\n trace = ''\n exception = ''\n exc_list = traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)", "def initialize_report(report_type, start_date, end_date, start_letter=None, end_letter=None):\r\n for item in REPORT_TYPES:\r\n if report_type in item:\r\n return item[1](start_date, end_date, start_letter, end_letter)\r\n raise ReportTypeDoesNotExistException", "def test_referral_not_found_exception():\n # When raising the exception\n with pytest.raises(ReferralNotFoundError) as e_info:\n raise ReferralNotFoundError(\"Custom exception message\")\n\n # Then the exception message should be as expected\n assert str(e_info.value) == \"Custom exception message\"", "def build_error_output():\n\n error_type, error_value, error_tb = sys.exc_info()\n\n alert_data = dict()\n alert_data['type'] = type(error_value).__name__\n alert_data['value'] = str(error_value)\n alert_data['host'] = platform.node()\n alert_data['os'] = platform.system()\n alert_data['traceback'] = traceback.format_list(traceback.extract_tb(error_tb))\n\n return alert_data", "def save_last_error_report(trace):\n try:\n rfile = os.path.join(get_file_dir(), 'last_report_error.txt')\n with open(rfile, 'w') as f:\n f.write(trace)\n except:\n log(\"Error writing error report file\")", "async def createIncidentReport(\n self, incidentReport: IncidentReport, author: str\n ) -> IncidentReport:", "def get_report(self) -> str:\n return self.diagnostics.get_report()", "def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)", "def report_with_context(self, 
exception: Exception, http_context: HTTPContext):\n self._add_exception(exception, http_context)", "def instrument_fail(self, req, where):\n\n if where in req[\"file_details\"][\"backend_filename\"]:\n raise Exception(\"Instrumented Failure: %s\" % where)", "def create_report():\n global inspection_report\n operator_name = request.form['inspectorName']\n inspection_date = request.form['datepicker']\n city = request.form['city']\n street = request.form['street']\n pipe_id = request.form['pipe_id']\n manhole_id = request.form['manhole_id']\n dimensions = request.form['sizes']\n shape = request.form['shapes']\n material = request.form['materials']\n\n inspection_report = InspectionReport(\n operator_name, inspection_date, city, street, pipe_id, manhole_id, dimensions, shape, material)\n # print(inspection_report.toJSON())\n return render_camera_view()", "def get_fatal_alerts(self, path):", "def _reportErrorMsg(self, ErrorMessage, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n self._insertErrorMsg(ErrorMessage, f)\n f.close()", "def pytest_runtest_makereport(item, call):\n outcome = yield\n report = outcome.get_result()\n if report.when == \"call\":\n doc = getattr(getattr(item, \"function\", None), \"__doc__\", None)\n item.report_call = ReportResult(report=report, excinfo=call.excinfo, doc=doc)", "def get_problem_report(self):\n\t\treturn Job(SDK.PrlSrv_GetProblemReport(self.handle)[0])", "def GetReportString(self):\n return _gmat_py.LocatedEvent_GetReportString(self)", "def report_path(self):\r\n return os.path.join(self._html_dir, 'build.html')", "def baseExceptionHandler(*args):\n\n\theader, frames, trcback = formatReport(*extractException(*args))\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), header)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), frames)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tsys.stderr.write(\"\\n\".join(trcback))\n\n\treturn True", "def report_error(manager, entrypoint, exception):\n LOG.error(\"Error while loading provider %s\", entrypoint)\n raise exception", "def add_a_report(request: Request, report: Report):\n reports_collection = request.app.state.db.data.reports\n\n report = process_polygon(report)\n \n try: \n res = reports_collection.insert_one(report.dict(by_alias=True))\n except pymongo.errors.DuplicateKeyError:\n raise HTTPException(404, \"Tried to insert a duplicate.\")\n\n if res is None:\n raise HTTPException(404)\n\n d = report.dict(by_alias=True)\n d['_id'] = res.inserted_id\n\n return d", "def create_report_file(self, contents, report_file=None, **kwargs):\n if report_file is None:\n report_file = 'pytan_report_{}.txt'.format(pytan.utils.get_now())\n\n # try to get report_dir from the report_file\n report_dir = os.path.dirname(report_file)\n\n # try to get report_dir from kwargs\n if not report_dir:\n report_dir = kwargs.get('report_dir', None)\n\n # just use current working dir\n if not report_dir:\n report_dir = os.getcwd()\n\n # make report_dir if it doesnt exist\n if not os.path.isdir(report_dir):\n os.makedirs(report_dir)\n\n # remove any path from report_file\n report_file = os.path.basename(report_file)\n\n # if prefix/postfix, add to report_file\n prefix = kwargs.get('prefix', '')\n postfix = kwargs.get('postfix', '')\n report_file, report_ext = os.path.splitext(report_file)\n report_file = '{}{}{}{}'.format(prefix, report_file, postfix, 
report_ext)\n\n # join the report_dir and report_file to come up with report_path\n report_path = os.path.join(report_dir, report_file)\n\n with open(report_path, 'wb') as fd:\n fd.write(contents.encode(\"utf-8\"))\n\n m = \"Report file {!r} written with {} bytes\".format\n self.mylog.info(m(report_path, len(contents)))\n return report_path", "def get_report(self):\n raise NotImplementedError('Agent is an abstract base class')", "def make_report(template_path, report_path, function, *args, **kwargs):\n # Create the report content.\n with open(template_path) as f:\n content = function(*args, f=f, **kwargs)\n\n # Write to the target directory.\n with open(report_path, \"w+\") as f:\n f.write(content)\n\n return report_path", "def display_exception(self, exception_trace=''):\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)", "def create_stix2_report_from_report(\n report: Report,\n source_name: str,\n created_by: stix2.Identity,\n objects: List[Union[_DomainObject, _RelationshipObject]],\n report_types: List[str],\n confidence: int,\n object_markings: List[stix2.MarkingDefinition],\n x_opencti_report_status: int,\n x_opencti_files: Optional[List[Mapping[str, str]]] = None,\n) -> stix2.Report:\n report_name = report.name\n\n report_created_date = report.created_date\n if report_created_date is None:\n report_created_date = datetime_utc_now()\n\n report_last_modified_date = report.last_modified_date\n if report_last_modified_date is None:\n report_last_modified_date = report_created_date\n\n report_description = report.description\n report_rich_text_description = report.rich_text_description\n report_short_description = report.short_description\n\n description = None\n if report_rich_text_description is not None and report_rich_text_description:\n description = remove_html_tags(report_rich_text_description)\n elif report_description is not None and report_description:\n description = report_description\n elif report_short_description:\n description = report_short_description\n\n labels = []\n report_tags = report.tags\n if report_tags is not None:\n for tag in report_tags:\n value = tag.value\n if value is None or not value:\n continue\n\n labels.append(value)\n\n external_references = []\n report_url = report.url\n if report_url is not None and report_url:\n external_reference = create_external_reference(\n source_name, str(report.id), report_url\n )\n external_references.append(external_reference)\n\n return create_report(\n report_name,\n report_created_date,\n objects,\n created_by=created_by,\n created=report_created_date,\n modified=report_last_modified_date,\n description=description,\n report_types=report_types,\n labels=labels,\n confidence=confidence,\n external_references=external_references,\n object_markings=object_markings,\n x_opencti_report_status=x_opencti_report_status,\n x_opencti_files=x_opencti_files,\n )", "def _save_crasher(run_dir: str, smp: sample.Sample,\n exception: sample_runner.SampleError,\n crasher_dir: str) -> str:\n digest = hashlib.sha256(smp.input_text.encode('utf-8')).hexdigest()[:8]\n sample_crasher_dir = os.path.join(crasher_dir, digest)\n logging.info('Saving crasher to %s', sample_crasher_dir)\n gfile.recursively_copy_dir(\n run_dir, sample_crasher_dir, preserve_file_mask=True)\n with gfile.open(os.path.join(sample_crasher_dir, 'exception.txt'), 'w') as f:\n f.write(str(exception))\n crasher_path = os.path.join(\n 
sample_crasher_dir,\n 'crasher_{}_{}.x'.format(datetime.date.today().strftime('%Y-%m-%d'),\n digest[:4]))\n with gfile.open(crasher_path, 'w') as f:\n f.write(smp.to_crasher(str(exception)))\n return sample_crasher_dir", "def _log_crash_report():\n # For each crash report we find, dump its contents.\n # In theory we clean up after a crash so there should be only one.\n cwd = os.getcwd()\n for entry in os.listdir('.git'):\n if entry.startswith('fast_import_crash_'):\n with open(os.path.join(cwd, '.git', entry)) as f:\n report = f.read()\n # Keep the message free of repetition.\n LOG.error(\"git {}:\\n{}\".format(entry, report))", "def __set_report_path(self):\n self.report_path = os.path.join(self.get_report_path(), \"cyclomatic_report\")\n Path(self.report_path).mkdir(parents=True, exist_ok=True)", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def make_eval_exception(app, global_conf, xmlhttp_key=None, reporters=None):\n if xmlhttp_key is None:\n xmlhttp_key = global_conf.get('xmlhttp_key', '_')\n if reporters is None:\n reporters = global_conf.get('error_reporters')\n if reporters and isinstance(reporters, basestring):\n reporter_strings = reporters.split()\n reporters = []\n for reporter_string in reporter_strings:\n reporter = import_string.eval_import(reporter_string)\n if isinstance(reporter, (type, types.ClassType)):\n reporter = reporter()\n reporters.append(reporter)\n return EvalException(app, xmlhttp_key=xmlhttp_key, reporters=reporters)", "def errorpath():\n stdoutfile=pdbid()+\".error.log\"\n stdout = os.path.join(output_dir(), stdoutfile)\n\n return stdout", "def _report_body(*, image: str, repo: str, run: str, stacktrace: str) -> str:\n return (\n f\"Repo: {repo}\\n\"\n f\"Run URL: {run}\\n\"\n f\"Image ID: {image}\\n\"\n f\"Stacktrace:\\n```py\\n{stacktrace}\\n```\\n\"\n )", "def pytest_runtest_makereport(item, call):\n timestamp = datetime.now().strftime('%d-%m-%Y_%H-%M-%S')\n pytest_html = item.config.pluginmanager.getplugin('html')\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, 'extra', [])\n\n if report.when == 'call':\n xfail = hasattr(report, 'wasxfail')\n if (report.skipped and xfail) or (report.failed and not xfail):\n file_name = f\"{item.name}_{timestamp}.png\"\n if item.funcargs.get('driver'):\n path = os.path.join(ROOT_DIR, 'reports', 'screenshots', file_name)\n item.funcargs['driver'].get_screenshot_as_file(f'{path}')\n if file_name:\n html = f'<div><img src=\"screenshots/{file_name}\" alt=\"screenshot\" ' \\\n f'style=\"width:304px;height:228px;\" onclick=\"window.open(this.src)\" align=\"right\"/></div>'\n extra.append(pytest_html.extras.html(html))\n report.extra = extra", "def exception_alias():\n try:\n #result=1/0\n raise Exception\n except ZeroDivisionError, e:\n print(\"ZeroDivisionError\")\n print(e.message if e.message != \"\" else 'no message')\n except Exception, e:\n print(\"Exception\")\n print(type(e.message)) # <type 'str'>\n print(e.message if e.message != \"\" else 'no message')", "def report_test(report_generator, request):\n time1 = str(datetime.datetime.now())\n yield\n time2 = str(datetime.datetime.now())\n try:\n # TODO: Sometimes this line causes problems, but I can't\n # remember the context surrounding it. I think if there\n # is an error setting up a test fixture, `report_call` is not\n # defined, or something. 
Anyway, it'd be a great\n # to figure out a graceful solution.\n # In the meantime this'll be nicer output.\n nodeid = request.node.report_call.report.nodeid\n except Exception: # pragma: no cover\n logging.error(\n f\"Unable to extract nodeid from node: {request.node}; \"\n \"not preparing report segment\"\n )\n return\n is_failed = request.node.report_call.report.failed\n # TODO: Figure out a way to include class docs if they exist\n # class TestFooBar:\n # \"\"\"\n # When Foo is bar\n # \"\"\"\n # def test_a_baz(self):\n # \"\"\"and baz is bop\"\"\"\n # do_work('bop')\n # The report output should then read \"When foo is bar and baz is bop\"\n doc = request.node.report_call.doc\n slug = re.sub(r\"\\W\", \"-\", nodeid)\n header = ResultHeader(link=slug, is_failed=is_failed, description=nodeid)\n failure = None\n if is_failed:\n exc_info = request.node.report_call.excinfo\n if isinstance(exc_info.value, BrowserError):\n e = exc_info.value\n failure = ResultFailure(\n message=e.message,\n url=e.url,\n loglines=[log.get(\"message\", \"\") for log in e.logs],\n )\n else:\n failure = ResultFailure(message=str(exc_info))\n\n report = ResultAttributes(\n link=slug,\n doc=doc,\n nodeid=nodeid,\n pngs=BrowserRecorder.pngs,\n failure=failure,\n time1=time1,\n time2=time2,\n )\n\n filename = os.path.join(report_generator, f\"result.{slug}.html\")\n headerfile = os.path.join(report_generator, f\"head.{slug}.html\")\n\n with open(headerfile, \"w\") as fd:\n fd.write(header.json())\n with open(filename, \"w\") as fd:\n fd.write(report.json())\n BrowserRecorder.pngs.clear()", "def open(self):\r\n safe_mkdir(os.path.dirname(self._html_dir))\r\n self._report_file = open(self.report_path(), 'w')", "def report(db, openfile):\n pass", "def exception(self) -> str:\n return pulumi.get(self, \"exception\")", "def syntaxError (self, s) :\r\n report = self.generateReport() + s\r\n raise Exception, report", "def error(self, trigger=None):\n try:\n trace = traceback.format_exc()\n if sys.version_info.major < 3:\n trace = trace.decode('utf-8', errors='xmlcharrefreplace')\n stderr(trace)\n try:\n lines = list(reversed(trace.splitlines()))\n report = [lines[0].strip()]\n for line in lines:\n line = line.strip()\n if line.startswith('File \"'):\n report.append(line[0].lower() + line[1:])\n break\n else:\n report.append('source unknown')\n\n signature = '%s (%s)' % (report[0], report[1])\n # TODO: make not hardcoded\n log_filename = os.path.join(self.config.core.logdir, 'exceptions.log')\n with codecs.open(log_filename, 'a', encoding='utf-8') as logfile:\n logfile.write('Signature: %s\\n' % signature)\n if trigger:\n logfile.write('from {} at {}. 
Message was: {}\\n'.format(\n trigger.nick, str(datetime.now()), trigger.group(0)))\n logfile.write(trace)\n logfile.write(\n '----------------------------------------\\n\\n'\n )\n except Exception as e:\n stderr(\"Could not save full traceback!\")\n LOGGER.error(\"Could not save traceback from %s to file: %s\", trigger.sender, str(e))\n\n if trigger and self.config.core.reply_errors and trigger.sender is not None:\n self.msg(trigger.sender, signature)\n if trigger:\n LOGGER.error('Exception from {}: {} ({})'.format(trigger.sender, str(signature), trigger.raw))\n except Exception as e:\n if trigger and self.config.core.reply_errors and trigger.sender is not None:\n LOGGER.error('Exception from {}: {} ({})'.format(trigger.sender, str(e), trigger.raw))", "def _CreateReport(failed_percentage):\r\n\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font(\"Arial\", size=12)\r\n pdf.cell(200, 10, txt=\"Report\", ln=1, align='C')\r\n pdf.cell(200, 10, txt=\"Scenario ID: {0}\".format(sys.argv[2]), ln=2, align='C')\r\n\r\n # prints failures above threshold to the report\r\n for i in range(39):\r\n if failed_percentage[i] > FAILURE_THRESHOLD:\r\n pdf.cell(200, 10, txt=\"Requirement {0}: {1}% failure\".format(i + 1, failed_percentage[i]), ln=3)\r\n\r\n pdf.output(\"report.pdf\")", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def capture_exception():\n\n pm_logger.exception()\n exc_type, exc_value, exc_tb = sys.exc_info()\n exc_type_string = \"%s.%s\" % (exc_type.__module__, exc_type.__name__)\n exc_message = traceback.format_exception_only(exc_type, exc_value)[-1].strip()\n error = {\"type\": exc_type_string,\n \"message\": exc_message}\n try:\n BSON.encode({'args': exc_value.args})\n except InvalidDocument:\n pass\n else:\n error[\"args\"] = exc_value.args\n return error", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def generate_report(template_filename, report_title, report_dir):\n\n def inner(output_dir: Optional[str] = None):\n output_dir = output_dir or report_dir\n with open(template_filename) as fd:\n template = jinja2.Template(fd.read())\n\n template.globals.update(\n {\"date\": str(datetime.datetime.now()), \"lettergen\": lettergen, \"zip\": zip}\n )\n\n headers = iterfiles(output_dir, \"head.\")\n results = iterfiles(output_dir, \"result.\")\n stream = template.stream(headers=headers, results=results, project=report_title)\n artifact = os.path.join(output_dir, \"index.html\")\n stream.dump(artifact)\n logging.info(f\"Created report: {artifact}\")\n\n return inner", "def get_problem_report(self):\n\t\treturn Job(SDK.PrlVm_GetProblemReport(self.handle)[0])", "def exception(self, *args, **kwargs):", "def traceback(self):", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( 
MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def test_get_report_file_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_OK)", "def test_harvester_new_file_exception_recovered(self):\n # create the file so that it is unreadable\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step1.DAT\",\n RECOV_DIR,\n \"DOS15909.DAT\",\n mode=000\n )\n\n # Start sampling and watch for an exception\n self.driver.start_sampling()\n\n self.assert_exception(IOError)\n\n # At this point the harvester thread is dead. The agent\n # exception handle should handle this case.", "def _create_report(self, report_type, report_key, report_name):\n\n listOfReports = self.model.find(xmlns + 'ListOfReports')\n \n #Check a report with the current key doesn't already exist. If it does, delete it\n foundReport = False\n for report in listOfReports:\n if report.attrib['key'] == report_key:\n foundReport = report\n if foundReport:\n listOfReports.remove(foundReport)\n\n #Next, look through and check to see if a report with the report_name already exists. 
If it does, delete it\n \n listOfReports = self.model.find(xmlns + 'ListOfReports')\n foundReport = False\n for report in listOfReports:\n if report.attrib['name'] == report_name:\n foundReport = report\n if foundReport:\n listOfReports.remove(foundReport)\n\n if report_type == 'SO':\n\n newReport = etree.SubElement(listOfReports, xmlns + 'Report')\n newReport.set('key', report_key)\n newReport.set('name', report_name)\n newReport.set('taskType', 'optimization')\n newReport.set('seperator', '&#x09;')\n newReport.set('precision', '6')\n \n newReport_Comment = etree.SubElement(newReport, xmlns + 'Comment')\n newReport_Comment_body = etree.SubElement(newReport_Comment, xmlns + 'body')\n newReport_Comment_body.set('xmlns', 'http://www.w3.org/1999/xhtml')\n newReport_Comment_body.text = 'Report automatically generated by condor-copasi'\n\n #Create the body\n newReport_Body = etree.SubElement(newReport, xmlns + 'Body')\n\n newReport_Body_Object1 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object1.set('cn','String=#----\\n')\n\n newReport_Body_Object2 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object2.set('cn','String=Evals \\= ')\n\n newReport_Body_Object3 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object3.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations')\n\n newReport_Body_Object4 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object4.set('cn','String=\\nTime \\= ')\n\n newReport_Body_Object5 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object5.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time')\n\n newReport_Body_Object6 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object6.set('cn','String=\\n')\n\n newReport_Body_Object7 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object7.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value')\n \n #And put the same objects in the footer\n newReport_Footer = etree.SubElement(newReport, xmlns + 'Footer')\n\n newReport_Footer_Object1 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object1.set('cn','String=#----\\n')\n\n newReport_Footer_Object2 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object2.set('cn','String=Evals \\= ')\n\n newReport_Footer_Object3 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object3.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations')\n\n newReport_Footer_Object4 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object4.set('cn','String=\\nTime \\= ')\n\n newReport_Footer_Object5 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object5.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time')\n\n newReport_Footer_Object6 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object6.set('cn','String=\\n')\n\n newReport_Footer_Object7 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object7.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value')\n \n elif report_type == 'SS':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" 
taskType=\"timeCourse\" separator=\"&#x09;\" precision=\"6\">\n <Comment>\n A table of time, variable species particle numbers, variable compartment volumes, and variable global quantity values.\n </Comment>\n <Table printTitle=\"1\">\n \n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n model_name = self.get_name()\n \n table = report.find(xmlns + 'Table')\n time_object = etree.SubElement(table, xmlns + 'Object')\n time_object.set('cn', 'Model=' + model_name + ',Reference=Time')\n \n for variable in self.get_variables():\n row = etree.SubElement(table, xmlns + 'Object')\n row.set('cn', variable) \n \n listOfReports.append(report)\n \n elif report_type == 'OR':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"optimization\" separator=\"&#x09;\" precision=\"6\">\n <Comment>\n \n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report)\n \n elif report_type == 'PR':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"parameterFitting\" separator=\"&#x09;\" precision=\"6\">\n<Comment>\n Condor Copasi automatically generated report.\n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report)\n \n \n \n \n elif report_type == 'SP':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"parameterFitting\" separator=\"&#x09;\" precision=\"6\">\n<Comment>\n Condor Copasi automatically generated report.\n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, 
report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report) \n else:\n raise Exception('Unknown report type')", "def _get_report(self, entry):\n # format the report\n description = self.FULL_MESSAGE_TEMPLATE.format(\n db=entry.get('db', 'n/a'),\n host=entry.get('@source_host'),\n client=entry.get('client', 'n/a'),\n query_time=entry.get('query_time', 'n/a'),\n method=entry.get('query_class'),\n query=entry.get('query'),\n entry=json.dumps(entry, indent=True),\n ).strip()\n\n # add a fake path to the class, to that we will try to make classifier attach a proper component\n class_name = entry.get('query_class', '').split(':')[0]\n description += '\\n\\nPossible source file:\\n* /extensions/wikia/{}:1'.format(class_name)\n\n return Report(\n summary='[{method}] Long running query was killed by mysql-killer'.format(method=entry.get('query_class')),\n description=description,\n label=self.REPORT_LABEL\n )", "def send_report(e_mail=None, subject='Report', log_name=None):\n\n username = 'nannyrobot009'\n password = 'wihzof-Pupxez-kawjy3'\n smtp_server = 'smtp.gmail.com'\n port = 465\n from_name = username + '@gmail.com'\n signature = '\\n\\n---------------------------------\\nI hope your program finished succesfully\\n' \\\n + chr(0x1F916) + chr(0x1F609)\n\n if e_mail is None:\n print('No e-mail to send')\n return\n\n msg = EmailMessage()\n msg['Subject'] = subject\n msg['From'] = from_name\n msg['To'] = e_mail\n\n if log_name is not None:\n if not(isfile(log_name) and access(log_name, R_OK)):\n msg.set_content('Log file {} doesn\\'t exist or isn\\'t readable '.format(log_name) +\n chr(0x1F914) +\n signature)\n else:\n msg.set_content('THE LAST 10 LINES OF FILE {}:\\n'.format(log_name) +\n Popen('tail {}'.format(log_name), shell=True, stdout=PIPE).stdout.read().decode(\"utf-8\") +\n signature)\n else:\n msg.set_content(signature)\n\n with smtplib.SMTP_SSL(smtp_server, port) as server:\n server.login(username, password)\n server.send_message(msg)", "def report_failure(self):\n if self.email:\n if self.wiki.config.admin_mail and self.wiki.config.admin_mail.lower() != 'nomail':\n subject = \"Dump failure for \" + self.wiki.db_name\n message = self.wiki.config.read_template(\"errormail.txt\") % {\n \"db\": self.wiki.db_name,\n \"date\": self.wiki.date,\n \"time\": TimeUtils.pretty_time(),\n \"url\": \"/\".join((self.wiki.config.web_root, self.wiki.db_name,\n self.wiki.date, ''))}\n self.mail(subject, message)", "def format_exception(exception_type, class_name = 'No classname', message = 'Formated exception', debug_info = {}):\n\tcheck_class(exception_type, Exception)\n\tcheck_type(class_name, StringType)\n\tcheck_type(message, StringType)\n\tcheck_type(debug_info, DictType)\n\n\tdebug = []\n\tfor k in debug_info:\n\t\tdebug.append('{0}: {1}'.format(k, debug_info[k]))\n\texc = exception_type('{0}, \"{1}\" - debug: ({2})'.format(class_name, message, ', '.join(debug)))\n\treturn exc", "def _location_from_fx_stack_trace(\n node_stack_trace: str,\n) -> Optional[diagnostics.infra.Location]:\n if \"File\" not in node_stack_trace:\n return None\n\n lines = node_stack_trace.strip().split(\"\\n\")\n idx = 0\n while idx < len(lines) and \"File\" not in lines[idx]:\n idx += 1\n if idx + 1 >= len(lines):\n return None\n\n pattern = re.compile(r\"^File \\\"(.+)\\\", line (\\d+), in (.+)$\")\n matches = pattern.match(lines[idx].strip())\n if matches:\n uri = matches.group(1)\n line_number = int(matches.group(2))\n snippet = lines[idx + 1].strip()\n return 
diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet)\n return None", "def test_exceptions_init_nonexistent():\n with pytest.raises(IOError):\n Exceptions(os.path.join(os.path.dirname(__file__),\n 'nonexistent_exceptions.yaml'))", "def formatException(cls, instance, trcback, context=1):\n\n\tstack = extractStack(getInnerMostFrame(trcback), context=context)\n\toutput = []\n\toutput.append(\"Traceback (most recent call last):\")\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\toutput.append(\" File \\\"{0}\\\", line {1}, in {2}\".format(fileName, lineNumber, name))\n\t\tfor line in context:\n\t\t\toutput.append(\" {0}\".format(line.strip()))\n\tfor line in traceback.format_exception_only(cls, instance):\n\t\toutput.append(\"{0}\".format(line))\n\treturn output", "def _get_exception(self):\r\n \r\n return self._exception", "def _build_prediction_Report(name, x, y, a, indices):\n return PredictionReport(name, x[indices], y[indices], a[indices], indices)", "def test_friendly_exception_formatting_multiple_exceptions():\n ex1 = InsufficientCorrectSignatures(1, 2, {'6ouriXMZkLeHsuXrN1X1fd': '3GoEPiwhJUjALzrXmmE9tFTXAi7Emv8Y8jjSxQyQB'})\n ex2 = InsufficientSignatures(1, 3)\n ex2.__cause__ = ex1\n ex3 = SigningException()\n ex3.__cause__ = ex2\n\n expected = '{} [caused by {} [caused by {}]]'.format(ex3, ex2.reason, ex1.reason)\n formatted_exception = friendlyEx(ex3)\n\n assert formatted_exception == expected", "def test_get_manifest_unexpected_report_name(self):\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader._get_manifest(self.mock_data.bad_test_date)", "def exception_message():\n def get_os_release():\n \"\"\"Returns detailed OS release.\"\"\"\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"\n\n msg = (\n \"Oops! Cuckoo failed in an unhandled exception!\\nSometimes bugs are \"\n \"already fixed in the development release, it is therefore \"\n \"recommended to retry with the latest development release available \"\n \"%s\\nIf the error persists please open a new issue at %s\\n\\n\" %\n (GITHUB_URL, ISSUES_PAGE_URL)\n )\n\n msg += \"=== Exception details ===\\n\"\n msg += \"Cuckoo version: %s\\n\" % version\n msg += \"OS version: %s\\n\" % os.name\n msg += \"OS release: %s\\n\" % get_os_release()\n msg += \"Python version: %s\\n\" % platform.python_version()\n msg += \"Python implementation: %s\\n\" % platform.python_implementation()\n msg += \"Machine arch: %s\\n\" % platform.machine()\n\n try:\n import pip\n\n msg += \"Modules: %s\\n\" % \" \".join(sorted(\n \"%s:%s\" % (package.key, package.version)\n for package in pip.get_installed_distributions()\n ))\n except ImportError:\n pass\n\n msg += \"\\n\"\n return msg", "def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'", "def test_are_chained_exceptions_printed(self):\n\n io = BufferedSystemIO()\n\n try:\n try:\n raise IndexError('Invalid index 5')\n except IndexError as index_exc:\n raise Exception('There was an error with index') from index_exc\n\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('(Caused by) IndexError:', io.get_value())\n self.assertIn('Exception:', io.get_value())\n self.assertIn('There was an error with index', io.get_value())" ]
[ "0.61463684", "0.5946372", "0.59041274", "0.577588", "0.57041526", "0.569233", "0.5647634", "0.56435865", "0.5620892", "0.5555634", "0.54271317", "0.5426168", "0.5407387", "0.5407387", "0.538084", "0.53600556", "0.53557533", "0.533768", "0.5310631", "0.5296912", "0.5202553", "0.51981044", "0.51744926", "0.5169028", "0.51378834", "0.5128839", "0.5111966", "0.5111103", "0.5097656", "0.5084358", "0.5066365", "0.5063892", "0.5048873", "0.5042898", "0.5034631", "0.5010584", "0.496738", "0.49537715", "0.49510512", "0.49161556", "0.49104187", "0.49097034", "0.48957002", "0.4888986", "0.48887503", "0.4886973", "0.48809087", "0.48753515", "0.4872927", "0.4863388", "0.48560086", "0.4855501", "0.48448744", "0.48339072", "0.48311692", "0.48202533", "0.48004204", "0.4793529", "0.47877613", "0.47799876", "0.4765598", "0.47630253", "0.47605205", "0.47478208", "0.4740847", "0.4740821", "0.47403783", "0.4739418", "0.4732175", "0.4728415", "0.47235936", "0.47176984", "0.47170407", "0.471091", "0.47100884", "0.4705559", "0.47007307", "0.469403", "0.46931538", "0.4691243", "0.468604", "0.46847576", "0.46833882", "0.46774065", "0.46718532", "0.46689144", "0.46613422", "0.46610928", "0.4659048", "0.46484193", "0.46403152", "0.46392438", "0.4638374", "0.46377102", "0.46304604", "0.46247372", "0.46204183", "0.46173185", "0.46171692", "0.4617169" ]
0.6724956
0
Create the root node of the BST.
def __init__(self, name):
    debug.printMsg("We Initiated a BST with no root node")
    self.name = name
    self.root = None
    self.size = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def build():\n\n root = Node(9)\n root.left = Node(6)\n root.left.left = Node(3)\n root.left.right = Node(8)\n root.left.right.left = Node(7)\n root.right = Node(14)\n root.right.left = Node(12)\n return root", "def __init__(self):\n self.root = TreeNode(None)", "def create_bst(self, a, left, right):\n if left > right:\n return\n mid = (left + right) / 2\n self.insert(a[mid])\n self.create_bst(a, left, mid - 1)\n self.create_bst(a, mid + 1, right)", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def createBST(self, nums, start, end):\n if end < start:\n return None\n mid = int((start+end)/2)\n #create a new node\n root = TreeNode(nums[mid])\n root.left = self.createBST(nums, start,mid -1)\n root.right = self.createBST(nums, mid +1 ,end)\n return root", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def __init__(self):\n self.root = Node('')", "def __init__(self):\n self.root = Node(None)", "def root(self, statements, children=None):\n self._count += 1\n self._root = Node(self._count, statements, children)\n return self._root", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def create_bst(lst, start, end):\n if end < start:\n return None\n mid = (start + end) // 2\n root = BinaryTree(lst[mid])\n root.left_child = create_bst(lst, start, mid - 1)\n root.right_child = create_bst(lst, mid + 1, end)\n # post-order traversal\n print(root.get_root_val())\n return root", "def get_root_node(self):\n return self.root", "def create(self, val:int):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n\n while True:\n if val < current.val:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.val:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def __init__(self):\n self.root = TreeNode('#')", "def __init__(self, root_node):\n\n\t\tself.root = root_node\n\t\tself.left_child = None\n\t\tself.right_child = None", "def __init__(self):\n self.root = self.Node(None)", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def root(self) -> Node:\n return self._root", "def check_or_create_root(self):\n self.root = self.db_handler.get_or_create_indexed_node(\"root\", \n \"root_name\", \"ndn\", {\"component\":\"ndn\"})\n if not self.root:\n raise NoRootException(\"cannot locate root name (ndn)\")\n\n self.root.add_labels(LABEL_COMPONENT)", "def bst_insert(root, data):\n if root is None:\n root = Tree(d=data)\n elif data 
> root.data:\n root.right = bst_insert(root.right, data)\n else:\n root.left = bst_insert(root.left, data)\n return root", "def build_binary_tree(self, root_id, json_data):\n self.root = self.__build_binary_tree(self.root, root_id, json_data)\n return self.root", "def root_above(node, name=None):\n # walk down from self node\n left = walk_copy(node, node.parent)\n\n # walk up from parent node\n right = walk_copy(node.parent, node)\n\n # set basal branch lengths to be half of the original, i.e., midpoint\n left.length = right.length = node.length / 2\n\n # create new root\n res = TreeNode(name, children=[left, right])\n res.support = None\n return res", "def __init__(self, tree_node=None):\n self.root = tree_node", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __build_binary_tree(self, root, node_id, json_data):\n new_node = BinaryTree(value=json_data[node_id][\"value\"], left=None, right=None)\n if json_data[node_id][\"left\"] != None:\n new_node.left = self.__build_binary_tree(new_node, json_data[node_id][\"left\"], json_data)\n if json_data[node_id][\"right\"] != None:\n new_node.right = self.__build_binary_tree(new_node, json_data[node_id][\"right\"], json_data)\n return new_node", "def __init__(self):\n self.root = self.Node()", "def test_tree_initiates_with_empty_root(empty_t):\n assert not empty_t.root", "def __init__(self):\n self.root = TridNode()", "def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right", "def test_init_root():\n from bst import BST\n bst = BST()\n # import pdb; pdb.set_trace()\n assert bst.root is None", "def build():\n # root = TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5", "def insertHead(self, value):\n newNode = BinaryTree(value)\n if random.random() > 0.5:\n self.parent = newNode\n newNode.right = self\n else:\n self.parent = newNode\n newNode.left = self\n return newNode", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def _createSubtree(self, parent, begin, end):\n n_elem = end - begin\n if (n_elem == 1):\n node = Node(position=begin)\n node.parent = parent\n node.end = end\n return node\n\n # At least 2 values (leaves) left\n mid = int((end + begin)/2)\n node = Node(end=end)\n node.parent = parent\n node.left = self._createSubtree(node, begin, mid)\n node.right = self._createSubtree(node, mid, end)\n return node", "def __init__(self):\n self.__root = Node()", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __init__(self):\n self.root = SimpleNode()", "def add_root(self, elem):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._root = self._Node(elem, idx=0)\n self._size = 1\n self._curr_idx = 1\n self._depths, self._heights = None, None\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n 
self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def straight_bst():\n bst = BST()\n for i in range(1, 10):\n bst.insert_non_balance(i)\n return bst, 10, -9", "def build_UNIST_tree():\n root = LinkedBinaryTree()", "def __init__(self):\n self.root = self.get_new_node();", "def root(self):\n return self._make_position(self._root)", "def root_node(self):\n return self.process_tree", "def get_root_node(cur):\n sql = \"\"\"\n SELECT\n *\n FROM\n nodes\n WHERE\n parent IS NULL;\n \"\"\"\n cur.execute(sql)\n result = cur.fetchone()\n\n if result is None:\n raise exceptions.NoRootNode()\n else:\n return NodeData(**result)", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def get_root_tree(tree_raw):\n length = len(tree_raw)\n if 0 == length:\n return None\n if 1 == length:\n if tree_raw[0] is None:\n return None\n return TreeNode(tree_raw[0])\n\n root = TreeNode(tree_raw[length // 2])\n left_tree_raw = tree_raw[:length // 2]\n right_tree_raw = tree_raw[length // 2 + 1:]\n root.left = get_root_tree(left_tree_raw)\n root.right = get_root_tree(right_tree_raw)\n\n return root", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def root(tree):\n\n return tree[0]", "def binary_tree():\n\n class Node(object):\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Create a root\n root = Node(data=1)\n root.left = Node(data=2)\n root.right = Node(data=3)\n root.left.left = Node(data=4)\n \"\"\" Structure\n 1 <-- root\n / \\\n 2 3 \n / \n 4\n \"\"\"", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)", "def insert(self, val):\n\n\t\tif not self.root:\n\t\t\tself.root = BinaryTreeNode(val)\n\n\t\telse:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif not node.left:\n\t\t\t\t\tnode.left = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.left)\n\n\t\t\t\tif not node.right:\n\t\t\t\t\tnode.right = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.right)\n\n\t\tself.numNodes += 1", "def root(self):\n return self._make_position(self._root)", "def root(self):\n return self._make_position(self._root)", "def root(self):\n return self._make_position(self._root)", "def root(self):\n return self._make_position(self._root)", "def root(self):\n return self._make_position(self._root)", "def root(self):\n return self._make_position(self._root)", "def test_right_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(15)\n assert empty_t.root.right\n assert not empty_t.root.left", "def test_bst_empty_root(bst_empty):\n assert bst_empty.root == None", "def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree", "def root(self):\n if self.has_multiple_roots:\n raise 
ValueError(\"More than one root exists. Use tree.roots instead\")\n return self.left_root", "def create_empty_node():\n from linked_list import Node\n return Node()", "def begin(self):\n current_node = self.root\n\n while current_node.left_child is not None:\n current_node = current_node.left_child\n\n return current_node", "def make_tree(arr):\n\n for i in range(len(arr)):\n arr, val = mid(arr)\n\n if i == 0: \n binary = BinaryNode(val)\n\n else:\n binary.insert(val)\n\n return binary", "def _sorted_list_to_bst(cls, items=[], start=None, end=None, parent=None):\n if start > end:\n return None\n mid = start + (end - start) // 2\n node = Node(items[mid], parent)\n node.left = cls._sorted_list_to_bst(items, start, mid - 1, node)\n node.right = cls._sorted_list_to_bst(items, mid + 1, end, node)\n return node", "def __init__(self, root=None):\n self.set_root(root)", "def insertLeft(self, node):\n if self.left is None:\n self.left = BinaryTree(node)\n else:\n t = BinaryTree(node)\n t.left = self.left\n self.left = t", "def __init__(self, root):\n self._root = root\n self._leaves = [root]", "def add(self, value):\n if self.root is None:\n self.root = BinaryNode(value) \n else:\n self.root.add(value)", "def add_root(self, engine_name, browser_name=None, define_compo=False):\n browser_name = browser_name or engine_name\n std = self.sstd\n bld = self.sbld\n compo = std.FindComponent(engine_name)\n if compo is None:\n compo = bld.NewComponent(engine_name)\n node = Node(std, bld, compo.GetID(), is_root=True)\n node.write_name(browser_name)\n if define_compo:\n from salome import lcc\n eng = lcc.FindOrLoadComponent(\"FactoryServerPy\", engine_name)\n bld.DefineComponentInstance(compo, eng)\n else:\n node = Node(std, bld, compo.GetID(), is_root=True)\n return node", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def insert(self, k):\n node = self.klass(None, k)\n if self.root is None:\n # The root's parent is None.\n self.root = node\n else:\n self.root.insert(node)\n return node", "def __init__(self, val=None):\n self.val = val\n self.parent = None\n if val is not None:\n self.left = BSTree()\n self.right = BSTree()\n else:\n self.left = 
None\n self.right = None", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def add(self, value):\n # no root node: add node as root\n if self.root == None:\n self.root = Node(value, None)\n return\n\n # root already exists: find place of node to be added\n node = self.root\n while True:\n\n # left child: check if is already a leaf or not\n if value <= node.value:\n\n # current node is a leaf: add new node as left child\n if node.left == None:\n node.left = Node(value, node)\n self.rebalance(node)\n break\n\n # current node is not a leaf: descend to left child\n node = node.left\n\n # right child: check if is already a leaf or not\n else:\n\n # current node is a leaf: add new node as right child\n if node.right == None:\n node.right = Node(value, node)\n self.rebalance(node)\n break\n\n # current node is not a leaf: descend to right child\n node = node.right", "def test_instantiate_gets_correct_root_and_root_pointers():\n input = [13, 42, 7]\n c = BinaryTree(input)\n root_repr = repr(c.root)\n expected_root_repr = '<Node | Val: 13 | Data: None | Left: 7 | Right: 42>'\n assert root_repr == expected_root_repr", "def insert(self, val):\n node = Node(val)\n current = self.root\n\n if self.root is None:\n self.root = node\n return node\n\n while current:\n if val >= current.val:\n if current.right is not None:\n current = current.right\n else:\n current.right = node\n break\n\n elif val < current.val:\n if current.left is not None:\n current = current.left\n else:\n current.left = node\n break\n\n return node", "def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1", "def Insert(root, node):\n target = root.ChooseLeaf(node)\n node.father = target\n target.leaves.append(node)\n target.MBR = merge(target.MBR, node.MBR)\n target.AdjustTree()\n if root.father != None:\n root = root.father\n return root", "def get_or_create_root():\r\n try:\r\n root = URLPath.root()\r\n if not root.article:\r\n root.delete()\r\n raise NoRootURL\r\n return root\r\n except NoRootURL:\r\n pass\r\n\r\n starting_content = \"\\n\".join((\r\n _(\"Welcome to the edX Wiki\"),\r\n \"===\",\r\n _(\"Visit a course wiki to add an article.\"),\r\n ))\r\n\r\n root = URLPath.create_root(title=_(\"Wiki\"), content=starting_content)\r\n article = root.article\r\n article.group = None\r\n article.group_read = True\r\n article.group_write = False\r\n article.other_read = True\r\n article.other_write = False\r\n article.save()\r\n\r\n return root", "def left_root(self):\n return self.left_child(self.virtual_root)", "def insert_node_in_tree(current_node, value):\n current_node_level = get_node_level(current_node)\n if current_node.left is None:\n current_node.left = Node(value, current_node, current_node_level + 1)\n elif current_node.right is None:\n current_node.right = Node(value, current_node, current_node_level + 1)\n else:\n new_node = Node(value, current_node, current_node_level + 2)\n current_node.right = new_node\n new_node.right = current_node.right", "def minimal_tree(array: list):\n bst = BST()\n def build(l, r):\n if l == r: bst.insert(array[l]); return\n m = 
(l+r)//2\n # insert into the tree\n bst.insert(array[m])\n # build recursively\n build(l, m)\n build(m+1, r)\n build(0, len(array)-1)\n return bst", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def insert(root, key, value=None):\n if root is None:\n root = Node(key, value)\n else:\n if key >= root.key:\n if root.right is None:\n root.right = Node(key, value)\n else:\n # Use root.right as the root of the subtree\n insert(root.right, key, value)\n else:\n if root.left is None:\n root.left = Node(key, value)\n else:\n # Use root.left as the root of the subtree\n insert(root.left, key, value)", "def BinaryTree(root):\n return [root, [], []]", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree" ]
[ "0.7032419", "0.6934794", "0.68248075", "0.67966664", "0.6757742", "0.6755082", "0.67382634", "0.67275465", "0.67275465", "0.6668411", "0.6649478", "0.66437757", "0.6606337", "0.6603094", "0.6603094", "0.65813065", "0.65738034", "0.6492516", "0.6490673", "0.6471958", "0.64572597", "0.6427452", "0.6383125", "0.6383125", "0.6383125", "0.6250316", "0.624785", "0.62378335", "0.6234811", "0.6212296", "0.6210868", "0.6180119", "0.6177232", "0.6153926", "0.61537886", "0.6140481", "0.61320925", "0.61223596", "0.61203176", "0.6108695", "0.61031526", "0.6101701", "0.6099491", "0.6077937", "0.60750204", "0.60575664", "0.60558695", "0.60558695", "0.605476", "0.6035771", "0.60199547", "0.59989893", "0.59938955", "0.59872675", "0.59779793", "0.59740806", "0.5971132", "0.5966786", "0.5959985", "0.5948695", "0.5943593", "0.5892179", "0.58841157", "0.58841157", "0.58841157", "0.58841157", "0.58841157", "0.58841157", "0.58835065", "0.58826363", "0.5882234", "0.5861352", "0.5860664", "0.5853929", "0.58526367", "0.58515704", "0.5849898", "0.58462423", "0.5841291", "0.5821028", "0.58206177", "0.58163184", "0.58163184", "0.5806111", "0.5803908", "0.5789036", "0.5782556", "0.57812047", "0.577559", "0.5767632", "0.5761459", "0.5752413", "0.57508194", "0.5746339", "0.5744049", "0.5738361", "0.57322687", "0.57117933", "0.56958365", "0.5695677" ]
0.6229294
29
Returns the length of the BST
def length(self): return self.length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_len(BST):\r\n if isinstance(BST,tuple):\r\n return total_len(BST[0]) + total_len(BST[1])\r\n else:\r\n return len(BST)", "def size(self) -> int:\n #binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper count nodes\n return self.size_helper(self.root)", "def size(node):\n\t\tif node is None:\n\t\t\treturn 0\n\t\treturn 1+BST.size(node.left)+BST.size(node.right)", "def len(self):\n L,R = 0,0\n if self.left is not None:\n L = self.left.len()\n if self.right is not None:\n R = self.right.len()\n return 1 + L + R", "def size(self) -> int:\n if self.root is None: # If tree is empty\n return 0\n\n return self.size_helper(self.root)", "def min_len(BST):\r\n if isinstance(BST,tuple):\r\n return min_len(BST[0]) + min_len(BST[1])\r\n else:\r\n return BST[0]", "def tree_size(self):\n if self._tree_size is not None:\n return self._tree_size\n if self.is_root:\n self.arbor._setup_tree(self)\n # pass back to the arbor to avoid calculating again\n self.arbor._store_node_info(self, '_tree_size')\n else:\n self._tree_size = len(list(self[\"tree\"]))\n return self._tree_size", "def size(self) -> int:\n return self.root.size if not self.empty() else 0", "def tree_size(self) -> int:\n Q = Queue()\n count = 0\n Q.put(self.root)\n while not Q.empty():\n node = Q.get()\n count += 1\n for child in node.children.values():\n Q.put(child)\n return count", "def diameterOfBinaryTree(self, root):\n self.max_length = 0\n def maxDepth(root):\n if not root:\n return 0\n left_branch = maxDepth(root.left)\n right_branch = maxDepth(root.right)\n self.max_length = max(self.max_length, left_branch + right_branch)\n return max(left_branch, right_branch) + 1\n maxDepth(root)\n return self.max_length", "def get_tree_size(self, node):\n\n # If the tree has not been created yet.\n if node == None:\n return 0\n n_nodes = 1\n for child in node.children:\n n_nodes += self.get_tree_size(node.children[child])\n return n_nodes", "def size(self):\n if self.root is None:\n return 0\n return self.root.size", "def bst_count_leaves(tree):\n leaves = 0\n\n def _walk(node=None):\n nonlocal leaves\n if node is None:\n return\n\n if node.left is not None:\n _walk(node.left)\n\n if node.left is None and node.right is None:\n leaves += 1\n\n if node.right is not None:\n _walk(node.right)\n\n _walk(tree.root)\n return leaves", "def size(self):\n count = 0\n if self.val is None:\n return count\n else:\n count += 1\n count += self.left.size()\n count += self.right.size()\n return count", "def size(self):\n return 1 + self.left.size + self.right.size", "def __len__(self):\n return len(self.subtrees())", "def count_leaves(self) -> int:\n # binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper function +=count total leaf\n return self.count_leaves_helper(self.root)", "def count_leaves(self) -> int:\n if self.root is None: # If tree is empty\n return 0\n\n return self.count_helper(self.root)", "def height(root):\n if not root:\n return 0\n lheight = height(root.left)\n rheight = height(root.right)\n if lheight > rheight:\n return lheight + 1\n else:\n return rheight + 1", "def height(self) -> int:\n # binary search tree == empty\n if self.root is None:\n return -1\n\n #count number\n return self.height_helper(self.root)", "def height(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 0\n else:\n if self.has_left():\n if self.has_right():\n return 1+max(self.get_left().height(), self.get_right().height())\n else:\n return 1+self.get_left().height()\n else:\n 
return 1+self.get_right().height()", "def __len__(self):\n return self.tree.filled_size()", "def size(self):\n if self.val is None:\n return 0\n left_size = self.left.size() if self.left is not None else 0\n right_size = self.right.size() if self.right is not None else 0\n return left_size + right_size + 1", "def size(self):\r\n return self.root.size_tree", "def height(T):\r\n if T.isLeaf:\r\n return 0\r\n return 1 + height(T.child[0])", "def get_tree_length(x):\n length = list(self.__plan_graph.objects(subject=x, predicate=AGORA.length)).pop()\n return length", "def get_tree_size(cur):\n sql = \"\"\"\n SELECT\n COUNT(*)\n FROM\n nodes;\n \"\"\"\n cur.execute(sql)\n result = cur.fetchone()\n return result['count']", "def size(self):\n if not self._leftchild and not self._rightchild:\n return 1\n if self._leftchild and not self._rightchild:\n return 1 + self._leftchild.size()\n if self._rightchild and not self._leftchild:\n return 1 + self._rightchild.size()\n return 1 + self._leftchild.size() + self._rightchild.size()", "def height(node):\n\n if node is None:\n return 0\n\n left_height = height(node.left)\n right_height = height(node.right)\n\n return max(left_height, right_height) + 1", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def internal_path_len(root, height=0):\n\n if root is None:\n return 0\n else:\n return height + internal_path_len(root.left, height + 1) + internal_path_len(root.right, height + 1)", "def height(root: Node):\n return (max(height(root.left), height(root.right)) + 1) if root else 0", "def total_branch_length(self):\n return self._ll_tree.get_total_branch_length()", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def heightTree(root):\n try:\n if (root is None):\n return -1\n else:\n return 1 + max(heightTree(root['left']), heightTree(root['right']))\n except Exception as exp:\n error.reraise(exp, 'RBT:heightTree')", "def numNodes(T):\r\n n = 1\r\n if T.isLeaf:\r\n return n\r\n for i in range(len(T.child)):\r\n n += numNodes(T.child[i])\r\n return n", "def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n q = deque([(root, 1)])\n max_width = 1\n\n while len(q) > 0:\n temp_q = deque()\n local_max_width = float('-inf')\n local_min_width = float('+inf')\n\n for (node, position) in q:\n local_max_width = max(local_max_width, position)\n local_min_width = min(local_min_width, position)\n if node.left:\n temp_q.append((node.left, position * 2 - 1))\n if node.right:\n temp_q.append((node.right, position * 2))\n max_width = max(max_width, local_max_width - local_min_width + 1)\n q.clear()\n q = temp_q\n\n return max_width", "def height(self):\n # Check if root node has a value and if so calculate its height\n return self.root.height() if self.root is not None else -1", "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n 
else:\n return sum([x.leaf_count() for x in self.children])", "def _get_height(self, node):\n # Base Case.\n if node is None:\n return 0 \n\n # Recursion.\n left = self._get_height(node.left)\n right = self._get_height(node.right)\n\n # Count the height of the tree.\n if left > right: \n return left + 1\n\n else: \n return right + 1", "def height(t):\n if t.is_empty:\n return 0\n else:\n left = height(t.left)\n right = height(t.right)\n \n return 1 + max([left, right])", "def _get_height(self, root) -> int:\n # BC\n if root is None:\n return 0\n # Take the maximum of the height of the left subtree and the right subtree, and add 1 to the result\n height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n return height", "def get_height(root):\n if root is None:\n return 0\n return max(get_height(root.left), get_height(root.right)) + 1", "def n_trees(self):\n return len(self.data_kd)", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def _height(node):\n\n if not node:\n return 0\n\n return 1 + max(_height(node.left), _height(node.right))", "def countNodes(self, root):\n\n\n if not root:\n return 0\n\n return 1+self.countNodes(root.left)+self.countNodes(root.right)", "def count_height(self, node: TreeNode) -> int:\n if not node.left or not node.right:\n if node.left == node.right:\n return 1\n if node.left and not node.right:\n return node.left.height + 1\n return node.right.height + 1\n return max(node.left.height, node.right.height) + 1", "def depth(self):\n result = 0\n if self.val is None:\n return result\n return max(self.left.depth(), self.right.depth()) + 1", "def height(node):\r\n \r\n height = 0\r\n temp = node\r\n while temp != None:\r\n temp = temp.parent\r\n height += 1\r\n return height", "def __len__(self):\n return len(self.node)", "def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def __len__(self) -> int:\r\n return len(self._nodes)", "def get_length(self):\n curr = self.head\n length = 0\n\n while curr != None:\n length += 1\n curr = curr.link\n\n return length", "def size(self):\n return self.variables.end_of_tree - 1", "def __len__(self) -> int:\n return len(self.nodes)", "def size_helper(self, node: object) -> int:\n #current node\n count = 1\n\n # current node left\n if node.left is not None:\n count += self.size_helper(node.left)\n\n #current node right\n if node.right is not None:\n count += self.size_helper(node.right)\n\n return count", "def node_count(self):\n return self._root.count()", "def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0", "def calculate_tree_height(tree):\n max_height = 0\n for i in tree.values():\n if i.is_leaf():\n path = i.path_to_root()\n if len(path) > max_height:\n max_height = len(path)\n\n return max_height", "def height(self) -> int:\n return self.root.height if not self.empty() else 0", "def size_node(node):\n if node is None:\n return 0\n return node.size", "def count(self):\n return self.__tree.node_count", "def leaf_count(t: Tree) -> int:\n if t.children == []:\n return 1\n else:\n return 
sum([leaf_count(child) for child in t.children])", "def get_length(self):\n pointer = self.head\n counter = 0\n while pointer:\n counter += 1\n pointer = pointer.next_node\n return counter", "def height(t: Tree):\n if len(t.children) == 0:\n return 1\n else:\n return 1 + max([height(c) for c in t.children])", "def _get_height(self, root: AVLTreeNode) -> int:\n if not root: # empty tree means height of 0\n return 0\n else:\n return root.height # return instance var height", "def get_depth(self):\n if self.root is None:\n return 0\n else:\n node_queue = list()\n node_queue.append(self.root)\n depth = 0\n while len(node_queue):\n q_len = len(node_queue)\n while q_len:\n q_node = node_queue.pop(0)\n q_len = q_len - 1\n if q_node.left is not None:\n node_queue.append(q_node.left)\n if q_node.right is not None:\n node_queue.append(q_node.right)\n depth = depth + 1\n return depth", "def height(self):\n if not self._leftchild and not self._rightchild:\n return 0\n if self._leftchild and not self._rightchild:\n return 1 + self._leftchild.height()\n if self._rightchild and not self._leftchild:\n return 1 + self._rightchild.height()\n return 1 + max(self._leftchild.height(), self._rightchild.height())", "def get_node_size(self):\n return self._node_size", "def get_height_tree(self):\n layers = self.breadth_first_traversal()\n \n if all(node is None for node in layers[-1]):\n del layers[-1]\n \n height = len(layers) - 1\n return height", "def get_length(self):\n current_node = self.head\n if current_node:\n i = 1\n while current_node.next:\n current_node = current_node.next\n i += 1\n return i\n else:\n return 0", "def get_height(self):\n if self.root is None:\n return 0\n else:\n return self._get_height(self.root) # Start at the root", "def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)", "def length(self):\r\n current_node = self.head\r\n size = 0\r\n while current_node.next!=None:\r\n size += 1\r\n current_node = current_node.next\r\n return size", "def branch_length(self, u):\n ret = 0\n parent = self.parent(u)\n if parent != NULL:\n ret = self.time(parent) - self.time(u)\n return ret", "def get_height(self):\n def _get_height(node, height=None):\n if not height:\n height = self._get_level(node) + 1\n if node.left:\n height = _get_height(node.left, height+1)\n if node.right:\n height = max(height, _get_height(node.right, height+1))\n if not node.left and not node.right:\n height = self._get_level(node)\n return height\n return _get_height(self.root)", "def __len__(self):\n return 1 + sum([len(child) for child in self.children])", "def depth(self):\n left_depth = self.left.depth() if self.left is not None else 0\n right_depth = self.right.depth() if self.right is not None else 0\n return max(left_depth, right_depth) + 1", "def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value", "def _parse_tree_height(sent):\n children = list(sent._.children)\n if not children:\n return 0\n else:\n return max(_parse_tree_height(child) for child in children) + 1", "def qx_len(self):\n bbox = self.bounds_to_tuple(self.tree_bounds[0])\n return bbox.right - bbox.left", "def size(self):\n\t\treturn len(self.nodes)", "def 
total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def treeLevel(root):\n\n if not root:\n return 0\n else:\n return 1+max(treeLevel(root.left),treeLevel(root.right))", "def get_node_size(self):\n range_start = self._node_map[self._partid - 1] if self._partid > 0 else 0\n range_end = self._node_map[self._partid]\n return range_end - range_start", "def size(self):\n if len(self.children) == 0:\n return 1\n else:\n return 1 + sum([x.size() for x in self.children])", "def ht(node):\n n = 0\n while node: n, node = n+1, node.left\n return n", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def height(self):\n # TODO: Check if left child has a value and if so calculate its height\n left_height = ... if self.left is not None else -1\n # TODO: Check if right child has a value and if so calculate its height\n right_height = ... if self.right is not None else -1\n # Return one more than the greater of the left height and right height\n return 1 + max(left_height, right_height)", "def height(self) ->int:\n if self.children == []:\n return 1\n else:\n return 1 + max([c.height() for c in self.children])\n # another solution\n # return 1 + max([c.height() for c in self.children]+[0])", "def GetCount(self):\r\n\r\n if not self._anchor:\r\n # the tree is empty\r\n return 0\r\n\r\n count = self._anchor.GetChildrenCount()\r\n \r\n if not self.HasAGWFlag(TR_HIDE_ROOT):\r\n # take the root itself into account\r\n count = count + 1\r\n \r\n return count", "def __len__(self):\r\n return len(self.x) + sum(len(child) for child in self.children)", "def height(self) -> int:\n if self.root is None:\n return -1\n\n return self.height_helper(self.root)", "def fn(node):\n nonlocal ans \n if not node: return True, 0, inf, -inf # BST? | size | low | high\n ltf, lsz, llo, lhi = fn(node.left)\n rtf, rsz, rlo, rhi = fn(node.right)\n tf = ltf and rtf and lhi < node.val < rlo\n sz = 1 + lsz + rsz\n if tf: ans = max(ans, sz)\n return tf, sz, min(llo, node.val), max(rhi, node.val)", "def length(self):\n if self.head:\n count = 1\n current = self.head\n while(current.next != self.head):\n\tcount+=1\n\tcurrent = current.next\n return count\n else:\n return 0", "def size_helper(self, node: object) -> int:\n count = 1\n if node.left is not None: # Calling helper on left subtree of current node\n count += self.size_helper(node.left)\n\n if node.right is not None: # Calling helper on right subtree of current node\n count += self.size_helper(node.right)\n return count", "def height_helper(self, node: object) -> int:\n if self.leaf(node): # If current node is a leaf\n return 0\n\n if node.left is not None and node.right is None:\n return 1 + self.height_helper(node.left)\n\n if node.left is None and node.right is not None:\n return 1 + self.height_helper(node.right)\n\n # If node has two children\n if self.height_helper(node.left) > self.height_helper(node.right):\n return 1 + self.height_helper(node.left)\n\n else:\n return 1 + self.height_helper(node.right)" ]
[ "0.78509456", "0.7732251", "0.76259017", "0.7583946", "0.7449236", "0.744216", "0.7385411", "0.7341175", "0.72633183", "0.7180082", "0.7144463", "0.71312356", "0.71181273", "0.7093196", "0.7090866", "0.7089732", "0.70622516", "0.70397943", "0.701053", "0.7006638", "0.6984938", "0.6969688", "0.6948725", "0.6943857", "0.6939177", "0.6919242", "0.6905142", "0.6887441", "0.68784773", "0.68686247", "0.686641", "0.68624556", "0.67832047", "0.67540234", "0.67310774", "0.672769", "0.67148423", "0.6704011", "0.6701319", "0.66944563", "0.6686549", "0.66722435", "0.66599786", "0.66561216", "0.6650617", "0.6629448", "0.6620895", "0.6590917", "0.6573439", "0.65674657", "0.6556802", "0.6550304", "0.6532268", "0.65310884", "0.65294063", "0.6525055", "0.6521499", "0.6517222", "0.6516535", "0.65153664", "0.6504278", "0.65017235", "0.64982706", "0.64819366", "0.6474578", "0.647165", "0.6460246", "0.6442906", "0.64387965", "0.64224434", "0.63889843", "0.6376799", "0.63651407", "0.63636607", "0.6356062", "0.6351268", "0.63511086", "0.63393235", "0.63335323", "0.6330316", "0.63191843", "0.6314037", "0.6312521", "0.6311413", "0.63113976", "0.6304312", "0.6296942", "0.62948036", "0.62844837", "0.62794507", "0.6270874", "0.626842", "0.62492454", "0.6240138", "0.6232608", "0.6225841", "0.6194906", "0.61924213", "0.61873096", "0.61849165", "0.6183451" ]
0.0
-1
overload the in operator. credit.org
def __contains__(self,key): if self.recursiveLookup(key,self.root): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regular(self):", "def CL(self):", "def exo2():", "def operation(self):\n pass", "def idealOpAmp():", "def two(self):", "def __pow__(self,*args):\r\n pass", "def express(self):\n raise NotImplementedError", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def numerator(self, ???):", "def base_operator(self):\n raise NotImplementedError()", "def __pow__(self, ???):", "def c(self):\n pass", "def c(self):\n pass", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def real(self, ???):", "def __int__(self):\n pass", "def __call__(value):", "def logic(self):\r\n raise NotImplementedError", "def to_op(self):\n raise NotImplementedError", "def apply(self):", "def __add__(self, other)\n \n def __mul__(self, other):\n print(\"Standard multiplication with\", other.which)\n print(other.which)\n \n if isinstance(other, int):\n print(\"Other is int\")\n self.coefficients *= other\n \n return self\n \n else:\n print(\"Other operator is spin operator\")\n summandsNew = []\n coeffsNew = []\n for otherSummandIndex, otherSummand in enumerate(other.summands):\n for thisSummandIndex, thisSummand in enumerate(self.summands):\n summandsNew.append(flatten([thisSummand, otherSummand]))\n coeffsNew.append(self.coefficients[thisSummandIndex]*other.coefficients[otherSummandIndex])\n print(summandsNew) \n self.coeffs = coeffsNew\n self.summands = summandsNew\n \n return self", "def __call__(self, x):\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, 
**kwargs): # real signature unknown\n pass", "def __rpow__(self, *args, **kwargs): # real signature unknown\n pass", "def result(self) -> global___Expression:", "def a(self):\n pass", "def a(self):\n pass", "def exercise_b2_113():\r\n pass", "def degibber(self):", "def __add__(self, other):\n \"*** YOUR CODE HERE ***\"", "def prim_method(self):", "def prim_method(self):", "def reckon(self):", "def _regr_basic():", "def __rpow__(self, ???):", "def b(self):\n pass", "def b(self):\n pass", "def _as_rhs(self):\n raise NotImplementedError", "def __reduce__(self): # real signature unknown; restored from __doc__\r\n pass", "def cx():", "def __rpow__(self, other):\n pass # TODO: implement this.", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def g(self):\n return 2", "def number(self):", "def exercise_b2_53():\r\n pass", "def __call__(self, x):", "def calculate(self):", "def result(self):", "def result(self):", "def __nonzero__(*args, **kwargs):\n \n pass", "def substantiate():", "def __call__(self) -> None:", "def plus(self, a, b):\n return a + b", "def operator(self, sort):\r\n return None", "def _append_operator(self, operator):", "def aic(self, X):\n raise NotImplementedError", "def function(self):\n raise NotImplementedError", "def x(self):\n pass", "def __call__(a, b):", "def ADP (self):", "def exercise_b2_39():\r\n pass", "def exercise_b2_98():\r\n pass", "def support(self):", "def __call__(object):" ]
[ "0.6408418", "0.6184584", "0.61502206", "0.6102789", "0.60980195", "0.6068953", "0.59730977", "0.5938999", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.588855", "0.58851856", "0.58687365", "0.5855215", "0.5855215", "0.5841609", "0.5821051", "0.5761362", "0.5760488", "0.5750584", "0.5744155", "0.5691103", "0.56892616", "0.5685416", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.56813484", "0.5676538", "0.56593263", "0.56593263", "0.5645756", "0.56366605", "0.56328607", "0.56242156", "0.56242156", "0.561932", "0.559166", "0.5587221", "0.5586", "0.5586", "0.55699295", "0.55696183", "0.55693203", "0.5566365", "0.55525124", "0.55525124", "0.55525124", "0.55525124", "0.55525124", "0.5550663", "0.5550663", "0.5550663", "0.55278075", "0.55253166", "0.5517314", "0.5514939", "0.5514421", "0.5508372", "0.5508372", "0.5505007", "0.54927063", "0.54804933", "0.54800224", "0.54746145", "0.5470438", "0.546223", "0.54565144", "0.5454903", "0.54546624", "0.54396045", "0.5435355", "0.5435269", "0.54269415", "0.54250073" ]
0.0
-1
internal function returns length
def __len__(self): return self.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__():", "def __len__():", "def __len__():", "def Length(self) -> int:", "def Length(self) -> int:", "def length(self):\n ...", "def size(self) -> int:", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def total_length():\n return", "def length(self):\n pass", "def __len__(self):\n return self.__length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def Length(data):\n return len(data)", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self) -> int:", "def __len__(self) -> int:", "def __len__(self) -> int:\n return len(self.length)", "def __len__(self):\n return self._length # pylint: disable = E1101", "def size() -> int:\n ...", "def __len__(self) -> int:\n return len(self.getvalue())", "def __len__(self) -> int:\n return self.length", "def __len__(self):\n\t\treturn self._size", "def __len__(self):\n ## return 1 + len(self.rest)\n length = 1\n rest = self.rest\n while rest:\n length , rest = length + 1, rest.rest\n return length", "def __len__(self) -> int:\n return self._length", "def __len__(self):\n\t\treturn self.len", "def __len__(self):\n return self._length", "def length(self):\n\t\treturn self.n", "def __len__(self):\n\n return self.length", "def __len__(self):\n # type: () -> int\n return len(self.data)", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def length(self) -> int:\n pass", "def __len__(self) -> int:\n return self._len", "def len (self):\n\t\treturn len (self.data)", "def __len__(self) -> int:\n raise NotImplementedError", "def __len__(self):\n return self.lengths[0]", "def length(self):\n return self.count", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self):\r\n return self.len", "def __len__(self):\r\n return self.len", "def __len__(self):\r\n return self.len", "def __len__(self):\r\n return self.len", "def __len__(self):\r\n return self.len", "def __len__(self):\n return self._len", "def __len__(self): # pragma: no cover\n return self.size()", "def __len__(self):\r\n return self._size", "def __len__(self):\r\n return self._size", "def __len__(self):\n\t\treturn self.n", "def __len__(self):\n return self.__size", "def __len__(self):\n return self._count()", "def getLength(self):\n return self.count", "def __len__(self):\n return sum(len(p) for p in self.parts)", "def __len__(self):\n return self._n", "def len(self):\n return self.__len__()", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\r\n return self.size", "def __len__(self):\n len(self.data)", "def __len__(self):\n return self.len", "def __len__(self):\n return self.len", "def __len__(self):\n return self._size", 
"def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size" ]
[ "0.8433408", "0.8433408", "0.8433408", "0.8313579", "0.8313579", "0.8182767", "0.8016835", "0.79593396", "0.793603", "0.789175", "0.7891721", "0.7867253", "0.7867253", "0.7867253", "0.7857477", "0.78541964", "0.78541964", "0.78541964", "0.78541964", "0.78541964", "0.78541964", "0.7846054", "0.7846054", "0.7830035", "0.7826139", "0.78172076", "0.7812411", "0.7811912", "0.7801219", "0.7798588", "0.7768327", "0.7753802", "0.7753206", "0.77523595", "0.7741968", "0.77391887", "0.7732519", "0.7732519", "0.7732519", "0.77276564", "0.7725013", "0.7702799", "0.770067", "0.76998985", "0.76746744", "0.76745313", "0.76745313", "0.76745313", "0.7669406", "0.7669406", "0.7669406", "0.76574", "0.76574", "0.76574", "0.76574", "0.76574", "0.76567376", "0.76525074", "0.763144", "0.763144", "0.76301384", "0.76083857", "0.7605993", "0.7604674", "0.760155", "0.7601167", "0.7591211", "0.7585691", "0.7585691", "0.7582689", "0.7578892", "0.7574979", "0.7574979", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554", "0.7562554" ]
0.0
-1
Allows to override how we insert things
def __setitem__(self,k,v): self.insert(k,v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self):\n pass", "def before_insert(self, obj, st):\n pass", "def on_insert(self) -> None:", "def insert(self, data):\r\n pass", "def after_insert(self, obj, st):\n pass", "def insert_data(self):\n\n pass", "def _insert_op(self, op):", "def DocumentElementInsertBefore(self):\n raise NotImplementedError()", "def insert(self, sample, *args):\n raise NotImplementedError", "def insert_values():\n pass", "def insert(self, index, p_object): # real signature unknown; restored from __doc__\n pass", "def insert(self, i, x) -> None:\n pass", "def insert(self, item: T) -> None:\n pass", "def insert(self, product):\n pass", "def DocumentElementInsertAfter(self):\n raise NotImplementedError()", "def insertItem(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insert(self, *args):\n return _libsbml.ListOf_insert(self, *args)", "def insertRow(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insertAndOwn(self, *args):\n return _libsbml.ListOf_insertAndOwn(self, *args)", "def insert_index(self):\n pass", "def _create_placeholders(self):\n raise NotImplementedError", "def insert(self, rule, ident):\n raise NotImplementedError", "def do_insert(self,args):\n if len(args) != 0:\n for w in args.split():\n sl.insertList(int(w.rstrip()))", "def insert(self, index: int, item: Any) -> BaseList:\n super().insert(index, item)\n return self", "def insertRows(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def inserted(self):\n return True", "def insert(self, item: T, index: int) -> None:\n pass", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def insert(self, *a):\r\n return self.stack.insert(*a)", "def augment(self, *args, **kwargs):\n pass", "def InsertUnderLast(self, *args, **kwargs):\n pass", "async def _insert_stmt(self):\n raise NotImplementedError", "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def insertChild(self):\n # insert at position 3 as first is heading and next two nodes have some info \n # from pos 3 the releaselog entry starts\n self.entry.content.html.get_elements('table')[0].get_elements('tbody')[0].get_elements('tr')[0].get_elements('td')[0].get_elements()[0].children.insert(3, self.new_xml)", "def help_insert(self):\n print(INSERT)", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def insert(self, index, chars, *args):\n self.config(state=NORMAL)\n Text.insert(self,index, chars, args)\n self.config(state=DISABLED)", "def on_insert(self, event):\n bolinserted, row_before, (fldname, fldtype) = self.insert_before()\n if bolinserted:\n ## should be only change\n self.add_new_to_settings(row_before, fldname, fldtype)\n self.update_demo()\n self.tabentry.grid.SetFocus()\n event.Skip()", "def insert(self, *args, **kwargs):\n return _image.image_insert(self, *args, **kwargs)", "def insert(self, index, *elements, **kw):\n # add support for tagged_text on input!\n if PyVers_f >= 3.4:\n index_i = super().index(index)\n super().insert(index, *elements)\n lb_elements = super().get(index_i, index_i + len(elements) - 1)\n for x, elem in enumerate(lb_elements, index_i):\n _, attrs, text = split_chunk(elem)\n if attrs:\n opts, _, case = parse_tag_attrs(attrs, {}, {}, **kw)\n if case:\n text = getattr(text, case)()\n super().insert(x, text)\n super().itemconfig(x, **opts)\n super().delete(x + 1)\n else:\n # bug in 
earlier Py versions causes above to fail on 1st elem\n elems_to_process = elements[:: 1 if index == tk.END else -1]\n for elem in elems_to_process: # elements[::-1]:\n if type(elem) in (list, tuple):\n elem1 = [\n e.replace(\"{\", r\"\\{\").replace(\"}\", r\"\\}\") for e in elem\n ]\n elem2 = [\"{%s}\" % e if \" \" in e else e for e in elem1]\n elem = \" \".join(elem2)\n _, attrs, text = split_chunk(elem)\n if attrs:\n opts, _, case = parse_tag_attrs(attrs, {}, {}, **kw)\n if case:\n text = getattr(text, case)()\n super().insert(index, text)\n super().itemconfig(index, **opts)\n else:\n super().insert(index, text)", "def insert(self, new):\n return self.replace(None, new)", "def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]):\n pass", "def insert(cls, data):\n return super(notification, cls).insert(data)", "def insert(self, e): \r\n if not e in self.vals:\r\n self.vals.append(e)", "def insert(self, element):\n self.line.append(element)", "def InsertNextPoint(self, ):\n ...", "def insertByHand(self):\n\n fieldValues = []\n for field in self.fieldNames:\n fieldValues.append(raw_input(\"Give \" + field + \": \"))\n\n print(self.tableName + \".insert(\" + str(fieldValues) + \")\")\n\n self.insert(fieldValues)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, *args):\n return _ida_hexrays.hexwarns_t_insert(self, *args)", "def __setitem__(self, key, value):\n self.insert(key, value)", "def insert(self, index: int, item: Any) -> None:\n self.contents.insert(index, item)\n return", "def _insert_tag_thing(cur, tag_id, thing_id):\n cur.execute(dbq.INSERT_TAG_THING, [tag_id, thing_id])\n logger.debug(\"Linked tag_id '{}' and thing_id '{}'\".format(tag_id, thing_id))", "def add_event(self, event):\r\n return super().insert_event(event)", "def on_insert(self, callback):\n self._insert_callback = callback if callable(callback) else _void", "def add(self, item):", "def insert(self, e):\n if e not in self.vals:\n self.vals.append(e)", "def insert(self, b):\n self.liste.append(b)", "def onInsert(self):\n self.mainWindow.insert()", "def InsertPoint(self, p_int, ):\n ...", "def prepend(self, item: Any):\n self.insert(0, item)", "def __init__(self):\r\n super(AppendNode, self).__init__()", "def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')", "def insert(self, indexes: Tuple[int, ...], tree: 'Tree') -> None:\n ...", "def insert(self):\n #vim.command(\"autocmd! 
CursorMovedI *\")\n try:\n placeholder = self.placeholders.pop()\n pos = self.findPlaceholder(placeholder)\n except IndexError:\n #TODO here I could do a findAllPlaceHolders on the complete file, for\n #reducing errors!\n pos = (0,0,0)\n if pos !=(0,0,0):\n line = self.buffer[pos[0]]\n new_line = line[:pos[1]] + \"\" + line[pos[1]+pos[2]:]\n cursor = (pos[0]+1, pos[1])\n vim.current.window.cursor = cursor\n vim.command(\"startinsert\")\n vim.command(\"redraw\")\n self.buffer[pos[0]] = new_line\n yield\n self.templateMode = False\n return", "def insert_callback(self, chain, value):", "def insertChild(self, *args):\n return _libsbml.ASTBasePlugin_insertChild(self, *args)", "def insertJoint(*args, **kwargs)->AnyStr:\n pass", "def prepend_inst(self, inst):\n inst.basic_block = self\n self.insts = [inst] + self.insts", "def insert(self, *args):\n self.insert_count += 1\n self.total_ops += 1\n return super(BulkOperator, self).insert(*args)", "def insert(self, word: str) -> None:\n # Inserting element into the list using append().\n self.mylist.append(word)", "def insert_parts(self, parts):\r\n self.board.insert_parts(parts)\r\n self.set_changed(parts)", "def append(self, item: T) -> None:\n self.insert(item)", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def _insert(self, key):\n self.tree.insert(key)", "def insert(self, pid, pname, pparent, pobj, ptype):\r\n self.pids.append(pid)\r\n self.pnames.append(pname)\r\n self.pparents.append(pparent)\r\n self.ptypes.append(ptype)\r\n self.pobjs.append(pobj)", "def ExtraInfo(self) -> object:", "def add_before ( self ):\n self.add_item( 0 )", "def insertFunctionRun(self):\n\t\treturn", "def insert(self, word):\n now = self.tree\n for i in word:\n now[i] = now.setdefault(i,{})\n now = now[i]\n now['end']=True", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def _hook(self):", "def extend(self, other):\n # YOUR CODE HERE\n raise NotImplementedError()", "def wrapup(self):\n pass", "def insert_arg(self, factory, arg, *args, **kwargs):\n args = (arg,) + args\n try:\n spec = self._by_factory[factory]\n except KeyError:\n spec = self._by_factory[factory] = HandlerSpec(factory)\n\n if spec.args is None:\n spec.args = []\n spec.args.insert(0, (args, kwargs))", "def insert_before(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx, self)", "def append(self, *args, **kwargs): # real signature unknown\n pass", "def insert(self, point, extra_data):\n hvs = self.input_to_hash(self.hash(point))\n for i, table in enumerate(self.hash_tables):\n hv = hvs[i]\n if hv not in table:\n table[hv] = []\n table[hv].append(extra_data)", "def insertData(self, itemData):\n for key, value in itemData.items():\n setattr(self, key, value)\n\n self.addIdentifiers()\n self.addLinks()\n self.addMeasurements()\n self.addDates()\n self.addRights()\n self.addAgents()\n\n logger.info('Inserted item {}'.format(self))", "def insert_element(new_cloth, index=0):\n global clothes\n clothes.insert(index, 
new_cloth)\n print (clothes)", "def insert_placement(self, insert_placement):\n\n self._insert_placement = insert_placement", "def insert(self, e):\n if not e in self.vals:\n self.vals.append(e)", "def _insert_callback(self, _=None):\n try:\n value = ntutils.type_cast(self.insert_entry.get(), self._items[self._curr_indices[0]].getType())\n for idx in self._curr_indices:\n ntutils.set_entry_by_type(self._items[idx], value)\n # Reload when done to assure the new data appears in the listboxes.\n self.reload()\n except ValueError as e:\n # If an error occurred while trying to cast the given string to the type expected by the selected entries,\n # create a popup window telling the user of the error.\n popup.dialog(\"Type Error\", \"Error: {}\".format(str(e)), popup.BUTTONS_OK,\n frame_style=self._style, message_style=self._label_style, button_style=self._button_style)", "def do_insert(self, text):\n args = text.split()\n if len(args) == 2:\n try:\n pos = int(args[0])\n value = int(args[1])\n self.list.insert(pos, value)\n print(self.list, sep=', ')\n except ValueError:\n print('Error: invalid literal.')\n except IndexError:\n print('Error: invalid position.')\n else:\n print('Error: insert takes two parameters.')", "def add(self, *items):", "def add_extra_args(self):\n pass", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)" ]
[ "0.7783864", "0.7460584", "0.7289235", "0.71610194", "0.7091829", "0.6844495", "0.6827948", "0.6589303", "0.65811783", "0.6558969", "0.65067995", "0.63486093", "0.6267669", "0.62151104", "0.6206384", "0.6192331", "0.6132293", "0.60734904", "0.604549", "0.60312724", "0.6014594", "0.60078335", "0.59823555", "0.5944418", "0.5928798", "0.59052885", "0.5902251", "0.59013087", "0.5868102", "0.5835796", "0.5825911", "0.5819144", "0.58067787", "0.5799743", "0.57714105", "0.5759704", "0.57524204", "0.57524186", "0.5745946", "0.57134736", "0.57100296", "0.570522", "0.5697838", "0.5694841", "0.5665461", "0.56580454", "0.5635624", "0.5635413", "0.5635413", "0.56176645", "0.5616912", "0.5594926", "0.55920255", "0.55763566", "0.55700964", "0.5566334", "0.5551503", "0.55451924", "0.553293", "0.55291206", "0.55278224", "0.55275196", "0.5525478", "0.552119", "0.5509188", "0.55047816", "0.5504176", "0.55020523", "0.5497834", "0.54926246", "0.5491617", "0.54826033", "0.5478522", "0.5475569", "0.5475569", "0.5475569", "0.5475569", "0.54642236", "0.5454356", "0.54433835", "0.5443111", "0.5443003", "0.54369473", "0.54346144", "0.54288787", "0.54255897", "0.54224205", "0.5421463", "0.5420624", "0.5418301", "0.5414254", "0.5408092", "0.5405126", "0.5395632", "0.539518", "0.53950113", "0.5381545", "0.53757405", "0.53756946", "0.5370713" ]
0.5370425
100
This function will insert data into the BST using a log_2(n) algorithm
def insert(self, key, data):
    debug.printMsg('Insert for "' + key + '" With data: ' + str(data) )
    # if there is no root node
    if not self.root:
        debug.printMsg("No root was found, create one")
        self.root = Node(key, data)
    else:
        debug.printMsg("Root was found, starting recursive insert")
        self.recursiveInsert(key, data, self.root)
    # increment the size of the BST
    debug.printMsg("Incrementing size of BST")
    self.size = self.size + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bst_insert(root, data):\n if root is None:\n root = Tree(d=data)\n elif data > root.data:\n root.right = bst_insert(root.right, data)\n else:\n root.left = bst_insert(root.left, data)\n return root", "def _recursive_insert(self, data, node):\n\n\t\t#NOTE - this line prevents inserting duplicate data into the tree\n\t\t#another alternative is to create a list at each node holding the duplicates\n\t\tif (data == node.value()):\n\t\t\treturn\n\n\t\tif (data < node.value()):\n\t\t\tif node.lchild():\n\t\t\t\tself._recursive_insert(data, node.lchild())\n\t\t\telse:\n\t\t\t\tnode.setlchild(BTNode(value=data, parent=node, depth=node.depth() + 1))\n\t\telse:\n\t\t\tif node.rchild():\n\t\t\t\tself._recursive_insert(data, node.rchild())\n\t\t\telse:\n\t\t\t\tnode.setrchild(BTNode(value=data, parent=node, depth=node.depth() + 1))", "def insert(self, data):\n if self.isEmpty():\n self.root = BinarySearchTree(data)\n return\n\n if self.data < data:\n if self.right == None:\n self.right = BinarySearchTree(data)\n self.right.parentNode = self\n else:\n self.right.insert(data)\n else :\n if self.left == None:\n self.left = BinarySearchTree(data)\n self.left.parentNode = self\n\n else:\n self.left.insert(data)", "def bst_insert(sizes):\n tree = rbTree_main.BinarySearchTree();\n for i in range(sizes):\n tree.insert(random.random())", "def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n", "def _insert(self, data, cur_node):\n if data < cur_node.data:\n if cur_node.left_child == None:\n cur_node.left_child = AVLNode(data)\n cur_node.left_child.parent=cur_node # set parent\n self._check_balance(cur_node.left_child)\n else:\n self._insert(data, cur_node.left_child)\n elif data > cur_node.data:\n if cur_node.right_child == None:\n cur_node.right_child = AVLNode(data)\n cur_node.right_child.parent = cur_node # set parent\n self._check_balance(cur_node.right_child)\n else:\n self._insert(data,cur_node.right_child)\n # else:\n # print(\"data already in tree!\")", "def _insert(self, node, root):\n if not root:\n root = node\n elif node.key < root.key:\n root.left = self._insert(node, root.left)\n if root.right and (root.left.height - root.right.height == 2):\n # Inserted node on the left side, check if left side is larger by 2\n # this is not allowed\n # at most 1 difference\n if node.key < root.left.key:\n root = self.rotate_with_left_child(root)\n else:\n root = self.double_with_left_child(root)\n # It's in wrong position, put it on the right\n elif node.key > root.key:\n root.right = self._insert(node, root.right)\n if root.left and (root.right.height - root.left.height == 2):\n # Inserted node on the 
right side, check if right side larger by 2\n # not allowed\n # max 1 difference\n if node.key > root.right.key:\n root = self.rotate_with_right_child(root)\n else:\n root = self.double_with_right_child(root)\n # It's in wrong position, put it on the left\n\n root.height = max(root.left.height if root.left else -1, root.right.height if root.right else -1) + 1\n # get root height, left or right subtree height + 1, depending which is greater\n return root", "def insert(self, data):\n if type(data) != np.ndarray or np.shape(data) != (len(data),):\n raise ValueError(\"The data type is incorrect\")\n \n if self.root == None:\n new_node = KDTNode(data) #Sets the root if it is empty.\n new_node.pivot = 0\n self.root = new_node\n self.k = len(data)\n return\n def my_step(data, current_node, parent_level):\n if current_node == None:\n current_node = KDTNode(data)\n if (parent_level) == len(data) -1:\n current_node.pivot = 0\n else:\n current_node.pivot = parent_level+1\n return(current_node)\n elif current_node.value[current_node.pivot] == data[current_node.pivot]:\n raise ValueError('The value is already in the tree.') #Traverses down the tree until either there is an empty spot, or the value is already there. If so, an error is raised.\n elif data[current_node.pivot] < current_node.value[current_node.pivot]:\n current_node.left=my_step(data, current_node.left, current_node.pivot)\n current_node.left.prev = current_node\n return(current_node)\n elif data[current_node.pivot] > current_node.value[current_node.pivot]:\n current_node.right=my_step(data, current_node.right, current_node.pivot)\n current_node.right.prev = current_node\n return(current_node)\n my_step(data, self.root, self.root.pivot)\n return\n \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def insert(self, data):\n if data < self.data:\n if self.left is None:\n self.left = Node(data, self)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data, self)\n else:\n self.right.insert(data)", "def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data", "def create_bst(self, a, left, right):\n if left > right:\n return\n mid = (left + right) / 2\n self.insert(a[mid])\n self.create_bst(a, left, mid - 1)\n self.create_bst(a, mid + 1, right)", "def insert(self, key):\r\n if self.root.num_keys() == self.max_num_keys:\r\n self.root = Node([], [self.root])\r\n self.root.split_child(0)\r\n\r\n node = self.root \r\n while not node.is_leaf():\r\n index = node.search(key)\r\n\r\n child = node.children[index]\r\n if child.num_keys() == self.max_num_keys:\r\n node.split_child(index)\r\n\r\n if node.keys[index] < key:\r\n index += 1\r\n\r\n node = node.children[index] \r\n\r\n node.insert(key)", "def insert(self, value):\n\n\n if value < self.data:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BinaryNode(value)\n\n elif value > self.data:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BinaryNode(value)\n\n else:\n self.data = self.data", "def __insert_tree(self, t):\n\t\tif not t:\n\t\t\treturn\n\t\tif t.value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = t\n\t\t\telse:\n\t\t\t\tself.right.__insert_tree(t)\n\t\telif t.value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = 
t\n\t\t\telse:\n\t\t\t\tself.left.__insert_tree(t)", "def insert(self, data):\n def find_parent(current):\n \"\"\"Recursively step through the tree until finding the node\n that should be the parent of node to be inserted.\n If there is no such node, raise a ValueError.\n \"\"\"\n if np.allclose(data, current.value):\n raise ValueError(\"Duplicate data, cannot insert!!!\")\n elif data[current.pivot] < current.value[current.pivot]:\n if current.left is None:\n # Base case 1 Found parent! Child goes to the left\n if current.pivot == self.k - 1:\n #If pivot is at end of dimension, start over at 0\n new_node.pivot = 0\n else:\n new_node.pivot = current.pivot + 1\n #Set parents left child to the new node\n current.left = new_node\n else:\n return find_parent(current.left) # Recursively search left.\n else:\n if current.right is None:\n # Base case 2 Found Parent! Child goes to right\n if current.pivot == self.k - 1:\n #If pivot is at end of dimension, start over at 0\n new_node.pivot = 0\n else:\n new_node.pivot = current.pivot + 1\n #Set parents right child to the new node\n current.right = new_node\n else:\n return find_parent(current.right) # Recursively search right.\n new_node = KDTNode(data)\n if self.root == None:\n new_node.pivot = 0\n self.root = new_node\n self.k = len(data)\n elif len(data) != self.k:\n raise ValueError(\"Data is not k-dimensional!!!\")\n else:\n find_parent(self.root)", "def _insert(self, data: int, node: _Node) -> NoReturn:\n if data == node.data:\n raise ValueError(\"Duplicate value\")\n elif data < node.data:\n if node.left != None:\n self._insert(data=data, node=node.left)\n else:\n node.left = _Node(data=data)\n elif data > node.data:\n if node.right != None:\n self._insert(data=data, node=node.right)\n else:\n node.right = _Node(data=data)", "def _insert(self, root: TreeNode, node: TreeNode):\n if root is None:\n return # Could simply return/\"rebound\" the node parameter up the stack and assign where needed, or return\n\n if node.key < root.key: # First check to determine direction: left\n if root.left is None: # Second check to check if a left child doesn't exist\n root.left = node # If it doesn't simply assign\n else:\n self._insert(root.left, node) # Else, simply recur left\n\n elif node.key > root.key: # Similar for the right subtree\n if root.right is None:\n root.right = node\n else:\n self._insert(root.right, node)", "def _insert(self, key: int) -> TreeNode:\n node = self.root\n while True:\n # Check if a key is greater than node.\n if key > node.val:\n if not node.right:\n # node.right is a leaf\n node.right = TreeNode(val=key)\n node.right.parent = node\n return node\n node = node.right\n elif key < node.val:\n if not node.left:\n # node.left is a leaf\n node.left = TreeNode(val=key)\n node.left.parent = node\n return node\n node = node.left\n else:\n # print(f\"{key}: already in a Tree.\")\n return", "def _insert(self, value, cur_node):\n if value < cur_node.value:\n if cur_node.left_child == None:\n cur_node.left_child = Node(value)\n else: \n self._insert(value, cur_node.left_child)\n elif value > cur_node.value: #creating elif in case the value is same as the current node \n if cur_node.right_child == None:\n cur_node.right_child = Node(value)\n else:\n self._insert(value, cur_node.right_child)\n else:\n print(\"Value already in the tree\")", "def insert(self, node , hx, data):\n #if tree is empty , return a root node\n if node is None:\n self.node_count += 1\n return self.create_node(hx, data)\n if data <= node.data:\n node.left = 
self.insert(node.left, hx, data)\n elif data > node.data:\n node.right = self.insert(node.right, hx, data)\n\n return node", "def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)\n\t\tself.check_balance()", "def insertLeaf(T,i):\r\n T.data.append(i) \r\n T.data.sort(key=lambda x: x.word)", "def insert(self, value):\n if value < self.value:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BSTNode(value)\n else:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BSTNode(value)", "def insert(self, val):\n\n\t\tif not self.root:\n\t\t\tself.root = BinaryTreeNode(val)\n\n\t\telse:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif not node.left:\n\t\t\t\t\tnode.left = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.left)\n\n\t\t\t\tif not node.right:\n\t\t\t\t\tnode.right = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.right)\n\n\t\tself.numNodes += 1", "def _insert_in_tree(self, k: str, current_node: str) -> int:\n dist_current_node = self.distance_function(\n self.hash_dict[k], self.dict_all[current_node].node_value\n )\n condition_insert_current_node_child = (\n not self.dict_all[current_node].children\n ) or (\n dist_current_node not in list(self.dict_all[current_node].children.values())\n )\n if condition_insert_current_node_child:\n self.dict_all[current_node].children[k] = dist_current_node\n self.dict_all[k] = BkTreeNode(\n k, self.hash_dict[k], parent_name=current_node\n )\n else:\n for i, val in self.dict_all[current_node].children.items():\n if val == dist_current_node:\n node_to_add_to = i\n break\n self._insert_in_tree(k, node_to_add_to)\n return 0", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = none\n else:\n self.right.insert(node)", "def insert(self, data):\n\n\t\tif (self.treetype() and type(data) != self.treetype()):\n\t\t\traise TypeError(str(type(data)) + \" is invalid for this tree.\")\n\n\t\tself._size += 1\n\n\t\tif (not self._root):\n\t\t\tself._root = BTNode(value=data, depth=1)\n\t\t\treturn\n\n\t\tself._recursive_insert(data, self._root)\n\t\treturn", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def insert(self, key):\n # Create new node\n n = TreeNode(key)\n if not self.node:\n self.node = n\n self.node.left = AvlTree()\n self.node.right = AvlTree()\n elif key < self.node.val:\n self.node.left.insert(key)\n elif key > self.node.val:\n self.node.right.insert(key)\n self.re_balance()", "def insertInternal(T,i):\r\n if T.isLeaf:\r\n insertLeaf(T,i)\r\n else:\r\n k = 
findChildA(T,i) \r\n if isFull(T.child[k]):\r\n m, l, r = split(T.child[k])\r\n T.data.insert(k,m) \r\n T.child[k] = l\r\n T.child.insert(k+1,r) \r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert(self, item):\n # First, find the point of insertion.\n parent, current = None, self.root\n while current is not None and current.item != item:\n if item < current.item:\n parent, current = current, current.left\n else: # item > current.item\n parent, current = current, current.right\n # Next, check if item needs to be inserted.\n if current is None:\n # Create a new node and link it into the tree at the right place.\n current = _BSTNode(item)\n if parent is None:\n self.root = current\n elif item < parent.item:\n parent.left = current\n else: # item > parent.item\n parent.right = current\n # else do nothing: item is already in this BST.", "def insert(self, key, content, root=None):\n\n if root is None:\n root = self.root\n\n if self.size == 0:\n self.root = Node(content, key)\n self.allNodes.append(self.root)\n self.size += 1\n return\n\n elif key >= root.key:\n if root.right is None:\n root.right = Node(content, key)\n root.right.parent = root\n self.allNodes.append(root.right)\n self.size += 1\n return\n else:\n self.insert(key, content, root.right)\n return\n\n else:\n if root.left is None:\n root.left = Node(content, key)\n root.left.parent = root\n self.allNodes.append(root.left)\n self.size += 1\n\n return\n\n else:\n self.insert(key, content, root.left)\n return", "def recursiveInsert(self, key, data, curr):\n debug.printMsg(\"Entered recursiveInsert\")\n # check if the key is greater than current node key\n # we will go right\n debug.printMsg(\"checking whether we go right or left\")\n if key > curr.key:\n debug.printMsg(\"we go right\")\n # now check if there is a right node already\n debug.printMsg(\"checking if we have available space\")\n if curr.hasRightChild():\n debug.printMsg(\"nope, calling recursiveInsert again\")\n # well, we're shit out of luck and need to go further\n self.recursiveInsert(key, data, curr.right)\n else:\n debug.printMsg(\"yep, we'll insert it here\")\n # we found an empty spot\n curr.right = Node(key, data, curr)\n else:\n debug.printMsg(\"we go left\")\n # now check if there is a left node already\n if curr.hasLeftChild():\n debug.printMsg(\"checking if we have available space\")\n # well, we're shit out of luck and need to go further\n self.recursiveInsert(key, data, curr.left)\n else:\n # we found an empty spot\n debug.printMsg(\"yep, we'll insert it here\")\n curr.left = Node(key, data, curr)", "def _insert(self, key):\n if self.min > key:\n self.min = key\n if self.max < key:\n self.max = key\n if key == self.key:\n return self\n self.size += 1\n if key < self.key:\n if self.left is None:\n self.left = self._create_new(key)\n self.left.parent = self\n return self\n self.left = self.left._insert(key)\n else:\n if self.right is None:\n self.right = self._create_new(key)\n self.right.parent = self\n return self\n self.right = self.right._insert(key)\n return self", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = 
Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)", "def test_insert_adds_value_to_tree(bst_balanced):\n bst_balanced.insert(15)\n assert bst_balanced.contains(15) is True\n assert bst_balanced.search(15).val == 15", "def insert(root: Node, key: int) -> Node:\n node = Node(key)\n l, r = split(root, key)\n return merge(merge(l, node), r)", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "def insert(root, key, value=None):\n if root is None:\n root = Node(key, value)\n else:\n if key >= root.key:\n if root.right is None:\n root.right = Node(key, value)\n else:\n # Use root.right as the root of the subtree\n insert(root.right, key, value)\n else:\n if root.left is None:\n root.left = Node(key, value)\n else:\n # Use root.left as the root of the subtree\n insert(root.left, key, value)", "def insert(self, value):\n new_node = Node(value)\n if self.root is None:\n self.root = new_node\n else:\n node = self.root\n while(node!=None):\n if(value <= node.data):\n if node.left is None:\n node.left = new_node\n node = node.left\n node = node.left\n elif(value > node.data):\n if node.right is None:\n node.right = new_node\n node = node.right\n node = node.right", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n # Update the min of this node if the inserted node has a smaller key.\n if node.key < self.min.key:\n self.min = node\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert(self, item):\n insert_location = self.__find(item)\n if insert_location is None: #No root\n self.root = Node(item, None)\n elif item < insert_location.item:\n insert_location.left_child = Node(item, insert_location)\n else: # it should be that item >= insert_location.item\n insert_location.right_child = Node(item, insert_location)", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n # Updates the min of this node if the inserted node has a smaller\n # key.\n if node.key < self.min.key:\n self.min = node\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert(self, key, value=None):\n if isinstance(key, list):\n for k in key:\n self.insert(k)\n else:\n if key == self.key:\n # update key: value\n self.value = value\n elif key < self.key:\n if self.left == None:\n self.left = Tree(key, value)\n else:\n 
self.left.insert(key, value)\n else:\n if self.right == None:\n self.right = Tree(key, value)\n else:\n self.right.insert(key, value)", "def insert(self, key, value):\n\n if None == self.root:\n self.root = BSTNode(key,value)\n return True\n current_node = self.root\n while current_node:\n if key == current_node.key:\n print(\"The key does exist!\")\n return False\n elif key < current_node.key:\n if current_node.left:\n current_node = current_node.left\n else:\n current_node.left = BSTNode(key, value, current_node)\n return True\n else:\n if current_node.right:\n current_node = current_node.right\n else:\n current_node.right = BSTNode(key,value,current_node)\n return True", "def Insert(root, node):\n target = root.ChooseLeaf(node)\n node.father = target\n target.leaves.append(node)\n target.MBR = merge(target.MBR, node.MBR)\n target.AdjustTree()\n if root.father != None:\n root = root.father\n return root", "def test_insert_WithDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(10,2)\n \n self.bst.insert(5,2)\n \n self.bst.insert(20,3)\n self.bst.insert(20,4)\n \n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.bst.insert(5,123)\n self.bst.insert(14,456)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1,2])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2,123])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3,4])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7,456])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def insert(self, root: TreeNode, item: int):\n if not root:\n return TreeNode(item)\n if item < root.value:\n root.left = self.insert(root.left, item)\n else:\n root.right = self.insert(root.right, item)\n return root", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def insert(self, value):\n i = 0\n n = len(self._tree)\n while i < n:\n cur = self._tree[i]\n self._counts[i] += 1\n if value < cur:\n i = 2 * i + 1\n elif value > cur:\n i = 2 * i + 2\n else:\n return\n raise ValueError(\"Value %s not contained in tree.\" \"Also, the counts are now messed up.\" % value)", "def insertBottom(self, value):\n if(self.left != None):\n self.left.insertBottom(value)\n if(self.right != None):\n self.right.insertBottom(value)\n if(self.right == None and self.left == None):\n newNode = BinaryTree(value)\n if random.random() > 0.5:\n self.right = newNode\n newNode.parent = self\n else:\n self.left = newNode\n newNode.parent = self", "def insert(self, item, key):\n if self.key == key:\n self.item = item\n elif self.key < key:\n if self.right:\n self.right.insert(item, key)\n else:\n self.right = BSTreeNode(item, key)\n else:\n if self.left:\n self.left.insert(item, 
key)\n else:\n self.left = BSTreeNode(item, key)\n # Replace by correct code\n pass", "def insert(self, value):\n i = 0\n n = len(self._tree)\n while i < n:\n cur = self._tree[i]\n self._counts[i] += 1\n if value < cur:\n i = 2 * i + 1\n elif value > cur:\n i = 2 * i + 2\n else:\n return\n raise ValueError(\"Value %s not contained in tree.\"\n \"Also, the counts are now messed up.\" % value)", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def insertElement(T,i):\r\n if not isFull(T):\r\n insertInternal(T,i)\r\n else:\r\n m, l, r = split(T)\r\n T.data = [m]\r\n T.child = [l,r]\r\n T.isLeaf = False\r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)", "def insert(self, value: T) -> None:\n if self._array == []:\n self._array.append(value)\n else:\n parent_idx = (len(self._array) - 1) // 2\n curr_idx = len(self._array)\n self._array.append(value)\n \n # While the value to be inserted is less than it's parent,\n # keep swapping the parent and child from the bottom up until\n # the min heap properties hold or, until swapped with the root node.\n while value < self._array[parent_idx] and parent_idx >= 0:\n temp_value = self._array[parent_idx]\n self._array[parent_idx] = value\n self._array[curr_idx] = temp_value\n curr_idx = parent_idx\n parent_idx = (parent_idx - 1) // 2", "def __init__(self, data_set):\n BST.__init__(self)\n if not isinstance(data_set, np.ndarray):\n raise TypeError(\"data_set must be a numpy array.\")\n self.k = data_set.shape[1]\n for point in data_set:\n self.insert(point)", "def burst_insert(a: List):\n root = AVLTree()\n for item in a:\n root.insert(item)\n return root", "def insert(self, val):\n if not self.root:\n self.root = Node(val)\n self.size_number += 1\n else:\n self._sink(val, self.root)\n # check parent from node, until unbalanced.", "def test_insert_NoDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(5,2)\n self.bst.insert(20,3)\n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def sorted_insert(self, value):\n if self.__head is None:\n self.__head = Node(value, None)\n elif value < self.__head.data:\n self.__head = Node(value, self.__head)\n else:\n n = self.__head\n while n.next_node is not None and n.next_node.data <= value:\n n = n.next_node\n new_node = Node(value, n.next_node)\n n.next_node = new_node", "def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1", "def insert(root,key):\n node = Node(key)\n node.insert_without_rotation(root)\n Node.recalculate_heights(node)\n 
Node.rotatation_adjusting_heights(node)", "def insert(self, data):\n if not self:\n self.root.append(data)\n return self\n\n parent, current = self._lookup(data)\n if current: # data equivalent node found!\n current.append(data)\n else: # equivalent node not found!\n setattr(parent, \"right\" if parent < data else \"left\", Node().append(data))\n return self", "def insert(self, node, update=False):\n if not isinstance(node, RbNode):\n node = RbNode(node)\n node = super(RbTree, self).insert(node, update)\n \"\"\" :type: RbNode \"\"\"\n self._balance_insert(node)\n\n if self.debug:\n print 'After balancing:'\n print self\n print '*' * 20\n\n return node", "def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n \n def insert(nums: List[int], l: int, r: int) -> TreeNode:\n m = (l+r)//2\n if l == r:\n return None\n \n node = TreeNode(nums[m])\n node.left = insert(nums, l, m) \n node.right = insert(nums, m+1, r)\n return node\n \n return insert(nums, 0, len(nums))", "def insertLeft(root, newBranch):\r\n t = root.pop(1)\r\n if len(t) > 1:\r\n root.insert(1, [newBranch, t, []])\r\n else:\r\n root.insert(1, [newBranch, [], []])\r\n return root", "def bounded_insert(self, time, tailnumber):\n if self.root is None: \n node = self.insert(time, tailnumber)\n return node\n\n if self.simple is False: \n conflict = self.find_conflict(time)\n if conflict is not None: \n new_time = conflict.key + self.wait_time\n self.bounded_insert(new_time, tailnumber)\n else: \n node = self.insert(time, tailnumber)\n return node \n else: \n conflict = self.find_conflict(time)\n if conflict is None: \n node = self.insert(time, tailnumber)", "def insert(self,key):\n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n # if current character is not present \n if not current_node.children[index]: \n current_node.children[index] = self.getNode(key[level]) \n current_node = current_node.children[index] \n \n # mark last node as leaf \n current_node.isEndOfWord = True", "def insert(self, data, index):\n if index == 0:\n self.add(data)\n\n if index > 0:\n new = Node(data)\n position = index # Cada que se llama a current = current.next_node, se decrementa el valor de position en 1, cuando el valor sea cero, se ha llegado al nodo que está actualmente en la posición que queremos insertar el nuevo valor\n current = self.head\n\n while position > 1:\n current = current.next_node\n position -= 1\n \n prev_node = current\n next_node = current.next_node\n\n prev_node.next_node = new\n new.next_node = next_node", "def _insert(self, node, key, value_ref):\n #create a tree if there was none so far\n if node is None:\n #print ('a')\n new_node = RedBlackNode(\n RedBlackNodeRef(), key, value_ref, RedBlackNodeRef())\n elif key < node.key:\n newleft_ref = self._insert(self._follow(node.left_ref), key, value_ref)\n newleft = self.balance(self._follow(newleft_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n left_ref=RedBlackNodeRef(referent=newleft)))\n elif key > node.key:\n newright_ref = self._insert(self._follow(node.right_ref), key, value_ref)\n newright = self.balance(self._follow(newright_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n right_ref=RedBlackNodeRef(referent=newright)))\n else: #create a new node to represent this data\n new_node = RedBlackNode.from_node(node, value_ref=value_ref)\n #new_node = self._blacken(new_node)\n return RedBlackNodeRef(referent=new_node)", "def insert(self, key, value=None):\n if key in 
self.nodes:\n return None\n else:\n new_node = Node(key, value)\n (self.nodes)[key] = new_node \n current = self.root\n last = current\n\n if current is None:\n self.root = self.nodes[key]\n self.root.height = 0\n return new_node\n\n while (current is not None):\n if new_node.key > current.key:\n last = current\n current = current.right\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n else:\n last = current\n current = current.left\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n\n if new_node.key > last.key:\n last.right = new_node\n new_node.parent = last\n else:\n last.left = new_node\n new_node.parent = last\n\n self.root.height = self.get_height_tree()\n return new_node", "def insert(self, object, node=None):\r\n # If there's no node specified because it's the first call, begin from Root.\r\n if not node:\r\n node = self.root\r\n # If current node is empty, insert into this node (base case for recursion)\r\n if not node.object:\r\n node.object = object\r\n else:\r\n # If current node already holds said value, return False and throw error (base case for recursion)\r\n if object.key == node.object.key:\r\n print(\"Value\", object.key, \"already in tree\")\r\n return False\r\n # Determine to branch left or right, then if node doesn't exists, make a new one store object there\r\n # If it does exist, recursively call on that node\r\n if object.key > node.object.key:\r\n if not node.rightchild:\r\n node.rightchild = binarySearchTree.node(object, node)\r\n else:\r\n binarySearchTree.insert(self, object, node.rightchild)\r\n if object.key < node.object.key:\r\n if not node.leftchild:\r\n node.leftchild = binarySearchTree.node(object, node)\r\n else:\r\n binarySearchTree.insert(self, object, node.leftchild)\r\n return True", "def straight_bst():\n bst = BST()\n for i in range(1, 10):\n bst.insert_non_balance(i)\n return bst, 10, -9", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def insert(self, value):\n insertion_point = self._find(value)\n n = SplayNode(value)\n\n # value already in the tree; add at leftmost position in right subtreepa\n if value == insertion_point.value:\n if insertion_point.right is None:\n insertion_point.right = n\n n.parent = insertion_point\n else:\n insertion_point = insertion_point.right\n while insertion_point.left is not None:\n insertion_point = insertion_point.left\n insertion_point.left = n\n n.parent = insertion_point\n\n # value belongs to the left\n elif value < insertion_point.value:\n insertion_point.left = n\n n.parent = insertion_point\n\n # value belongs to the right\n else:\n 
insertion_point.right = n\n n.parent = insertion_point\n\n n._splay()\n return n # return new root", "def sorted_insert(self, value):\n if self.__head is None or self.__head.data > value:\n new_node = Node(value)\n if self.__head is not None:\n new_node.next_node = self.__head\n self.__head = new_node\n else:\n runner = self.__head\n while runner.next_node and value > runner.next_node.data:\n runner = runner.next_node\n runner.next_node = Node(value, runner.next_node)", "def insert(self, val):\n try:\n float(val)\n except ValueError:\n print(\"Not a number\")\n return False\n self.no_of_Nodes += 1\n if self.rootNode is None:\n self.rootNode = Node(val)\n return True\n\n currentNode = self.rootNode\n while True:\n if currentNode.val < val:\n if currentNode.right is None:\n currentNode.right = Node(val)\n currentNode.right.parent = currentNode\n return True\n else:\n currentNode = currentNode.right\n else:\n if currentNode.left is None:\n currentNode.left = Node(val)\n currentNode.left.parent = currentNode\n return True\n else:\n currentNode = currentNode.left", "def insert(node, key):\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def __insertTraversalRight(self, value):\n if self.right != None:\n #print('node found. Going down...')\n self.right.insertTraversal(value)\n else:\n #print('free edge found. Inserting node.')\n newNode = BinaryTree(value)\n self.right = newNode\n newNode.parent = self", "def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. 
Done.\n return", "def test_insert_non_balance_second_depth_level():\n from bst import BST\n bst = BST()\n bst.insert_non_balance(4)\n bst.insert_non_balance(2)\n bst.insert_non_balance(3)\n assert bst.root.left.right.value == 3", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def insert(self,node,key):\n position=self.find(node,key)\n if position.key==key:\n print(\"node already present\")\n elif position.key>key:\n n=Node(key)\n position.setLeftChild(n)\n n.setParent(position)\n print(n.getParent())\n else:\n n=Node(key)\n position.setRightChild(n)\n n.setParent(position)", "def insert(self, data, index):\n if index == 0:\n self.prepend(data)\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current or previous:\n if current_index == index:\n new_node = Node(data)\n new_node.next = current\n previous.next = new_node\n break\n\n previous = current\n current = current.next\n current_index += 1", "def minimal_tree(array: list):\n bst = BST()\n def build(l, r):\n if l == r: bst.insert(array[l]); return\n m = (l+r)//2\n # insert into the tree\n bst.insert(array[m])\n # build recursively\n build(l, m)\n build(m+1, r)\n build(0, len(array)-1)\n return bst", "def insert(self, item):\n # Handle the case where the tree is empty\n if self.is_empty():\n # if self.root is None:\n # TODO: Create a new root node\n self.root = ...\n # TODO: Increase the tree size\n self.size ...\n return\n # Find the parent node of where the given item should be inserted\n parent = self._find_parent_node(item)\n # TODO: Check if the given item should be inserted left of the parent node\n if ...:\n # TODO: Create a new node and set the parent's left child\n parent.left = ...\n # TODO: Check if the given item should be inserted right of the parent node\n elif ...:\n # TODO: Create a new node and set the parent's right child\n parent.right = ...\n # TODO: Increase the tree size\n self.size ...", "def insert(self, data):\n # add data to list'end\n self.heap_list.append(data)\n # adjust max-heap from bottom to top\n self.sift_up(len(self.heap_list)-1)", "def insert(self, data):\n if self.head == None:\n self.head = Node(data)\n else:\n curr = self.head\n while curr.link != None:\n curr = curr.link\n curr.link = Node(data)", "def sorted_insert(self, value):\n new = Node(value)\n if self.__head is None:\n self.__head = new\n return\n\n cur = self.__head\n if new.data < cur.data:\n new.next_node = self.__head\n self.__head = new\n return\n\n while (cur.next_node is not None) and (new.data > cur.next_node.data):\n cur = cur.next_node\n\n new.next_node = cur.next_node\n cur.next_node = new\n return", "def add_child(self, data):\n if data == self.data:\n return # node already exist\n\n if data < self.data:\n #add data to left subtree\n if self.left:\n self.left.add_child(data)\n else:\n self.left = BinarySearchTreeNode(data)\n else:\n #add data to right subtree\n if self.right:\n self.right.add_child(data)\n else:\n self.right = BinarySearchTreeNode(data)", "def insert(self, item):\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))", "def insert(self, word):\n current = self.root\n for i in word:\n if current.hash_map.get(i) is None:\n 
current.hash_map[i] = Node()\n current = current.hash_map.get(i)\n current.num += 1", "def insert(self, val):\n if self.val is None:\n self.__init__(val)\n elif self.val > val:\n self.left.insert(val)\n elif self.val < val:\n self.right.insert(val)", "def __insertTraversalLeft(self, value):\n if self.left != None:\n #print('node found. Going down...')\n self.left.insertTraversal(value)\n else:\n #print('free edge found. Inserting node.')\n newNode = BinaryTree(value)\n self.left = newNode\n newNode.parent = self", "def insert(self, value):\n if self._root:\n node = self._root\n child = self._root\n parent = None\n while node and child:\n if node.key == value:\n child = None\n else:\n parent = node\n if value < node.key:\n node = node._left\n else:\n node = node._right\n if child:\n child = Node(value, None, None)\n if value < parent.key:\n parent._left = child\n else:\n parent._right = child\n return True\n else:\n return False\n else:\n self._root = Node(value, None, None)\n return True", "def insertRight(root, newBranch):\r\n t = root.pop(2)\r\n if len(t) > 1:\r\n root.insert(2, [newBranch, [], t])\r\n else:\r\n root.insert(2, [newBranch, [], []])\r\n return root" ]
[ "0.72828513", "0.71956825", "0.71459913", "0.70616907", "0.69816333", "0.69398624", "0.6857214", "0.6762853", "0.6751353", "0.6740436", "0.672814", "0.6700522", "0.6673687", "0.66709936", "0.6667268", "0.6640333", "0.66368526", "0.6587889", "0.65469646", "0.65461093", "0.64869136", "0.6484786", "0.64637625", "0.6457644", "0.64382213", "0.6408277", "0.6408001", "0.63971996", "0.63905", "0.6387043", "0.63712335", "0.6366829", "0.6347456", "0.6343666", "0.6342247", "0.63394725", "0.63379765", "0.6333449", "0.6323903", "0.6312773", "0.6296045", "0.6275364", "0.62583107", "0.6254639", "0.62317365", "0.6209975", "0.62077314", "0.62033916", "0.6197838", "0.6191", "0.6190811", "0.61889774", "0.6178138", "0.617265", "0.61702603", "0.61582726", "0.61407405", "0.61302197", "0.6124221", "0.6107639", "0.6102868", "0.60927784", "0.60731703", "0.60701036", "0.6059246", "0.60511875", "0.6048993", "0.60436136", "0.603436", "0.6025486", "0.60219145", "0.59938645", "0.59928006", "0.59903806", "0.59899193", "0.5987717", "0.5986173", "0.5983342", "0.59809214", "0.59761274", "0.5972573", "0.5968562", "0.5968527", "0.59665847", "0.594663", "0.5934633", "0.5934146", "0.59295505", "0.5926266", "0.59209836", "0.592073", "0.5911769", "0.5897782", "0.5888106", "0.58815134", "0.58808947", "0.58682543", "0.5866894", "0.58646345", "0.586421" ]
0.7049252
4
This is the main algorithm for insert
def recursiveInsert(self, key, data, curr):
    debug.printMsg("Entered recursiveInsert")
    # check if the key is greater than current node key
    # we will go right
    debug.printMsg("checking whether we go right or left")
    if key > curr.key:
        debug.printMsg("we go right")
        # now check if there is a right node already
        debug.printMsg("checking if we have available space")
        if curr.hasRightChild():
            debug.printMsg("nope, calling recursiveInsert again")
            # well, we're shit out of luck and need to go further
            self.recursiveInsert(key, data, curr.right)
        else:
            debug.printMsg("yep, we'll insert it here")
            # we found an empty spot
            curr.right = Node(key, data, curr)
    else:
        debug.printMsg("we go left")
        # now check if there is a left node already
        if curr.hasLeftChild():
            debug.printMsg("checking if we have available space")
            # well, we're shit out of luck and need to go further
            self.recursiveInsert(key, data, curr.left)
        else:
            # we found an empty spot
            debug.printMsg("yep, we'll insert it here")
            curr.left = Node(key, data, curr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_op(self, op):", "def insert(self):\n pass", "def insert(self, data):\r\n pass", "def insert(self, i, x) -> None:\n pass", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def insert_values():\n pass", "def test_insert(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (109876,\"cinderella\",\"big 5\",5)\n self.a.insert(query,values)\n query1 = \"select * from cds where id=109876\"", "def test_insert(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n inserted12 = insert([int1], int2)\n self.assertEqual([int12], inserted12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n self.assertEqual([int13], insert([int12], int3))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n inserted4 = insert([],int4)\n self.assertEqual([int4], inserted4)\n self.assertEqual([int13, int58], insert([int12, int3], int58))\n self.assertEqual([int13, int58], insert([int58], int13))\n self.assertEqual([int13], insert([int2, int3], int1))\n self.assertEqual([int13], insert([int1, int2, int2, int3], int12))\n self.assertEqual([int1], insert([int1], int1))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = insert([int1], 4)\n with self.assertRaises(ValueError):\n int1 = insert([3], int1)\n with self.assertRaises(ValueError):\n int1 = insert([3], \"not an interval\")\n with self.assertRaises(ValueError):\n int1 = insert([3], \"[1,3]\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[12, \"hi\"]], \"interval\")\n with self.assertRaises(ValueError):\n int1 = insert([int1], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], int2)\n print(\"insert test complete\")", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "async def _insert_stmt(self):\n raise NotImplementedError", "def insert(self, indexes: Tuple[int, ...], tree: 'Tree') -> None:\n ...", "def do_insert(self,args):\n if len(args) != 0:\n for w in args.split():\n sl.insertList(int(w.rstrip()))", "def testInsertLength(self):\n\n num = randint(60,180)\n for i in xrange(num):\n self.s.insert(i, None)\n self.assertEqual(len(self.s), num)\n\n #try to insert duplicates\n for i in xrange(num):\n self.s.insert(i, None)\n self.assertEqual(len(self.s), num)", "def insert(pq):\n\ti = r.randint(0, bound-1)\n\tpq.put(i)\n\tlogging.info(\"insert %s\", i)", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def insert_data(self) -> None:\n if self.min_insert_size > self.insert_count:\n LOG.debug(\"Not enough data for insert....\")\n return\n LOG.debug(f'Inserting {self.insert_count} records...')\n self.insert.write(self.copy_trailer)\n self.insert.seek(0)\n conn = pg.connect(self.dsn)\n with conn.cursor() as cur:\n cur.copy_expert(self.cmd, self.insert)\n conn.commit()\n conn.close()\n self.insert.close()\n self.create_byte_buffer()", "def execute_insert(self,insert):\n try:\n self.cursor.execute(insert)\n self.connection.commit()\n except Exception as error:\n self.connection.rollback()\n raise error", "def insert(self, index, p_object): # real signature unknown; restored from __doc__\n pass", "def InsertNextPoint(self, ):\n ...", "def insertInternal(T,i):\r\n if T.isLeaf:\r\n insertLeaf(T,i)\r\n else:\r\n k = findChildA(T,i) \r\n if isFull(T.child[k]):\r\n m, l, r = 
split(T.child[k])\r\n T.data.insert(k,m) \r\n T.child[k] = l\r\n T.child.insert(k+1,r) \r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)", "def insert_data(self):\n\n pass", "def bst_insert(sizes):\n tree = rbTree_main.BinarySearchTree();\n for i in range(sizes):\n tree.insert(random.random())", "def InsertPoint(self, p_int, ):\n ...", "def insert(self, data):\n #Check for multiple row insert and prepare for each\n if isinstance(data, list):\n i = 0\n total = len(data) - 1\n static_total = total\n new_data = data\n multi = True\n inserted = 0\n for x in range(len(new_data)):\n if self.__scrub_data(new_data[x]):\n pass\n else:\n raise Exception('Your insert data is not formatted correctly.')\n #Not multi-insert\n else:\n multi = False\n #if we are doing a multi insert\n if multi:\n #while we still have data to insert\n while total > 0:\n current_line = self.__row_id_in_file(self.current_row)\n #calculate how many lines we will insert on the first loop\n insert_number = int(self.rows_per_page) - int(current_line)\n if insert_number == 0:\n insert_number = int(self.rows_per_page)\n #Check that we aren't at max rows:\n if self.current_row + insert_number > self.max_rows:\n raise Exception('Sorry, the table, \\\"' + self.name + '\\\" can\\'t fit all those rows.')\n #populate first_data based on how many total rows are being inserted and how much room is on data page\n if insert_number < total:\n #grab how many we need to fill the current data page\n first_data = data[:insert_number]\n del data[:insert_number]\n else:\n first_data = data\n #record how many rows we are inserting this time:\n number_rows_to_insert = len(first_data)\n #prepare data\n first_data_string = ''\n first_path = self.__data_file_for_row_id(int(self.current_row) + 1)\n for x in range(len(first_data)):\n self.current_row += 1\n first_data_string = first_data_string + \"{\\\"data\\\": \" + json.dumps(first_data[x]) + \", \\\"row_id\\\": \" + str(self.current_row) + \"}\\n\"\n if self.__multi_append_row(first_data_string, first_path):\n if self.__check_write_success_multi_insert(first_data_string, first_path, number_rows_to_insert, current_line):\n pass\n else:\n raise Exception(\"There was a problem validating the write during multiple row insert.\")\n else:\n raise Exception(\"There was a problem inserting multiple rows.\")\n total -= insert_number\n print(str(static_total) + ' rows have been added.')\n #If not multi-insert\n else:\n self.current_row += 1\n if self.__scrub_data(data):\n row_id = self.current_row\n path = self.__data_file_for_row_id(row_id)\n #Check that we aren't at max rows:\n if self.current_row < self.max_rows:\n if self.__insert_modify_data_file(path, data):\n print('Row ' + str(row_id) + ' has been added.')\n else:\n raise Exception('There was a problem inserting row at ' + str(row_id) +'.')\n else:\n raise Exception('Sorry, the table, \\\"' + self.name + '\\\" is full.')\n else:\n raise Exception('Sorry, the data you tried to insert is invalid.')", "def insert(self, val: int) -> bool:", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n 
traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def insert(self):\n\n # elements = [77, 26, 22, 33, 37, 38, 39, 44]\n # file = open(\"HashTable File\", \"w+\")\n # for i in elements:\n # file.writelines(str(i) + ' ')\n # file.close()\n\n file = open(\"../util/HashTable File\", \"r\")\n elements = file.readlines()\n string = elements[0]\n\n string_list = string.split()\n\n elements = []\n for i in range(0, len(string_list)):\n to_integer = int(string_list[i])\n elements.append(to_integer)\n\n for i in range(len(elements)):\n index = self.hash_function(elements[i])\n self.objects_list[index].append(elements[i])", "def insert(self, sample, *args):\n raise NotImplementedError", "def InsertUniquePoint(self, , p_int):\n ...", "def InsertUniquePoint(self, , p_int):\n ...", "def test_insert(self):\n self.minheap.heap = [0, 1, 4, 6, 9]\n self.minheap.insert(2)\n assert self.minheap.heap == [0, 1, 2, 6, 9, 4]", "def insertQuery(self, master, row_num):\n pass", "def test_insert(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n h.insert(7)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(10)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(5)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert_run():\n print(\"Start inserting\")\n try:\n finsert()\n except Exception as e:\n print(e)\n else:\n print(\"Completed successfully\")", "def insert(statement: str) -> []:\n raise NotImplementedError", "def insert_index(self):\n pass", "def insertElement(T,i):\r\n if not isFull(T):\r\n insertInternal(T,i)\r\n else:\r\n m, l, r = split(T)\r\n T.data = [m]\r\n T.child = [l,r]\r\n T.isLeaf = False\r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)", "def InsertLog():", "def insert(database: str, table: str, register: list) -> int:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n if tb:\n\n encoding = bd[\"encoding\"]\t\n mode = tb[\"modo\"]\t \n\n for y in register:\t\n if type(y) == str:\t\n try:\t\n y.encode(encoding, \"strict\")\t\n except: \t\n return 1\n val = -1\n\n if mode == \"avl\":\n val = avl.insert(database, table, register)\n\n elif mode == \"b\":\n val = b.insert(database, table, register)\n\n elif mode == \"bplus\":\n val = bplus.insert(database, table, register)\n\n elif mode == \"hash\":\n val = hash.insert(database, table, register)\n\n elif mode == \"isam\":\n val = isam.insert(database, table, register)\n\n elif mode == \"json\":\n val = json.insert(database, table, register)\n\n elif mode == \"dict\":\n val = dict.insert(database, table, register)\n\n if val == 0:\n nombreST = str(database) + '-' + str(table)\n if BC.EsUnaTablaSegura(nombreST, _main_path):\n BC.insertSafeTable(nombreST, register, _main_path)\n\n return val\n\n else:\n return 3\n\n else:\n return 2", "def on_insert(self) -> None:", "def insertLeaf(T,i):\r\n T.data.append(i) \r\n T.data.sort(key=lambda x: x.word)", "def insert(self, product):\n pass", "def insert(self, *args):\n self.insert_count += 1\n self.total_ops += 1\n return super(BulkOperator, self).insert(*args)", "def insert(self, word):\n def r_insert(word,i):\n if len(word) <= i:\n return 0\n \n d = {}\n d[word[i]] = r_insert(word,i+1)\n \n return d\n \n if len(word) == 0:\n return \n \n 
d = self.root.d\n word = word + '$'\n \n for i in xrange(len(word)):\n if word[i] in d:\n d = d[word[i]]\n else:\n d[word[i]] = r_insert(word,i+1)\n break\n\n print self.root.d\n return", "def insertSort(arr):\n for i in range(1, len(arr)):\n value = arr[i]\n position = i - 1\n while position >= 0 and arr[position] < value:\n arr[position + 1] = arr[position]\n position -= 1\n arr[position + 1] = value\n return", "def test_insert(self):\n self.assertEqual(['INSERT', 'INTO', 'test', '(a,b) VALUES (1,2)'],\n grammar._INSERT_EXPR.parseString(\"INSERT INTO test (a,b) VALUES (1,2);\").asList())", "def test_insert_node_multiple_content_2():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(third))\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at mid)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"", "def test_insertion(self):\n integers = insertion_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def self_insert():\r\n insert_char(last_key())", "def insert_this_word_into_storage(new_word, new_vec, new_sim):\n ordered_distances[n-1] = new_sim;\n ordered_words[n-1] = new_word;\n #print(\"here i am: \", ordered_words[n-1]);\n #print(ordered_words);\n place_found = False;\n i = 0;\n for i in range(0, n-1):\n if(place_found != False):\n break;\n this_index = n-1-i;\n next_index = n-2-i;\n this_sim = ordered_distances[this_index];\n next_sim = ordered_distances[next_index];\n #print(\"this_sim (\" , this_sim, \") -vs- (\", next_sim, \")\");\n if(this_sim > next_sim):\n #print(\"true, bigger\");\n ordered_distances[this_index] = ordered_distances[next_index];\n ordered_distances[next_index] = new_sim;\n ordered_words[this_index] = ordered_words[next_index];\n ordered_words[next_index] = new_word;\n else:\n place_found = True;\n #print(ordered_distances);\n #print(place_found, i, n-1); ", "def insertRows(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insertRow(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insert(n, c, dnodecomm):\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def before_insert(self, obj, st):\n pass", "def do_insert(self, text):\n args = text.split()\n if len(args) == 2:\n try:\n pos = int(args[0])\n value = int(args[1])\n self.list.insert(pos, value)\n print(self.list, sep=', ')\n except ValueError:\n print('Error: invalid literal.')\n except IndexError:\n print('Error: invalid position.')\n else:\n print('Error: insert takes two parameters.')", "def insertion_sort(L):\n for i in range(len(L)):\n insert(L, i)", "def insert_statement() -> str:\n pass", "def insert(conn, table_info, table_data):\n\n sql = ''' INSERT INTO ''' + table_info \n + ''' VALUES(''' + \"?,\" * (len(table_data)-1) + \"?)\"\n cursor = conn.cursor()\n cursor.execute(sql, table_data)\n conn.commit()", "def test_insert_node_multiple_content_1():\n first = 0\n second = 1\n third = 3\n chain = N.Node(second, 
N.Node(third))\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at start)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"", "def insert_aux(n, actual, e, f, prio):\n\tpoyo_datos = \"\"\"\n\t\t\t\tSELECT pokedex, type1, type2, hptotal, legendary \n\t\t\t\tFROM poyo\n\t\t\t\tWHERE nombre = :1\"\"\"\n\tcur.execute(poyo_datos, [n])\n\t# Lista con tupla [(pokedex, tipo1, tipo2, hptotal, legendary)]\n\tdata_poyo = cur.fetchall()\n\tdata_poyo = data_poyo[0]\n\tpokedex, t1, t2, total, l = data_poyo\n\tins_query = \"\"\"\n\t\t\t\tINSERT INTO sansanito (pokedex, nombre, type1, type2,\\\n\t\t\t\thpactual, hpmax, legendary, estado, ingreso, prioridad)\n\t\t\t\tVALUES (:1, :2, :3, :4, :5, :6, :7, :8, to_date(:9, 'DD/MM/YY HH24:MI'), :10)\"\"\" \n\t\t\t\t\n\tcur.execute(ins_query, [pokedex, n, t1, t2, actual, total, l, e, f, prio])", "def insert_into_db(self, database):\n\n # insert person\n keys = \"\"\n values = \"\"\n for key, value in self.person.items():\n # location\n if key == \"location\":\n # ensure location is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * FROM p21_cdm.location WHERE city='{value['city']}' \n AND zip='{value['zip']}') THEN INSERT INTO p21_cdm.location (city, zip) \n VALUES ('{value['city']}', '{value['zip']}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.person (location_id, {keys[:-1]}) \n VALUES((SELECT location_id \n FROM p21_cdm.location\n WHERE city='{self.person['location']['city']}' \n and zip='{self.person['location']['zip']}'), \n {values[:-1]})\"\"\")\n\n # insert visits\n for visit in self.visits:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n for key, value in visit.items():\n if key == \"care_site_name\":\n # ensure care site is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * \n FROM p21_cdm.care_site \n WHERE care_site_name='{value}') \n THEN INSERT INTO p21_cdm.care_site (care_site_name) \n VALUES ('{value}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.visit_occurrence (care_site_id, {keys[:-1]}) \n VALUES((SELECT care_site_id\n FROM p21_cdm.care_site\n WHERE care_site_name='{visit['care_site_name']}'),\n {values[:-1]}) \n RETURNING visit_occurrence_id\"\"\")\n\n # insert measurements, observations, conditions & procedures\n for data, tablename in [(self.measurements, \"measurement\"),\n (self.observations, \"observation\"),\n (self.conditions, \"condition_occurrence\"),\n (self.procedures, \"procedure_occurrence\")]:\n for entry in data:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n\n for key, value in entry.items():\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n entry[\"sql_id\"] = database.select(f\"\"\"INSERT INTO p21_cdm.{tablename}({keys[:-1]})\n VALUES({values[:-1]}) RETURNING {tablename}_id\"\"\")[0][0]\n\n # insert fact_relationships in both directions\n for table1, entry1, table2, entry2 in self.fact_relations:\n # 44818890 = Finding associated with (SNOMED)\n database.select(f\"\"\"INSERT INTO 
p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table1}','{entry1['sql_id']}','{table2}','{entry2['sql_id']}','44818890')\"\"\")\n # 44818792 = Associated with finding (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table2}','{entry2['sql_id']}','{table1}','{entry1['sql_id']}','44818792')\"\"\")\n\n # make transactions persistent\n database.commit()", "def insertion_sort(deck):\n for i in range(len(deck)):\n deck = insert(deck, i, deck[i])\n return deck", "def insert(self, item):\n if self.type == \"lin\":\n index = self.hash(item[0])\n if self.items[index] is None:\n self.items[index] = item\n return True\n else:\n i = (index + 1) % self.n\n while i != index:\n if self.items[i] is None:\n self.items[i] = item\n return True\n i = (i + 1) % self.n\n return False\n\n if self.type == \"quad\":\n index = self.hash(item[0])\n if self.items[index] is None:\n self.items[index] = item\n return True\n else:\n j = 1\n i = (index + j**2) % self.n\n while i != index:\n if self.items[i] is None:\n self.items[i] = item\n return True\n j += 1\n i = (i + j**2) % self.n\n return False\n\n if self.type == \"sep\":\n index = self.hash(item[0])\n self.items[index].tableInsert(1, item)\n return True", "def insert(self, *a):\r\n return self.stack.insert(*a)", "def _insert_into_clean(self, entry):\n i = entry.hash\n new_entry = self.table[i]\n while new_entry.key is not None:\n i += self.second_hash(new_entry.key)\n new_entry = self.table[i]\n new_entry.key = entry.key\n new_entry.value = entry.value\n new_entry.hash = entry.hash\n self.used += 1\n self.filled += 1", "def test_insert_adds_value_to_tree(bst_balanced):\n bst_balanced.insert(15)\n assert bst_balanced.contains(15) is True\n assert bst_balanced.search(15).val == 15", "def test_binarytree_insert_exists(empty_list):\n assert empty_list.insert(42)", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def test_verify_insert(self):\n self._verify([self.applied_commands['insert']])", "def insert(self, e):\n elementsintable = 1\n for numberset in self.table:\n for number in numberset:\n elementsintable += 1\n if (elementsintable / len(self.table) > 0.75):\n self.rehash(len(self.table) * 2)\n self.table[hash(e) % len(self.table)].add(e)", "def insert_records(self, insert_query, insert_query_columns, wiki_data, table_name):\n print(\"Inserting {} rows into {}\".format(len(wiki_data), table_name))\n for index, item in enumerate(wiki_data):\n values_to_insert = [item[column]['value'] for column in insert_query_columns]\n try:\n self.cur.execute(insert_query, values_to_insert)\n except ValueError as ve:\n print(\"Could not execute query : {} with values\".format(insert_query, values_to_insert))\n raise ve\n\n if index % 1000 == 0:\n print(\"Inserted {} rows\".format(index))\n print(\"Inserted {} rows\".format(len(wiki_data)))\n print(\"Finished inserting {}\".format(table_name))", "def test_insert_node_multiple_content_3():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(second))\n node = N.Node(third)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node 
returned incorrect data value first given a node and chain length 2 (insert at end)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"", "def test_insertion_for_each_element_input_list(empty_list):\n a = [5, 6, 7, 8]\n empty_list.insert(a)\n assert len(empty_list) == len(a)", "def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)", "def rbt_insert(sizes):\n tree = rbTree_main.RBTree();\n for i in range(sizes):\n tree.rb_insert(random.random());\n pass", "def insert(self, item):\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))", "def insert_sort(a):\n size = len(a)\n # find key\n for key in range(1, size):\n key_to_insert = a[key]\n # insert key in sorted sequence\n for margin in reversed(range(0, key)):\n if key_to_insert < a[margin]:\n a[margin + 1] = key_to_insert\n break\n else:\n # SHIFT THE MARGIN\n a[margin + 1] = a[margin]\n else:\n a[0] = key_to_insert\n return a", "def _insert(self, node, root):\n if not root:\n root = node\n elif node.key < root.key:\n root.left = self._insert(node, root.left)\n if root.right and (root.left.height - root.right.height == 2):\n # Inserted node on the left side, check if left side is larger by 2\n # this is not allowed\n # at most 1 difference\n if node.key < root.left.key:\n root = self.rotate_with_left_child(root)\n else:\n root = self.double_with_left_child(root)\n # It's in wrong position, put it on the right\n elif node.key > root.key:\n root.right = self._insert(node, root.right)\n if root.left and (root.right.height - root.left.height == 2):\n # Inserted node on the right side, check if right side larger by 2\n # not allowed\n # max 1 difference\n if node.key > root.right.key:\n root = self.rotate_with_right_child(root)\n else:\n root = self.double_with_right_child(root)\n # It's in wrong position, put it on the left\n\n root.height = max(root.left.height if root.left else -1, root.right.height if root.right else -1) + 1\n # get root height, left or right subtree height + 1, depending which is greater\n return root", "def insertar(self, dato):\n\t\tif self.posicion == 0:\n\t\t\tself.lista.insertar_primero(dato) \n\t\t\tself.actual = self.lista.prim \n\t\t\treturn self.actual.dato\n\t\tnodo = _Nodo(dato)\n\t\tself.anterior.prox = nodo\n\t\tnodo.prox = self.actual\n\t\tself.actual = nodo\n\t\tself.lista.len += 1\n\t\treturn self.actual.dato", "def after_insert(self, obj, st):\n pass", "def insert(self, *args):\n return _ida_hexrays.qvector_ccase_t_insert(self, *args)", "def insert(self, to_insert: Article) -> None:\n heapq.heappush(self.heap, to_insert)", "def _extract_and_insert(cursor, table, data, ignore_if_exists=True, **kwargs):\n if ignore_if_exists:\n return _insert_if_new(cursor, table, _subdict(_columns(cursor, table), data), **kwargs)\n else:\n return _insert_dict(cursor, table, _subdict(_columns(cursor, table), data), **kwargs)", "def test_do_insert_sibling(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n 
DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_insert(revision_id=1, parent_id=0)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\r\n \"RAMSTK SUCCESS: Adding one or more items to the RAMSTK Program \"\r\n \"database.\")\r\n assert DUT.last_id == 4\r\n\r\n DUT.do_delete(DUT.last_id)", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def eamap_insert(*args):\n return _ida_hexrays.eamap_insert(*args)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insertion(array):\n for i in range(1,len(array)):\n while i>0 and array[i]<array[i-1]:\n array[i], array[i-1] = array[i-1], array[i]\n i -= 1", "def insert(self,key):\n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n # if current character is not present \n if not current_node.children[index]: \n current_node.children[index] = self.getNode(key[level]) \n current_node = current_node.children[index] \n \n # mark last node as leaf \n current_node.isEndOfWord = True", "def insert(intlist,newint):\n intlist.append(newint)\n return mergeOverlapping(intlist)", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def test_insertion2(engine_contents, engine_locations):\n file_name = 'Triangle.java.xml'\n new_contents = copy.deepcopy(engine_contents)\n new_locations = copy.deepcopy(engine_locations)\n target1 = (file_name, '_inter_block', 10)\n target2 = (file_name, 'expr_stmt', 1)\n target3 = (file_name, 'expr_stmt', 0)\n assert XmlEngine.do_insert(engine_contents, engine_locations, new_contents, new_locations, target1, target2)\n assert XmlEngine.do_insert(engine_contents, engine_locations, new_contents, new_locations, target1, target3)\n dump = XmlEngine.dump(engine_contents[file_name])\n new_dump = XmlEngine.dump(new_contents[file_name])\n expected = \"\"\"--- \n+++ \n@@ -5,6 +5,10 @@\n }\n \n public static TriangleType classifyTriangle(int a, int b, int c) {\n+\n+ delay();\n+\n+ a = b;\n \n delay();\n \n\"\"\"\n assert_diff(dump, new_dump, expected)", "def insertion_sort(arr):\n pass", "def test_insert_WithDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(10,2)\n \n self.bst.insert(5,2)\n \n self.bst.insert(20,3)\n self.bst.insert(20,4)\n \n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.bst.insert(5,123)\n self.bst.insert(14,456)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1,2])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2,123])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3,4])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7,456])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def 
test_insertion_for_each_element_in_iterable_tuple(empty_list):\n b = (1, 2, 3)\n bb = LinkedList([])\n bb.insert(b)\n assert len(bb) == 3", "def insert_db():\n populate_tables()", "def insert(self, rule, ident):\n raise NotImplementedError" ]
[ "0.7743201", "0.7556854", "0.72071356", "0.717223", "0.71497864", "0.697614", "0.6849138", "0.67920786", "0.6714414", "0.6667265", "0.6652461", "0.6634596", "0.65935534", "0.6587159", "0.658314", "0.64836735", "0.6482253", "0.64740515", "0.64148915", "0.6397918", "0.6386229", "0.63693166", "0.63057905", "0.6300677", "0.62952965", "0.6290995", "0.6289422", "0.62778777", "0.62754446", "0.62754446", "0.627175", "0.62617004", "0.62587583", "0.6250698", "0.6239116", "0.62373877", "0.62254936", "0.6225303", "0.620011", "0.61958236", "0.6157505", "0.61543715", "0.6150985", "0.6129162", "0.61265266", "0.612572", "0.6114646", "0.6112156", "0.61111075", "0.61094636", "0.60845256", "0.60764694", "0.60710233", "0.6069658", "0.60682565", "0.6067816", "0.60570323", "0.6056269", "0.6051501", "0.604725", "0.6046371", "0.6034842", "0.60323083", "0.6031757", "0.60148746", "0.59975535", "0.59964204", "0.59913015", "0.5980367", "0.59800327", "0.5971895", "0.5971146", "0.5968418", "0.5935966", "0.59308183", "0.5928022", "0.592707", "0.59211403", "0.5896745", "0.5895388", "0.5891717", "0.5888472", "0.58877337", "0.5883317", "0.5875591", "0.5872838", "0.58702374", "0.58686644", "0.58674353", "0.58606553", "0.58606553", "0.58561337", "0.5850999", "0.5847747", "0.5845326", "0.5841236", "0.5835781", "0.58302915", "0.5828885", "0.582881", "0.58247006" ]
0.0
-1
Gets a specific key from the BST
def lookup(self, key): # check that this tree actually has a root node debug.printMsg("Call made to Lookup") debug.printMsg("checking if we have a BST") if self.root: debug.printMsg("Calling Recursive Lookup") (result, err) = self.recursiveLookup(key, self.root) # if we did not find anything if err: debug.printMsg("Oops, we couldn't find anything") return None else: # we found a result debug.printMsg("we found: ") return result else: debug.printMsg("Oops, the BST seems to not exist") # root doesnt exist return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)", "def search(self, key):\n x = self.root\n\n while x is not self.nil:\n if key == x.key:\n break\n\n if key < x.key:\n x = x.left\n else:\n x = x.right\n return x", "def _get(self, root: TreeNode, key: TreeNode) -> TreeNode:\n # Always do the edge-case check, which could raise an error, FIRST!!\n if root is None: # BC2 - not found\n return None\n # BST-order traverse: examine root first, then recur left or recur right depending on key comparison\n if root.key == key: # BC1 - found\n return root\n\n result_left_subtree = None\n result_right_subtree = None\n\n if key < root.key:\n result_left_subtree = self._get(root.left, key)\n elif key > root.key:\n result_right_subtree = self._get(root.right, key)\n\n if result_left_subtree is not None:\n return result_left_subtree\n elif result_right_subtree is not None:\n return result_right_subtree\n else:\n return None", "def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value", "def get_from_subtree(subtree, key):\n temp_subtree = subtree\n while temp_subtree is not None:\n if key == temp_subtree.key:\n return temp_subtree.value\n elif key < temp_subtree.key:\n temp_subtree = temp_subtree.left\n elif key > temp_subtree.key:\n temp_subtree = temp_subtree.right\n return None", "def get_node(self, key: str) -> Optional[Node]:", "def get_node(self, key: str) -> Node:", "def _get(self, k, currNode):\n if not currNode:\n return\n if k < currNode.key:\n return self._get(k, currNode.leftChild)\n elif k > currNode.key:\n return self._get(k, currNode.rightChild)\n elif k == currNode.key:\n return currNode", "def get_node(self, key):\n pos = self._get_node_pos(key)\n if pos is None:\n return None\n return self._hashring[self._sorted_keys[pos]]", "def search(root, key):\n if root is None:\n return None\n else:\n if root.key == key:\n return root.value\n elif root.right is None and root.left is None:\n return None\n elif key >= root.key:\n return search(root.right, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right\n elif key < root.key:\n return search(root.left, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right", "def find(self, key):\n if self.key == key:\n return self.item\n elif key > self.key:\n if self.right:\n return self.right.find(key)\n else:\n if self.left:\n return self.left.find(key)\n # Replace by correct code", "def lookup(self, key):\n k = self.get_position(key)\n\n if self.keys[k] == key:\n return node.values[k]\n\n # Lookup in the child node.\n if self.refs[k+1] == None:\n return None\n return self.refs[k+1].lookup(key)", "def search(self, key):\n if self.key == key:\n if self.val is not None:\n return self.val\n else:\n return self.key\n\n \"\"\"If the key of the node is smaller than the root node's key, traverse the left subtree\"\"\"\n if self.key < key:\n self.left.search(key)\n\n \"\"\"If the key of the node is greater than the root node's key, traverse the right subtree \"\"\"\n if self.key > key:\n self.right.search(key)\n\n \"\"\"If tree is empty, return None\"\"\"\n return None", "def lookup(self, key):\n return self.root.lookup(key)", "def search(self, key):\r\n (node, index) = self.root, self.root.search(key)\r\n while not node.contains_key_at(key, index) and not 
node.is_leaf():\r\n node = node.children[index]\r\n index = node.search(key)\r\n\r\n return (node, index) if node.contains_key_at(key, index) else None", "def __find_key_in_level(node, key):\n for child in node.children:\n if child.key == key:\n return child\n\n return False", "def find(self, key) -> Union[\"Node\", None]:\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left # traverse left\n\n elif key > current.key:\n current = current.right # traverse right\n\n if current is None: # failure\n break\n return current", "def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None", "def retrieve(self, key):\n index = self._hash_mod(key)\n node = self.storage[index]\n while node is not None:\n if node.key == key:\n return node.value\n node = node.next\n return None", "def search(self, key: int, possible_parent=False) -> TreeNode:\n node = prev_node = self.root\n while node:\n if key > node.val:\n prev_node = node\n node = node.right\n elif key == node.val:\n return node\n else:\n prev_node = node\n node = node.left\n if possible_parent:\n return prev_node\n return None", "def get(self, key):\n index = key % self.size\n curr_node = self.hash_table[index]\n\n while curr_node:\n if curr_node.key == key:\n return curr_node.value\n else:\n curr_node = curr_node.next\n\n return -1", "def find_iterative(self, node, key):\n current_node = node\n while current_node:\n if key == current_node.key:\n return current_node\n if key < current_node.key:\n current_node = current_node.left\n else:\n current_node = current_node.right\n return None", "def _get(self, key, current_node):\n pass", "def __getitem__(self, key):\n hash_ = self._hash(key)\n start = bisect.bisect(self._keys, hash_)\n if start == len(self._keys):\n start = 0\n return self._nodes[self._keys[start]]", "def __getitem__(self, key):\n hash_ = self._hash(key)\n start = bisect.bisect(self._keys, hash_)\n if start == len(self._keys):\n start = 0\n return self._nodes[self._keys[start]]", "def __next__(self):\n\n nxt = next(self.tree)\n if nxt is not None:\n return nxt.key", "def get(self, key):\n\n node = self._get_node(key) # Get the node with the key (if it exists)\n\n if node is None:\n return None\n else:\n return node.value", "def search(self, key):\n if self.root is None:\n return None\n return self.root.search(key)", "def get(self, key):\n # Your code here\n\n idx = self.hash_index(key)\n\n # check if the index is in range\n if idx >= 0 and idx < self.capacity:\n curr_node = self.hash_table[idx]\n\n # check if any node at index exists\n if curr_node is None:\n return None\n\n # if there's already something at this index\n while curr_node is not None:\n \n # check to see if there is an entry at this index whose key matches the provided key\n while curr_node.key is not key:\n curr_node = curr_node.next\n \n # if we never found an entry with a matching key, return None\n if curr_node.key is not key or curr_node is None:\n return None\n else:\n return curr_node.value\n \n \n # otherwise return None if the index is not in range\n else:\n return None", "def search(self, key):\n\n current = self.head\n\n while current:\n if current.data == key:\n return current\n else:\n current = current.next_node\n\n return None", "def search(self, key):\n if self.key == key:\n return self\n if self.key > key:\n return None if self.left is None else self.left.search(key)\n return None if self.right is None else self.right.search(key)", "def get(self, key):\r\n if not isinstance(key, 
str):\r\n raise TypeError(\"Key must be a string\")\r\n\r\n node = self._find_node(key)\r\n if node is None:\r\n return None\r\n else:\r\n return node.value[1]", "def get(self, key):\r\n\t\t# return None if the key doesn't exist\r\n\t\tif not self.contains_key(key):\r\n\t\t\treturn None\r\n\t\telse:\r\n\t\t\tindex = self.get_index(key) # get the index of the key\r\n\r\n\t\t\t# begin traversal of the linked list until we reach the key\r\n\t\t\tcur_node = self._buckets[index].head\r\n\t\t\twhile cur_node.key != key:\r\n\t\t\t\tcur_node = cur_node.next\r\n\r\n\t\t\treturn cur_node.value", "def find_recursive(self,node,key):\n if None == node or key == node.key:\n return node\n elif key < node.key:\n return self.find_recursive(node.left,key)\n else:\n return self.find_recursive(node.right,key)", "def _get_node(self, key):\n\n index = self._hash_function(key) % self.capacity # Get the index by hashing the key\n node = self._buckets[index].contains(key) # Get the node with the key (if it exists)\n return node", "def get_node(self, string_key):\n pos = self.get_node_pos(string_key)\n if pos is None:\n return None\n return self.ring[ self._sorted_keys[pos] ]", "def find(self, key):\n curr_node = self.head\n\n while curr_node is not None: # a normal traversal and checking first match\n if curr_node.data == key:\n return curr_node\n curr_node = curr_node.next\n\n return None", "def find(self, k):\n if k == self.key:\n return self\n elif k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n else:\n if self.right is None: \n return None\n else:\n return self.right.find(k)", "def get_node(self, key, print_path=False):\r\n parent_node, search_node = self.__compare(key, method='search', print_path=print_path)\r\n self.__check_node(search_node)\r\n\r\n return search_node", "def successor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.right and tree_node.val <= key:\n right_subtree = tree_node.right\n while right_subtree.left:\n right_subtree = right_subtree.left\n return right_subtree\n else:\n while tree_node:\n if tree_node.val > key:\n return tree_node\n tree_node = tree_node.parent\n return", "def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None", "def find(self, key, forDel=False):\n found = False\n root = self.root\n while not found:\n if key > root.key:\n if root.right is None:\n print(\"Key not found\")\n return False\n root = root.right\n elif key < root.key:\n if root.left is None:\n print(\"Key not found\")\n return False\n root = root.left\n\n elif root.key == key:\n found = True\n\n if forDel:\n return root\n return root.content", "def get(self, key):\r\n index = self.hash(key)\r\n l = self.bucket[index]\r\n while l.next:\r\n if l.next.key == key:\r\n return l.next.val\r\n l = l.next\r\n return -1", "def __getitem__(self, key):\n return self._root.__getitem__(key)", "def get(self, key):\n hi = self.hash_index(key)\n if (self.storage[hi]):\n if(self.storage[hi].next):\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n\n return None", "def get_node(self, key, print_path=False):\n parent_node, search_node = self.__compare(key, method='search', print_path=print_path)\n\n return search_node", "def get(self, key: int) -> int:\n hashKey = key % 1000\n if self.bucket[hashKey]:\n 
node = self.bucket[hashKey]\n while node:\n if node.pair[0] == key:\n return node.pair[1]\n node = node.next\n return -1", "def get(self, key):\n # O(1) in best case and O(n) in worst case Time Complexity\n # O(1) in best case and O(n) in worst case Space Complexity\n\n currentNode = self.getElement(key)\n if (currentNode.next == None):\n return -1\n else:\n return currentNode.next.v", "def get(self, key: int) -> int:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n return curr.next.val\n curr = curr.next\n\n return -1", "def __getitem__(self, item: int) -> int:\n return self.root[item].key", "def get(self, key):\n ha = self.myhash(key)\n if key not in self.hashmap[ha][0]:\n return -1\n else:\n return self.hashmap[ha][1][self.hashmap[ha][0].index(key)]", "def find(self, key):\n if self.head is None:\n return\n itr = self.head\n while itr:\n if itr.data == key:\n return itr.data\n itr = itr.next\n return None", "def get(self, key):\n node = self._get_node(key)\n\n if node:\n return node.data", "def __getitem__(self, k):\n if self.is_empty():\n raise KeyError('key Error:' + repr(k))\n else:\n p = self._subtree_search(self.root(), k)\n self._rebalance_access(p)\n #this might be an unsuccessful search, so deal with this...\n if k!=p.key():\n raise KeyError('key error:'+repr(k))\n return p.value()", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def find_first_node(root_node, key):\n try:\n return next(find_nodes(root_node, key))\n except StopIteration:\n return None", "def search(self, key):\n return self.find_iterative(self.root,key)", "def __getitem__(self, key):\n result = self.tree[key]\n if result is not None:\n \"\"\"This needs to be deep-copied in order not to change the elements in the map via the reference, but\n return the value as in SetlX.\n The index 2 from key implies stands for the value as key-value-pairs are represented as lists of length 2\"\"\"\n return copy.deepcopy(result.key[2])", "def get(self, key):\n # Your code here \n index = self.hash_index(key) \n cur = self.data[index].head \n\n if cur==None:\n print(\"linked list is empty\")\n elif cur.key== key:\n return cur.value\n else:\n while cur.next:\n cur= cur.next\n if cur.key ==key: \n return cur.value", "def predecessor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.left and tree_node.val >= key:\n left_subtree = tree_node.left\n while left_subtree.right:\n left_subtree = left_subtree.right\n return left_subtree\n else:\n while tree_node:\n if tree_node.val < key:\n return tree_node\n tree_node = tree_node.parent\n return", "def binary_search_tree():\n\n class Node(object):\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n def insert(node, key):\n \"\"\" Insertion method for a binary search tree \"\"\"\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node\n\n \"\"\" Let us create the following BST \n 50 \n / \\ \n 30 70 \n / \\ / \\ \n 
20 40 60 80\n \"\"\"\n\n root = None\n root = insert(root, 50)\n root = insert(root, 30)\n root = insert(root, 20)\n root = insert(root, 40)\n root = insert(root, 70)\n root = insert(root, 60)\n root = insert(root, 80)", "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def _insert(self, key: int) -> TreeNode:\n node = self.root\n while True:\n # Check if a key is greater than node.\n if key > node.val:\n if not node.right:\n # node.right is a leaf\n node.right = TreeNode(val=key)\n node.right.parent = node\n return node\n node = node.right\n elif key < node.val:\n if not node.left:\n # node.left is a leaf\n node.left = TreeNode(val=key)\n node.left.parent = node\n return node\n node = node.left\n else:\n # print(f\"{key}: already in a Tree.\")\n return", "def GetRootKey(self):", "def search(self, key):\r\n left = 0 \r\n right = self.num_keys()\r\n while right > left:\r\n mid = (left + right)//2\r\n if self.keys[mid] >= key:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n return left", "def get(self, key):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n return v\n return -1", "def __getitem__(self, key):\n\n if type(key) != self.type:\n raise TypeError\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n print(\"FIRST_CHAR\", first_char)\n print(\"self.children\", self.children)\n raise KeyError\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n raise KeyError\n\n return node.value\n else:\n return self.children[first_char][others]", "def minKeyTree(root):\n try:\n min = None\n if (root is not None):\n if (root['left'] is None):\n min = root\n else:\n min = minKeyTree(root['left'])\n return min\n except Exception as exp:\n error.reraise(exp, 'BST:minKeyNode')", "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n #unsuccesful search and return the last position searched\n return p", "def get(self, key):\n index = key % self.size\n\n cur = self.bucket[index]\n while cur:\n if cur.key == key:\n return cur.val\n cur = cur.next\n return -1", "def __search(node, value):\n if node:\n if node.key == value:\n return True\n else:\n if value < node.key:\n return BST.__search(node._left, value)\n else:\n return BST.__search(node._right, value)\n else:\n return False", "def get(self, key):\n dkey = digest(key)\n _log.debug(\"Server:get %s\" % base64.b64encode(dkey))\n # if this node has it, return it\n exists, value = self.storage.get(dkey)\n if exists:\n return defer.succeed(value)\n node = Node(dkey)\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to get key %s\" % key)\n return defer.succeed(None)\n spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find()", "def get(self, key):", "def get(self, key):", "def get(self, key: int) -> int:\n index = self.hash(key)\n curr = self.map[index]\n\n # Search through list\n while curr:\n # If the value in list matches key, return value\n if curr.val[0] == key: return curr.val[1]\n curr = curr.next\n\n # If it doesn't exist, return -1\n return -1", "def get(self, key):\r\n for i in 
range(len(self.lis)):\r\n if self.lis[i][0] == key:\r\n return self.lis[i][1]\r\n \r\n return -1", "def findNode(key,nodesList):\n for node in nodesList:\n if node[\"key\"] == key:\n return node\n print(\"Error:: Could not find node with given key\")", "def value_from_data_key(node, key):\n if key == 'tags_inher':\n return node.tags\n elif key == 'children_heading':\n return [c.heading for c in node.children]\n elif key in ('parent_heading',\n 'previous_same_level_heading',\n 'next_same_level_heading',\n ):\n othernode = getattr(node, key.rsplit('_', 1)[0])\n if othernode and not othernode.is_root():\n return othernode.heading\n else:\n return\n else:\n return getattr(node, key)", "def get(self, key, default=None):\n if self.root is not None:\n res = self._get(key, self.root)\n if res:\n return res\n else:\n return default\n return default", "def __getitem__(self, key: Union[int, str]) -> Node:\r\n node: Node = None\r\n if isinstance(key, int):\r\n node = self._nodes.get(key)\r\n if isinstance(key, str):\r\n node = self._node_name_map.get(key)\r\n\r\n if node is None:\r\n raise IndexError(\"Invalid key.\")\r\n\r\n return node", "def get(self, key: int) -> int:\n idx = key % self.size\n curr = self.hashmap[idx]\n while curr:\n if curr.key == key:\n return curr.value\n else:\n curr = curr.next\n return -1", "def get(self, key):\n node = self.head\n value = None\n exists = False\n while node: # Loop through nodes, looking for key\n if node.key == key:\n exists = True\n break\n\n if exists:\n if node is self.head:\n value = node.value\n else:\n self.delete(node)\n\n new_node = CacheNode(key, value)\n self.length += 1\n\n return value", "def get(self, key: int) -> int:\n sh = key % 37\n if self.map[sh] == None:\n return -1\n for i in range(len(self.map[sh])):\n kv = self.map[sh][i]\n if kv[0] == key:\n return kv[1]\n return -1", "def get(self, key: int) -> int:\n idx = key % 1000\n if not self.map[idx]:\n return -1\n else:\n curr = self.map[idx]\n while curr:\n if curr.key == key:\n return curr.val\n curr = curr.next\n return -1", "def get_min(self):\n if self.root is None: # BC1\n return float('+inf')\n\n current = self.root\n while current.left is not None: # Traverse like a linked-list\n current = current.left\n\n return current.key", "def get(self, key):\n index = int((keyIndex(key) & (self.BUCKET_SIZE - 1)))\n inner = self.keys[index]\n if inner == None:\n return None\n i = 0\n while len(inner):\n innerKey = inner[i]\n if innerKey == self.EMPTY_KEY:\n return None\n elif innerKey == key:\n return self.values[index][i]\n i += 1\n return None", "def get(self, key: int) -> int:\n index = key % 10000\n head = self.array[index]\n while head.next:\n head = head.next\n if head.key == key:\n return head.value\n break\n return -1", "def get(self, key):\n dkey = digest(key)\n # if this node has it, return it\n if self.storage.get(dkey) is not None:\n return defer.succeed(self.storage.get(dkey))\n node = Node(dkey)\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to get key %s\" % key)\n return defer.succeed(None)\n spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find()", "def contains(self, key: str) -> SLNode:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return cur", "def contains(self, key: str) -> SLNode:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return cur", "def 
get_key(self, proxy_index):\n return self.treeItem(proxy_index)", "def get_index(self, key):\r\n if self.hash_table[self.horner_hash(key)] is None:\r\n return None\r\n if self.hash_table[self.horner_hash(key)].key is key:\r\n return self.horner_hash(key)", "def get(self, key):\n hash_key = self._hash_function(key) % self.capacity # returns hashed keys corresponding bucket index\n bucket = self._buckets[hash_key] # get bucket for that index\n\n current = bucket.head # set bucket.head to variable as not to override linked list\n\n while current is not None: # iterate through linked list until value is found, or returns None\n if current.key == key:\n return current.value\n current = current.next", "def get(self, key):\n hash_key = key % self.key_space\n return self.hash_table[hash_key].get(key)", "def __getitem__(self, item):\n if self.child_keys is None:\n self.child_keys = sorted(self.children.keys(), key=str.lower)\n return self.children[self.child_keys[item]]", "def get(self, key: int) -> int:\n hashvalue = key%1000\n if self.hashset[hashvalue]==None:\n return -1\n head = self.hashset[hashvalue]\n \n while head:\n k,v = head.data \n if k==key:\n return v\n \n head = head.next\n return -1", "def _subtree_search(self, p, k):\n if k == p.key(): # found match\n return p\n elif k < p.key(): # search left subtree\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else: # search right subtree\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n return p # unsucessful search", "def find(self, key):\n curr = self.head\n while curr and curr.data != key:\n curr = curr.next\n return curr # Will be None if not found", "def get(self, key):\n\t\treturn self.__get(key, key[1:])", "def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False" ]
[ "0.75607723", "0.7343995", "0.7311506", "0.7305017", "0.7293168", "0.7194743", "0.7154004", "0.7130973", "0.71049505", "0.7073313", "0.704876", "0.70440394", "0.70387834", "0.7038038", "0.70353025", "0.70056677", "0.69532704", "0.6952328", "0.6924428", "0.69083416", "0.6879406", "0.68121743", "0.6801416", "0.6781831", "0.6781831", "0.6744702", "0.6730232", "0.6724606", "0.6707262", "0.6699226", "0.6699062", "0.66897124", "0.6686386", "0.66771686", "0.66745615", "0.66650635", "0.66599673", "0.6654871", "0.6647237", "0.66354275", "0.66344386", "0.66007876", "0.6589911", "0.6568055", "0.65612197", "0.6540177", "0.6526642", "0.64959985", "0.6484248", "0.6482545", "0.6433901", "0.6417933", "0.6406831", "0.6389141", "0.63537544", "0.6333527", "0.6322614", "0.631705", "0.6314052", "0.62835157", "0.6282465", "0.6279439", "0.62789094", "0.62785345", "0.6273688", "0.6248629", "0.6246662", "0.62417674", "0.62321985", "0.6223843", "0.62219316", "0.6221827", "0.62133884", "0.62133884", "0.6200174", "0.6181461", "0.6181002", "0.61714303", "0.61711985", "0.61704576", "0.6165103", "0.6155171", "0.6146233", "0.6144468", "0.61379683", "0.6125719", "0.61244303", "0.61233056", "0.61001956", "0.61001956", "0.6099273", "0.6097101", "0.6086575", "0.6083975", "0.60727745", "0.607055", "0.60563755", "0.60469973", "0.60410964", "0.60310113" ]
0.7251455
5
Recursively searches the BST using a log_2(n) algorithm to find the key if it is there
def recursiveLookup(self, key, curr): # basically repeat insert debug.printMsg("Entered recursiveLookup") # if we found a match break debug.printMsg('Checking base condition: ' + key + ' = ' + curr.key) if key == curr.key: debug.printMsg("Success, found") return (curr, None) # if the key is larger than curr elif key > curr.key: debug.printMsg("Nope, now checking if we should go right") debug.printMsg("yep") debug.printMsg("Check if we still have room to search") if curr.hasRightChild(): debug.printMsg("Moving further right") # move onto the next node along the search path return self.recursiveLookup(key, curr.right) else: debug.printMsg("Nope, ran out of search path. bummer") # hit the end and there was no match return (None, True) else: debug.printMsg("Nope, we're going left") debug.printMsg("Check if we still have room to search") if curr.hasLeftChild(): debug.printMsg("Moving further left") return self.recursiveLookup(key, curr.left) else: debug.printMsg("Shit balls, we ran out of search path") return (None, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_search_tree():\n\n class Node(object):\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n def insert(node, key):\n \"\"\" Insertion method for a binary search tree \"\"\"\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node\n\n \"\"\" Let us create the following BST \n 50 \n / \\ \n 30 70 \n / \\ / \\ \n 20 40 60 80\n \"\"\"\n\n root = None\n root = insert(root, 50)\n root = insert(root, 30)\n root = insert(root, 20)\n root = insert(root, 40)\n root = insert(root, 70)\n root = insert(root, 60)\n root = insert(root, 80)", "def search(self, key):\n x = self.root\n\n while x is not self.nil:\n if key == x.key:\n break\n\n if key < x.key:\n x = x.left\n else:\n x = x.right\n return x", "def search(root, key):\n if root is None:\n return None\n else:\n if root.key == key:\n return root.value\n elif root.right is None and root.left is None:\n return None\n elif key >= root.key:\n return search(root.right, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right\n elif key < root.key:\n return search(root.left, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right", "def find_iterative(self, node, key):\n current_node = node\n while current_node:\n if key == current_node.key:\n return current_node\n if key < current_node.key:\n current_node = current_node.left\n else:\n current_node = current_node.right\n return None", "def search(self, key):\n if self.key == key:\n if self.val is not None:\n return self.val\n else:\n return self.key\n\n \"\"\"If the key of the node is smaller than the root node's key, traverse the left subtree\"\"\"\n if self.key < key:\n self.left.search(key)\n\n \"\"\"If the key of the node is greater than the root node's key, traverse the right subtree \"\"\"\n if self.key > key:\n self.right.search(key)\n\n \"\"\"If tree is empty, return None\"\"\"\n return None", "def find_recursive(self,node,key):\n if None == node or key == node.key:\n return node\n elif key < node.key:\n return self.find_recursive(node.left,key)\n else:\n return self.find_recursive(node.right,key)", "def search(self, key: int, possible_parent=False) -> TreeNode:\n node = prev_node = self.root\n while node:\n if key > node.val:\n prev_node = node\n node = node.right\n elif key == node.val:\n return node\n else:\n prev_node = node\n node = node.left\n if possible_parent:\n return prev_node\n return None", "def binarySearch(nums, key): # find the most closer one but smaller then the key (better one)\n begin = 0\n end = len(nums)-1\n while end - begin > 1:\n mid = begin + (end - begin >> 1)\n if key <= nums[mid]:\n end = mid\n else:\n begin = mid\n if key > nums[end]:\n return end\n if nums[begin]< key <= nums [end]:\n return begin\n return -1", "def search(self, key):\r\n left = 0 \r\n right = self.num_keys()\r\n while right > left:\r\n mid = (left + right)//2\r\n if self.keys[mid] >= key:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n return left", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n 
else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n #unsuccesful search and return the last position searched\n return p", "def _get(self, root: TreeNode, key: TreeNode) -> TreeNode:\n # Always do the edge-case check, which could raise an error, FIRST!!\n if root is None: # BC2 - not found\n return None\n # BST-order traverse: examine root first, then recur left or recur right depending on key comparison\n if root.key == key: # BC1 - found\n return root\n\n result_left_subtree = None\n result_right_subtree = None\n\n if key < root.key:\n result_left_subtree = self._get(root.left, key)\n elif key > root.key:\n result_right_subtree = self._get(root.right, key)\n\n if result_left_subtree is not None:\n return result_left_subtree\n elif result_right_subtree is not None:\n return result_right_subtree\n else:\n return None", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Queue\n q = Queue()\n mapper = {} #child_point : (parent_point, direction_to_child)\n q.push(problem.getStartState())\n mapper[problem.getStartState()] = None #root\n\n while (not q.isEmpty()):\n point = q.pop()\n\n if (problem.isGoalState(point)):\n c = point\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n for child in problem.getSuccessors(point):\n if (child[0] not in mapper):\n q.push(child[0])\n mapper[child[0]] = (point, child[1])\n\n # util.raiseNotDefined()", "def search(self, key):\n if self.key == key:\n return self\n if self.key > key:\n return None if self.left is None else self.left.search(key)\n return None if self.right is None else self.right.search(key)", "def binary_search(seq, key): # find the most closer one to the key but smaller than the key\n# begin/end /not exist\n\n\n begin = 0\n end = len(seq)-1\n if key > seq[end]:\n return end\n while begin <= end:\n mid1 = (begin + end)//2\n mid2 = (begin + end)//2 + 1\n tmp1 = seq[mid1]\n tmp2 = seq[mid2]\n if key > tmp1 and key > tmp2:\n begin = mid1\n elif key < tmp1:\n end = mid1\n elif key > tmp1 and key < tmp2:\n return mid1\n elif key == tmp1:\n end = mid1 - 1\n return -1", "def __search(node, value):\n if node:\n if node.key == value:\n return True\n else:\n if value < node.key:\n return BST.__search(node._left, value)\n else:\n return BST.__search(node._right, value)\n else:\n return False", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n \n \n qu.push(node)\n lis.append(node)\n \n print qu.list\n while qu.isEmpty()!=True:\n node=qu.pop()\n pos=node['state']\n visited.append(pos)\n print visited\n if problem.isGoalState(pos):\n print \"found\"\n return getPath(problem,node)\n #break\n suc=problem.getSuccessors(pos)\n if suc ==None:\n continue \n \n print suc\n for step in suc:\n #if step not in dic :\n if step[0] not in visited:\n 
childnode={}\n childnode['parent']=pos\n childnode['direction']=step[1]\n childnode['state']=step[0]\n qu.push(childnode)\n lis.append(childnode)\n \n\n #util.raiseNotDefined()", "def search(self, key):\r\n (node, index) = self.root, self.root.search(key)\r\n while not node.contains_key_at(key, index) and not node.is_leaf():\r\n node = node.children[index]\r\n index = node.search(key)\r\n\r\n return (node, index) if node.contains_key_at(key, index) else None", "def find(self, key) -> Union[\"Node\", None]:\n current = self\n while key != current.key:\n if key < current.key:\n current = current.left # traverse left\n\n elif key > current.key:\n current = current.right # traverse right\n\n if current is None: # failure\n break\n return current", "def search ( root, key ):\n\n print \"Searching i1 for equality (%s,%s,%s,%s)\"\\\n %(key[0], key[1], key[2], key[3])\n cur_node = root\n nodes_visited = 0\n global leaf_visited \n leaf_visited = 0\n while not cur_node.isleaf():\n cur_node = cur_node.search_key_full_eq( key )\n nodes_visited += 1\n found = cur_node.search_key_full_eq( key )\n print \" visited %d leaf nodes\\n\" %leaf_visited \n nodes_visited = nodes_visited + leaf_visited\n print \"Total nodes visited:\", nodes_visited\n pages = set()\n if len(found) == 0:\n print \"Record not found.\"\n else:\n print \"%d Records found.\\n\"%len(found),\n for (key, pageid) in found:\n pages.add(pageid)\n print \"%s pageid:%s \\n\" %(key, pageid), \n print\n print \"Total disk pages:\", len(pages)\n print \"\"", "def binary_search(lst, key):\n lst.sort()\n low = 0\n high = len(lst) - 1\n while low <= high:\n mid = low + (high - low) //2\n if lst[mid] == key:\n return mid\n if lst[mid] < key:\n low = mid + 1\n else:\n hihg = mid - 1\n return -low - 1", "def _insert(self, key: int) -> TreeNode:\n node = self.root\n while True:\n # Check if a key is greater than node.\n if key > node.val:\n if not node.right:\n # node.right is a leaf\n node.right = TreeNode(val=key)\n node.right.parent = node\n return node\n node = node.right\n elif key < node.val:\n if not node.left:\n # node.left is a leaf\n node.left = TreeNode(val=key)\n node.left.parent = node\n return node\n node = node.left\n else:\n # print(f\"{key}: already in a Tree.\")\n return", "def breadth_first_search(self, target: Dict) -> Optional[Node]:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node: Node = assist_queue.popleft()\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)\n return None", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def find(self, key, forDel=False):\n found = False\n root = self.root\n while not found:\n if key > root.key:\n if root.right is None:\n print(\"Key not found\")\n return False\n root = root.right\n elif key < root.key:\n if root.left is None:\n print(\"Key not found\")\n return False\n root = root.left\n\n elif root.key == key:\n found = True\n\n if forDel:\n return root\n return root.content", "def breadth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n 
path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while (True):\r\n state = fringe[0]\r\n del fringe[0]\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def _subtree_search(self, p, k):\n if k == p.key(): # found match\n return p\n elif k < p.key(): # search left subtree\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else: # search right subtree\n if self.right(p) is not None:\n return self._subtree_search(self.right(p), k)\n return p # unsucessful search", "def binary_search(L, key):\r\n mid = len(L) // 2\r\n if len(L) == 2:\r\n return L[0] == key or L[1] == key\r\n if L[mid] == key:\r\n return True\r\n elif L[mid] > key:\r\n return binary_search(L[:mid], key)\r\n else:\r\n return binary_search(L[mid:], key)", "def find(self, k):\n if k == self.key:\n return self\n elif k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n else:\n if self.right is None: \n return None\n else:\n return self.right.find(k)", "def binary_search(l, value):\n \n e = len(l) - 1\n b = 0\n while e-b > 1:\n mid = (b+e)// 2\n key = l[mid]\n if value < key:\n e = mid - 1\n elif value > key:\n b = mid + 1\n else: #found value :)\n return mid\n if l[b] == value:\n return b\n elif l[e] == value:\n return e\n return -1", "def __search_tree(word, index=0, node=None):\n if index + 1 > len(word):\n return node\n\n current_key = word[index]\n\n child_node = _Node.__find_key_in_level(node, current_key)\n\n if not child_node:\n return False\n\n return _Node.__search_tree(word, index + 1, child_node)", "def successor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.right and tree_node.val <= key:\n right_subtree = tree_node.right\n while right_subtree.left:\n right_subtree = right_subtree.left\n return right_subtree\n else:\n while tree_node:\n if tree_node.val > key:\n return tree_node\n tree_node = tree_node.parent\n return", "def _binary_search(mylist, key, left, right):\n\t### TODO\n\tif left <= right:\n\n\t\tmidPoint = (left + right) // 2\n\t\tif mylist[midPoint] == key:\n\t\t\treturn midPoint\n\n\t\telif mylist[midPoint] > key:\n\t\t\treturn _binary_search(mylist, key, left, midPoint - 1)\n\n\t\telif mylist[midPoint] < key:\n\t\t\treturn _binary_search(mylist, key, midPoint + 1, right)\n\n\treturn -1", "def bfs(self, root: TreeNode) -> int:\n if not root:\n return 0\n queue = deque([(root, 1)])\n while queue:\n node, level = queue.popleft()\n if not node.left and not node.right:\n return level\n if node.left:\n queue.append((node.left, level + 1))\n if node.right:\n queue.append((node.right, level + 1))\n return -1", "def breadth_first_search(root_node):\n if root_node.goal_test():\n return root_node\n\n frontier = [root_node]\n explored = []\n\n while frontier:\n node = frontier.pop(0)\n explored.append(node)\n\n for successor in node.generate_successors():\n if not successor:\n continue\n if not 
(successor.is_in(frontier) and successor.is_in(explored)):\n if successor.goal_test():\n return successor\n frontier.append(successor)\n return None # No Solution", "def lookup(self, key):\n # check that this tree actually has a root node\n debug.printMsg(\"Call made to Lookup\")\n debug.printMsg(\"checking if we have a BST\")\n if self.root:\n debug.printMsg(\"Calling Recursive Lookup\")\n (result, err) = self.recursiveLookup(key, self.root)\n # if we did not find anything\n if err: \n debug.printMsg(\"Oops, we couldn't find anything\")\n return None\n else: \n # we found a result\n debug.printMsg(\"we found: \")\n return result\n else:\n debug.printMsg(\"Oops, the BST seems to not exist\")\n # root doesnt exist\n return None", "def search(state, goal_state):\n\n def gn(node):\n return node.gn()\n\n tiles_places = []\n for i in range(len(goal_state)):\n for j in range(len(goal_state)):\n heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))\n\n def hn(node):\n cost = 0\n for i in range(len(node.state)):\n for j in range(len(node.state)):\n tile_i, tile_j = tiles_places[node.state[i][j]][1]\n if i != tile_i or j != tile_j:\n cost += abs(tile_i - i) + abs(tile_j - j)\n return cost\n\n def fn(node):\n return gn(node) + hn(node)\n\n return bfs.search(state, goal_state, fn)", "def breadthFirstSearch(problem):\n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in tempSuccList:\n repeat = False\n for s in successor:\n if (s[0] == succ[0]):\n repeat = True\n if (repeat == False):\n successor.append(succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def _recursiveSearch(self, data, node):\n\t\tif (not node):\n\t\t\treturn None\n\t\tif (node.value() == data):\n\t\t\treturn node\n\t\telif (data < node.value()):\n\t\t\treturn self._recursiveSearch(data, node.lchild())\n\t\telse:\n\t\t\treturn self._recursiveSearch(data, node.rchild())", "def bsearch_left(nums: List[int], target: int) -> int:\n low, high = 0, len(nums) - 1\n while low <= high:\n mid = low + (high - low) // 2\n if nums[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n if low < len(nums) and nums[low] == target:\n return low\n else:\n return -1", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. 
s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. [s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def binary_search(node, value):\n aux_node = None\n while node is not None and node.value != value:\n if value < node.value:\n aux_node = node.left\n node = aux_node\n else:\n aux_node = node.right\n node = aux_node\n return node if node.value == value else None", "def search_key_full_eq(self, key):\n for i in xrange(len(self.keys)):\n flag = 0\n for indx in range(4):\n if cmp(self.keys[i][indx],key[indx]) == 0:\n flag = 0\n continue\n if cmp(key[indx],\"*\") == 0:\n print \" visited internal! ==>\", self.keys[i]\n return self.pointers[i]\n elif self.keys[i][indx] > key[indx]:\n flag = 1\n else:\n flag = 2\n break \n # print \"searching %s:%s:%d\" %(str(self.keys[i]),str(key),flag)\n if flag == 1:\n if i > 0:\n print \" visited internal ==>\", self.keys[i] \n return self.pointers[i]\n else:\n print \" visited internal ==>\", self.keys[0] \n return self.pointers[0]\n elif flag == 0:\n print \" visited internals ==>\", self.keys[i]\n return self.pointers[i]\n print \" visited internalsed ==>\", self.keys[-1] \n return self.pointers[-1]", "def binarySearch(A, k):\n \n #TODO: Implement without using python's in-built functiondef binary(A, k):\n def bSearch(A, k, low, high):\n if high == low:\n if A[low] == k:\n return low\n else:\n return -1\n mid = (low + high)//2\n if A[mid] == k:\n return mid\n elif A[mid] > k:\n if low == mid:\n return -1\n else:\n return bSearch(A, k, low, mid-1)\n else:\n return bSearch(A, k, mid+1, high)\n if isinstance(A, list) == False or isinstance(k, int) == False:\n return -1\n else:\n if len(A) == 0:\n return -1\n else:\n x = bSearch(A, k, 0, len(A)-1)\n return x", "def search(self, val):\n currentNode = self.rootNode\n while True:\n if currentNode is None:\n print(\"Number not found.\")\n return None\n elif currentNode.val == val:\n print(\"Number found.\")\n return currentNode\n elif currentNode.val < val:\n currentNode = currentNode.right\n else:\n currentNode = currentNode.left", "def find_BFS(self, value):\n to_visit = [self]\n while to_visit:\n curr = to_visit.pop(0) # BFS -> .pop(0) -> queue \n if curr.value == value:\n return curr\n to_visit.extend(curr.children)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if 
problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def keySetTree(root, klist):\n try:\n if (root is not None):\n keySetTree(root['left'], klist)\n lt.addLast(klist, root['key'])\n keySetTree(root['right'], klist)\n return klist\n except Exception as exp:\n error.reraise(exp, 'BST:keySetTree')", "def test_search_finds_node(bst_balanced):\n assert bst_balanced.search(1).val == 1", "def find(self, key):\n if self.key == key:\n return self.item\n elif key > self.key:\n if self.right:\n return self.right.find(key)\n else:\n if self.left:\n return self.left.find(key)\n # Replace by correct code", "def Trees__CheckBST():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/ctci-is-binary-search-tree/problem\n # O(n) solution. Passes all test cases.\n # Tricky part with leaf in left side of root being bigger than root. (or right/smaller)\n # E.g:\n # 3\n # 2 6\n # 1 4 5 7\n # Note, the 4 is bigger than the parent 3. But in a proper BST parent\n # must be bigger than all items on parent's left side.\n # Keep track of last biggest element as we descend to children (& last smallest element)\n # val < last biggest (last_left) #for cases like 4\n # val > last smallest (last_right) # for mirror side.\n # Convieniently, this also ensures uniqueness.\n def checkBST(root):\n queue = []\n queue.append((root, None, None)) # node, last_left, last_right.\n while queue:\n node, last_left, last_right = queue.pop()\n if not node:\n continue\n if last_left and not node.data < last_left \\\n or last_right and not node.data > last_right:\n return False\n queue.append((node.left, node.data, last_right))\n queue.append((node.right, last_left, node.data))\n return True", "def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored", "def tree_find(T, x):\n if T.is_empty:\n return False\n if x == T.label:\n return True\n if x < T.label:\n return tree_find(T.left, x)\n else:\n return tree_find(T.right, x)", "def bidirectional_search(self):\n begin = time.time()\n\n initial_node = self.get_node(self.initial_state)\n\n final_node = self.get_node(self.final_state)\n\n queue = [initial_node, final_node]\n\n initial_node.visited_right = True\n \n final_node.visited_left = True\n\n visited_nodes = []\n \n while queue:\n node = queue.pop(0)\n\n if self.is_intersecting(node):\n end = time.time()\n\n method_time = end - begin\n\n copy_node = node\n\n path = []\n\n while node:\n path.append(node)\n\n node = node.parent_right\n\n path.reverse()\n\n del path[-1]\n\n while copy_node:\n 
path.append(copy_node)\n\n copy_node = copy_node.parent_left\n\n self.add_result('Busca bidirecional', method_time, path, visited_nodes)\n \n return True\n else:\n states = node.neighboring_states()\n\n neighbors = [self.add_node(state) for state in states]\n\n for neighbor in neighbors:\n if node.visited_left and not neighbor.visited_left:\n neighbor.parent_left = node\n\n neighbor.visited_left = True\n\n queue.append(neighbor)\n \n visited_nodes.append(neighbor)\n\n if node.visited_right and not neighbor.visited_right:\n neighbor.parent_right = node\n\n neighbor.visited_right = True\n\n queue.append(neighbor)\n\n visited_nodes.append(neighbor)\n \n end = time.time()\n\n method_time = end - begin\n\n self.add_result('Busca bidirecional', method_time, [], visited_nodes)\n\n return False", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Queue()\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init)\n closed = []\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currCost = currNode[2]\n\n if problem.isGoalState(currState):\n return currPath[1:]\n else:\n if currState not in closed:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath + [each[1]], currCost + each[2])\n open.push(temp)\n return False", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def test_search_returns_none_when_value_notin_tree_left(bst_all_to_left):\n assert bst_all_to_left.search(0) is None", "def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n 
fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal", "def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (queue y set)\n openNodes = util.Queue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Metemos el nodo en la cola\n openNodes.push(node)\n\n #Iteramos para cada nodo de la pila\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #Sacamos el nodo de arriba de la pila\n node = openNodes.pop()\n if problem.isGoalState(node.name):\n break\n else: #Expandimos los nodos sucesores del nodo n si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n #Metemos al sucesor en la cola\n openNodes.push(succNode)\n #Metemos el nodo n en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def breadth_first_search(problem):\n fringe = util.Queue()\n return general_search(problem, fringe)", "def test_insert_WithDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(10,2)\n \n self.bst.insert(5,2)\n \n self.bst.insert(20,3)\n self.bst.insert(20,4)\n \n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.bst.insert(5,123)\n self.bst.insert(14,456)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1,2])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2,123])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3,4])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7,456])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def __find_node(self, element) -> _AVLTreeNode or None:\n\n curr_node = self.__root\n while curr_node is not None:\n\n if self.__key(element) < self.__key(curr_node.data):\n curr_node = curr_node.left\n elif self.__key(curr_node.data) < self.__key(element):\n curr_node = curr_node.right\n else:\n return curr_node\n\n return None", "def _bin_search_recursive(self, v, start, end):\n if end < start:\n return start\n\n mid = (start + end) / 2\n if self.values[mid] == v:\n return mid\n elif self.values[mid] < v:\n return self._bin_search_recursive(v, mid + 1, end)\n else:\n return self._bin_search_recursive(v, start, mid - 1)", "def search(self, word):\n if not word:\n return False\n discrepancy = 1\n stack = [(child, word, discrepancy)\n for child in self.root.children.values()]\n while 
stack:\n curr_node, curr_word, curr_disc = stack.pop()\n if len(curr_word) == 1:\n if curr_node.nil:\n if curr_disc == 0 and curr_word == curr_node.key:\n return curr_node.nil\n elif curr_disc == 1 and curr_word != curr_node.key:\n return curr_node.nil\n else:\n for key, child in curr_node.children.items():\n if curr_word[0] == curr_node.key:\n stack.append((child, curr_word[1:], curr_disc))\n elif curr_disc> 0:\n stack.append((child, curr_word[1:], curr_disc - 1))\n return False", "def find(self, node):\n if not node:\n return 0\n\n left = self.find(node.left)\n right = self.find(node.right)\n cur = 1 # node.val\n path = 1\n if left and node.left.val == node.val:\n path += left\n cur = left + 1\n\n if right and node.right.val == node.val:\n path += right\n if right > left:\n cur = right + 1\n\n self.ret = max(self.ret, path - 1)\n return cur", "def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def bfs(root):\n\tdistances = {root: 0}\n\tqueue = deque(root)\n\n\twhile len(queue) != 0:\n\t\tnode = queue.popleft()\n\t\tvisit(node)\n\t\tfor neighbor in node.neighbors:\n\t\t\tif not distances.get(neighbor):\n\t\t\t\tdistances[neighbor] = distances[node] + 1\n\t\t\t\tqueue.append(neighbor)\n\n\treturn distances", "def in_order_recursive(tree, vertex, keys):\n if vertex == -1:\n return True\n result = in_order_recursive(tree, tree[vertex][1], keys)\n # If the previous result is False, then the check is completed.\n # If the value of the last key in the list is greater than the current one,\n # then it is not bst, return False.\n # If the vertex has a left child and the key values ​​are equal,\n # then it is also not bst, return False.\n if not result or (keys and (keys[-1] > tree[vertex][0])) or (\n tree[vertex][1] != -1 and tree[tree[vertex][1]][0] == tree[vertex][0]\n ):\n return False\n keys.append(tree[vertex][0])\n result = in_order_recursive(tree, tree[vertex][2], keys)\n return result", "def find_nodes_from_here(start_node, key):\n node_ = start_node\n yield from find_nodes(node_, key)\n while node_.parent:\n this_key_ = node_.key\n node_ = node_.parent\n if node_.key == key: # pragma: no branch\n yield node_\n for child_ in node_.children:\n if child_.key == this_key_: # pragma: no branch\n continue\n yield from find_nodes(child_, key)", "def binsearch(alist, key, start, end):\n mid = len(alist) // 2\n if start > end:\n return None\n elif start < end:\n return binsearch(alist, key, start, mid-1)\n else:\n return mid", "def search(self, key): \n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n if not current_node.children[index]: \n return False\n current_node = current_node.children[index] \n \n return current_node != None and 
current_node.isEndOfWord", "def search(self, key):\n return self.find_iterative(self.root,key)", "def recoverTree1(self, root: Optional[TreeNode]) -> None:\n self.traverseTree(root)\n def find_two_swapped(nums: List[int]) -> (int, int):\n length = len(nums)\n for i in range(length - 1, 0, -1):\n if nums[i] < nums[i - 1]:\n j = i - 1\n while j >=0 and nums[j] > nums[i]:\n j -= 1\n return nums[i], nums[j + 1]\n a, b = find_two_swapped(self.arr)\n\n self.nodeMap[a].val = b\n self.nodeMap[b].val = a", "def testBinarySearchTree():\n\n \"\"\"\n Example After Deletion\n 7\n / \\\n 1 4\n\n \"\"\"\n t = BinarySearchTree()\n t.insert(8)\n t.insert(3)\n t.insert(6)\n t.insert(1)\n t.insert(10)\n t.insert(14)\n t.insert(13)\n t.insert(4)\n t.insert(7)\n\n # Prints all the elements of the list in order traversal\n print(t.__str__())\n\n if t.getNode(6) is not None:\n print(\"The label 6 exists\")\n else:\n print(\"The label 6 doesn't exist\")\n\n if t.getNode(-1) is not None:\n print(\"The label -1 exists\")\n else:\n print(\"The label -1 doesn't exist\")\n\n if not t.empty():\n print((\"Max Value: \", t.getMax().getLabel()))\n print((\"Min Value: \", t.getMin().getLabel()))\n\n t.delete(13)\n t.delete(10)\n t.delete(8)\n t.delete(3)\n t.delete(6)\n t.delete(14)\n\n # Gets all the elements of the tree In pre order\n # And it prints them\n list = t.traversalTree(InPreOrder, t.root)\n for x in list:\n print(x)", "def minimal_tree(array: list):\n bst = BST()\n def build(l, r):\n if l == r: bst.insert(array[l]); return\n m = (l+r)//2\n # insert into the tree\n bst.insert(array[m])\n # build recursively\n build(l, m)\n build(m+1, r)\n build(0, len(array)-1)\n return bst", "def delete(self, key):\n\n # find node\n node = self.root\n while node and node.key != key:\n if key > node.key:\n node = node.right\n elif key < node.key:\n node = node.left\n\n if not node:\n return None\n\n # if has 2 child\n if node.right and node.left:\n # crawl to smallest node of right subtree\n smallest_node = node.right\n while smallest_node and smallest_node.left:\n smallest_node = smallest_node.left\n\n balancing_node = smallest_node.parent\n\n # replace smallest_node with node in tree\n smallest_node.parent.left = None\n smallest_node.parent = node.parent\n if not node.parent:\n pass\n elif node.parent < node:\n node.parent.right = smallest_node\n else:\n node.parent.left = smallest_node\n\n # if has 1 child\n elif node.right or node.left:\n balancing_node = node.parent\n if node.right:\n child = node.right\n else:\n child = node.left\n\n child.parent = node.parent\n if not node.parent:\n self.root = child\n elif node.parent < node:\n node.parent.right = child\n else:\n node.parent.left = child\n\n # no child\n else:\n balancing_node = node.parent\n if not node.parent:\n self.root = None\n else:\n if node.parent < node:\n node.parent.right = None\n else:\n node.parent.left = None\n\n balancing_node and self.rebalance(balancing_node)\n node.left, node.right, node.parent = [None] * 3\n return node", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #the logic is same as for DFS just that i made use of a Queue data structure\n #Here the queue acts as a FIFO queue\n neighbourNodes = util.Queue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves))\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n poppedNodeState, poppedNodeAction= neighbourNodes.pop()\n if(poppedNodeState in seenNodes):\n continue\n if problem.isGoalState(poppedNodeState):\n return poppedNodeAction\n 
seenNodes.add(poppedNodeState)\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n if(state in seenNodes):\n continue\n neighbourNodes.push((state, poppedNodeAction+[action]))\n return moves\n #util.raiseNotDefined()", "def test_insert_NoDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(5,2)\n self.bst.insert(20,3)\n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def question4(T,r,n1,n2):\n\n\tif(len(T)<=1):\t\t\t\t\t\t\t\t# Edge case : If the Tree only consists of a root and no children\n\t\treturn -1\n\n\tif(n1==None or n2==None):\t\t\t\t\t# Edge case : If n1 and n2 are not actually numbers\n\t\treturn -1\n\n\tlen_T = len(T)\n\tif(not n1 < len_T or not n2 < len_T):\t\t# Edge case : If the nodes gives in parameters do not actually exist in the tree\n\t\treturn -1\n\n\tn1_list = []\t\t\t\t\t\t\n\tn2_list = []\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node1 if found in O(N)\n\t\tif T[i][n1]==1:\n\t\t\tn1_list.append(i)\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node2 is found in O(N)\n\t\tif T[i][n2]:\n\t\t\tn2_list.append(i)\n\n\t\t\t\t\t\t\t\t\t\t\t\t# The root is a common ancestor of every node in the tree\n\tif not r in n1_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn1_list.append(r)\n\n\tif not r in n2_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn2_list.append(r)\n\n\tn1_list = reversed(n1_list)\t\t\t\t\t# Since we are operating on a binary tree, we sort\n\tfor i in n1_list:\t\t\t\t\t\t\t# in decending order to operate on the latest nodes\n\t\tif i in n2_list:\t\t\t\t\t\t# if a match is found, we know that it is the lowest common ancestor\n\t\t\treturn i \t\t\t\t\t\t\t# If nothing is found, the root node is bound to be returned. 
And it correct.", "def search(self, key):\n if self.root is None:\n return None\n return self.root.search(key)", "def get_lca_in_bst(root, node_0, node_1):\n res = root\n s = node_0 if node_0.data < node_1.data else node_1\n b = node_1 if node_0.data < node_1.data else node_0\n while (res.data < s.data) or (res.data > b.data):\n while res.data < s.data:\n res = res.right\n while res.data > b.data:\n res = res.left\n return res", "def _get_node_pos(self, key):\n if not self._hashring:\n return\n\n k = md5_bytes(key)\n key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0]\n\n nodes = self._sorted_keys\n pos = bisect(nodes, key)\n\n if pos == len(nodes):\n return 0\n return pos", "def binary_search(my_list, key):\n if len(my_list) == 0:\n return False\n\n low = 0\n high = len(my_list) - 1\n\n while low <= high:\n midpoint = (high + low) // 2\n if key == my_list[midpoint]:\n return True\n if key < my_list[midpoint]:\n high = midpoint - 1\n else:\n low = midpoint + 1\n else:\n return False", "def binary_search(self, num_lst, key):\r\n # Running time: O(log n) with O(n logn) overhead\r\n # get sorted list\r\n num_lst = sorted(num_lst)\r\n \r\n low, high, idx = 0, len(num_lst), -1\r\n \r\n while low < high:\r\n mid = int(math.floor((low+high) / 2.0))\r\n \r\n if key < num_lst[mid]: high = mid - 1\r\n elif key > num_lst[mid]: low = mid + 1\r\n elif key == num_lst[mid]: \r\n idx = mid\r\n return idx\r\n \r\n return idx", "def get_ceil_index(left, right, key, A):\n while right - left > 1:\n mid = left + ( right - left) // 2\n\n if A[mid] >= key:\n right = mid\n else:\n left = mid\n\n\n return right", "def binary(sequence, n, key=def_key):\n length = len(sequence)\n\n if length == 0:\n return None\n\n j = 0 # start\n k = length # stop\n i = (j + k) // 2 # middle\n\n while j <= i:\n # If sequence[i] is the element, return its index\n if n == key(sequence[i]):\n return i\n elif j == i:\n return None\n elif n < key(sequence[i]):\n k = i # update stop\n else:\n j = i # update start\n\n i = (j + k) // 2\n\n # Did not find the element\n return None", "def known_bst():\n bst = BST()\n bst.insert_non_balance(5)\n bst.insert_non_balance(4)\n bst.insert_non_balance(2)\n bst.insert_non_balance(8)\n bst.insert_non_balance(7)\n bst.insert_non_balance(9)\n return bst, 3, 0", "def search(self, root: TreeNode, item: int):\n if not root:\n return False\n if root.value == item:\n return True\n if item < root.value:\n return self.search(root.left, item)\n else:\n return self.search(root.right, item)", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = 
state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def insert(self, key, value):\n\n if None == self.root:\n self.root = BSTNode(key,value)\n return True\n current_node = self.root\n while current_node:\n if key == current_node.key:\n print(\"The key does exist!\")\n return False\n elif key < current_node.key:\n if current_node.left:\n current_node = current_node.left\n else:\n current_node.left = BSTNode(key, value, current_node)\n return True\n else:\n if current_node.right:\n current_node = current_node.right\n else:\n current_node.right = BSTNode(key,value,current_node)\n return True", "def get_from_subtree(subtree, key):\n temp_subtree = subtree\n while temp_subtree is not None:\n if key == temp_subtree.key:\n return temp_subtree.value\n elif key < temp_subtree.key:\n temp_subtree = temp_subtree.left\n elif key > temp_subtree.key:\n temp_subtree = temp_subtree.right\n return None", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "def breadthFirstSearch(initialState, finalState):\n\n def exploreNext(neighbor, move):\n \"\"\"Finds out if the neighbor is withinf the boundaries and explore it.\n `explored` is the set used in the BFS function.\n `stateQueue` is the queue inside the BFS function.\n `currentState` is each visited node inside the loop of the BFS function.\n\n \"\"\"\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)\n\n stateQueue = 
deque([]) # List of States\n explored = set() # Set of tuples of each visited state of the puzzle\n sizeBytesCounter = 0\n\n # Init queue\n stateQueue.append(State(initialState))\n\n # while queue is not empty\n while stateQueue:\n currentState = stateQueue.popleft()\n sizeBytesCounter += sys.getsizeof(currentState)\n\n # Add an unmodified list to the set, a tuple\n explored.add(tuple(currentState.puzzle))\n\n if finalState == currentState.puzzle:\n return currentState, explored, sizeBytesCounter\n \n # Create a node of the current state\n currentNode = Node(currentState.puzzle)\n\n # Iterate over posible paths\n exploreNext(*currentNode.up())\n exploreNext(*currentNode.down())\n exploreNext(*currentNode.left())\n exploreNext(*currentNode.right())\n \n return None", "def fn(k, i):\n ii = -1 \n for x in path:\n if gcd(nums[k], x) == 1: # coprime \n if path[x] and path[x][-1][1] > ii: \n ans[k] = path[x][-1][0]\n ii = path[x][-1][1]\n \n path.setdefault(nums[k], []).append((k, i))\n for kk in tree.get(k, []): \n if kk not in seen: \n seen.add(kk)\n fn(kk, i+1)\n path[nums[k]].pop()", "def binary_search_by_recursion(sorted_collection, item, left, right):\n if (right < left):\n return None\n\n midpoint = left + (right - left) // 2\n\n if sorted_collection[midpoint] == item:\n return midpoint\n elif sorted_collection[midpoint] > item:\n return binary_search_by_recursion(sorted_collection, item, left, midpoint-1)\n else:\n return binary_search_by_recursion(sorted_collection, item, midpoint+1, right)", "def searchTreeF(node, d):\n if isinstance(node, DecisionTree):\n if node.i == 999: return node.mostCommon()\n if d[node.i] < node.v:\n return searchTreeF(node.lt, d)\n else:\n return searchTreeF(node.gt, d)\n else:\n return node", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n return GraphSearch(problem, 'bfs').search(fringe)" ]
[ "0.7437438", "0.7111154", "0.69640565", "0.69203466", "0.6868692", "0.6824069", "0.6769169", "0.6727306", "0.67060405", "0.667904", "0.6678879", "0.65389585", "0.65307343", "0.6508008", "0.6506147", "0.649953", "0.6497162", "0.64918184", "0.6488997", "0.6484715", "0.6478493", "0.64705837", "0.646589", "0.6464406", "0.64341295", "0.6399682", "0.636757", "0.6359498", "0.6358129", "0.63537645", "0.63382334", "0.63370234", "0.63344866", "0.63338035", "0.6320585", "0.6284104", "0.62827224", "0.6264804", "0.6251605", "0.62420726", "0.6236959", "0.6212365", "0.6201734", "0.61908704", "0.61848474", "0.61819786", "0.61781174", "0.6169648", "0.61653894", "0.6159238", "0.61553854", "0.61524683", "0.61510336", "0.6138368", "0.6137815", "0.6135901", "0.61283076", "0.611663", "0.61061263", "0.60825586", "0.6081148", "0.60789055", "0.6069992", "0.6050785", "0.6050774", "0.6038552", "0.6037548", "0.6035032", "0.60238427", "0.6019691", "0.6019108", "0.6017067", "0.6014474", "0.59976083", "0.5993878", "0.5990576", "0.5986813", "0.59834003", "0.5981416", "0.5976228", "0.5974945", "0.5972862", "0.5972838", "0.59697866", "0.5967165", "0.59670496", "0.5961488", "0.59589887", "0.5957813", "0.5956523", "0.59539217", "0.5952542", "0.5949919", "0.5942379", "0.594181", "0.5938489", "0.5937443", "0.5927503", "0.5927056", "0.5925055" ]
0.6536112
12
Publish a registration to the core, listing the API commands.
def register_to_core(self): self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cli(ctx, input, output):\n print(\"Registering...\")\n ctx.obj = dict(\n component=Registration,\n input=input,\n output=output,\n stack=ImageStack.from_path_or_url(input),\n )", "def register_routes(self, api):\n # Device Registration\n api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def auto_discover():\n auto_registration(\"actions\")", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n if 'paths' not in self.spec:\n raise RuntimeError(f'{self.spec_path} is not a valid OpenAPI spec.')\n for path_, path_info in self.spec['paths'].items():\n for verb, method_info in path_info.items():\n if verb.upper() not in HTTP_VERBS:\n continue\n ref_ext = {}\n ref_ext['tag'] = method_info['tags'][0]\n ref_ext['operation_id'] = method_info['operationId']\n key = self.REGISTRY_KEY_TEMPLATE.format(verb=verb.upper(),\n command=path_)\n self.registry[key] = ref_ext\n logger.debug(f'Generated registry:\\n{self.registry}')", "def registered(self):\n log.info(\"Registered.\")\n pass", "def get(self, request, format=None):\n operations = register.meta\n return Response(operations)", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def publish():\n pass", "async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n 
url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"", "def publish(self):\n return", "def register_commands(self):\n\n with open(self._full_register_name, 'r') as file_to_read:\n command_register = json.loads(file_to_read.read())\n\n commands = command_register.get(\"commands\")\n if commands is None:\n logging.error(\"Command register is incorrect\")\n return []\n\n command_objects = []\n\n for command in commands:\n module_name = command.get(\"module\")\n class_name = command.get(\"class_name\")\n\n if (module_name is None) or (class_name is None):\n logging.error(\"Commands in the register are described in incorrect way.\")\n raise KeyError()\n\n try:\n command_module = importlib.import_module(module_name)\n command_class = getattr(command_module, class_name)\n command_object = command_class()\n command_objects.append(command_object)\n except ModuleNotFoundError as e:\n logging.error(\"Command modules specified in the register are not found!\")\n raise e\n\n return command_objects", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def register_events():\n return [Events.Command(\"example_command\")]", "def test_registration(self):\n models = [BlogEntry, BlogRoll]\n pubsub.register(models)\n self.assertTrue(set(models).issubset(pubsub.registry))", "def publish(self, kpi_dict):\n pass", "def register(blueprint):\n blueprint.add_route(post, \"/call-records\", methods=['POST'])", "def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. 
Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package", "def run(self, registry):", "def plugin_register(api):\n api.range_tool_register('Example/01', MyPlugin)\n return True", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def on_register(self, response):\n print('You have been registered!')\n self.on_auth(response)", "async def register_completions(ls: RobotFrameworkLanguageServer, *args):\n params = RegistrationParams([Registration(str(uuid.uuid4()), COMPLETION, {\"triggerCharacters\": \"[':']\"})])\n response = await ls.register_capability_async(params)\n if response is None:\n ls.show_message(\"Successfully registered completions method\")\n else:\n ls.show_message(\"Error happened during completions registration.\", MessageType.Error)", "def registration():\n registration_page = Registration()\n registration_page.registration_main_page()", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def regist_list(request):\n if request.method == 'GET':\n obj = Registration.objects.all()\n serializer = RegistrationSerializer(obj, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = RegistrationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def register_resources(self):\n raise NotImplementedError", "def register(self, parent):\n parent.registerCommand('delete', self.processDeleteCommand)\n parent.registerCommand('meshcreated', self.processMeshCreated)", "def register(args, config):\n\n api = config['API']\n r = Request(api['register'], method='GET')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n print('UH OH!')\n return\n # read in the template we got from the server\n jsn = json.loads(resp.read().decode())\n out = {}\n reqs = jsn.get('RegistrationRequirements')\n w = \"\"\"\n| *---------------------------------------------------------------* |\n| Welcome to FLEET, new user! Please follow the prompt to register. |\n| *---------------------------------------------------------------* |\n\"\"\"\n print(w)\n print('\\nPlease provide the following information: \\n')\n for k, v in reqs.items(): # prompt and assign to out\n m = '{} (Requirements: {}): '.format(k, v)\n if k.lower() == 'password':\n out[k.lower()] = getpass.getpass(m) # make keys lowercase\n else:\n out[k.lower()] = input(m)\n r = Request(\n api['register'], data=urlencode({'RegistrationInfo': out}).encode(), \n method='POST'\n )\n try:\n resp = urlopen(r)\n jsn = json.loads(resp.read().decode())\n except HTTPError as e:\n print('Something went wrong processing your request to register')\n return\n if jsn.get('errors'):\n print('Some errors were found. 
Please fix the following and retry:\\n')\n for e in jsn.get('errors'):\n print(e)\n else:\n info = jsn.get('registered')\n print('You have been successfully registered:\\n{}'.format(info))", "def register(self):\n logger.info(\"Registering with Hub...\")\n register_complete = Event()\n\n def on_register_complete(result=None, error=None):\n # This could be a failed/successful registration result from the HUB\n # or a error from polling machine. Response should be given appropriately\n if result is not None:\n if result.status == \"assigned\":\n logger.info(\"Successfully registered with Hub\")\n else: # There be other statuses\n logger.error(\"Failed registering with Hub\")\n if error is not None: # This can only happen when the polling machine runs into error\n logger.info(error)\n\n register_complete.set()\n\n self._polling_machine.register(callback=on_register_complete)\n\n register_complete.wait()", "def registry(request):\n return render(request, 'registry.html', {\"registries\": get_registries()})", "def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def register(self):\n raise NotImplementedError()", "def RunCommand(self, params):\n prefix = ['container', 'hub', 'memberships', 'register']\n return self.Run(prefix + params)", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def __publish_event(self, event_type, registration):\n # Get the import or export reference\n if event_type in rsa_beans.EXPORT_TYPES:\n reference = registration.get_export_reference()\n else:\n reference = registration.get_import_reference()\n\n # Prepare the event bean\n event = rsa_beans.RemoteServiceAdminEvent(event_type,\n self._context.get_bundle(),\n reference,\n registration.get_exception())\n\n # Call listeners in the current thread\n for listener in self._listeners[:]:\n listener.remoteAdminEvent(event)", "def register_blueprints(api):\n for module in MODULES:\n api.register_blueprint(module.blp)", "async def __add_commands(self):\r\n commands_to_add: List[ClientCommandStructure] = [\r\n cmd for cmd in ChatCommandHandler.register.values()\r\n if cmd.app not in self._api_commands\r\n ]\r\n\r\n if commands_to_add:\r\n for cmd in commands_to_add:\r\n endpoint = f\"applications/{self.client.bot.id}\"\r\n\r\n if cmd.app.guild_id is not MISSING:\r\n endpoint += f\"/guilds/{cmd.app.guild_id}\"\r\n\r\n await self.client.http.post(\r\n endpoint + \"/commands\",\r\n cmd.app.to_dict()\r\n )", "def list(pat: str, resource_registration_endpoint: str,\n secure: bool = False) -> List[str]:\n headers={\"Authorization\": \"Bearer \"+pat}\n\n disable_warnings_if_debug(secure)\n response = request(\"GET\", resource_registration_endpoint, headers=headers, verify=secure)\n\n if not is_ok(response):\n raise Exception(\"An error occurred while listing resources: \"+str(response.status_code)+\":\"+str(response.reason)+\":\"+str(response.text))\n\n return response.json()", "async def register(websocket):\n app['websockets'].add(websocket)\n await notify_users()", "def write_registry(self) -> None:\n self.manager.write_registry()", "def register(self):\n if self.registered:\n return\n\n 
config = current_app.config.get('TERMINAL_CONFIGS', {})\n apps = config.get('apps', [])\n\n for app in apps:\n cls, mod = app.rsplit('.', maxsplit=1)\n imported = import_module(cls)\n instance = getattr(imported, mod)()\n\n if getattr(instance, 'name', None) is None:\n continue\n\n if getattr(instance, 'hidden', False):\n self.hidden[getattr(instance, 'name')] = instance\n else:\n self.apps[getattr(instance, 'name')] = instance\n\n self.__set_apps_aliases(getattr(instance, 'name'), getattr(instance, 'aliases'))\n\n self.registered = True", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def register(self):\n self.app.bind('CreateSuperUserCommand', CreateSuperUser())\n self.app.bind('InstallCommand', Install())\n self.app.bind(\n 'AdminUserMigrationDirectory',\n os.path.join(package_directory, 'migrations')\n )", "def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)", "def register_adhocs(self):\n aboutform = self.plugin['xep_0004'].makeForm('form', \"About SleekBot\")\n aboutform.addField('about', 'fixed', value= self.__doc__)\n self.plugin['xep_0050'].addCommand('about', 'About Sleekbot', aboutform)\n pluginform = self.plugin['xep_0004'].makeForm('form', 'Plugins')\n plugins = pluginform.addField('plugin', 'list-single', 'Plugins')\n for key in self.cmd_plugins:\n plugins.addOption(key, key)\n plugins = pluginform.addField('option', 'list-single', 'Commands')\n plugins.addOption('about', 'About')\n #plugins.addOption('config', 'Configure')\n self.plugin['xep_0050'].addCommand('plugins', 'Plugins', pluginform, self.form_plugin_command, True)", "def _add_subcommands():\n geocube.add_command(cmd_modules.make_geocube.make_geocube)", "def _register_handler(self, callback, cmd, helphint, hidden, handlers,\n synonyms=(), plugin=None):\n # Register any synonyms (done before we frig with the handlers)\n for entry in synonyms:\n self._register_handler(callback, entry, helphint, True, handlers,\n plugin=plugin)\n\n # Allow simple commands to be passed as strings\n cmd = cmd.split() if isinstance(cmd, (str, unicode)) else cmd\n\n for part in cmd:\n handlers = handlers.subcommands.setdefault(part, Handlers([], {}))\n handlers.handlers.append(Registration(callback, \" \".join(cmd),\n helphint, hidden, plugin))", "def registration_started(self):\n pass", "def register(app):\n app.register_plugin('jwtvalidate', execute, helptext())", "def post(self):\n reg = self.request.get('registry')\n region_name = self.request.get('region')\n if reg and len(reg) > 0 and reg.isalnum() and validate_region(region_name):\n region = get_region_id(region_name)\n # Create Registry on IOT Core\n iot = IOT()\n success, message = iot.create_registry(region,reg)\n if success:\n # Add registry to Datastore\n ds = Datastore()\n status = ds.add_registry(reg, region_name)\n self.response.headers['Content-Type'] = 'text/plain'\n if status:\n self.response.write('Registry Added')\n else:\n self.response.write('Registry already exists')\n else:\n self.response.write(message)\n else:\n self.response.write('invalid parameters: ' + reg + \" \" + region_name )", "def Transform(self, registration):\n yaml = yp.YamlPrinter()\n self._TransformKnownFields(yaml, registration)\n self._TransformRemainingFields(yaml, registration)", "def register_publisher(self, hostname, expire=-1):", "def register():\n result = register_helper(User)\n return jsonify(result[0]), result[1]", "async def 
_perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())", "def register(self, voice=False):\n payload = {\"type\": \"register\", \"username\": self.username, \"voice\": voice}\n self._send_command(payload)", "def register(self):\n\n RPCObjectsRegistry.add(self)", "def register(self, cli: Client, channel: str) -> None:\n subscribers = self._channels_to_subscribers.get(channel, [])\n subscribers.append(cli)\n self._channels_to_subscribers[channel] = subscribers", "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def make_command_register(collector):\n\n def _register(*args, name=None):\n a_transform = _transform(*args)\n return collector.register(transform=a_transform, name=name)\n\n return _register", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "def view_registry(self) -> None:\n\n arr = self.load_links()[0]\n for i,v in enumerate(arr):\n print(f\"<{i}: {v}>\\n\")\n pass", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def api():\n\treturn \"The API call\"", "def post(self):\n request_error = self.__validatePackageRegisterRequest(request)\n if request_error:\n return jsonify(error_message=request_error), 400\n login = get_jwt_identity()\n self.__registerPackageFromRequest(login, request)\n return \"Created\", 201", "def register_commands(self):\n for module in copy.copy(sys.modules).values():\n for command in module_functionalities(module, 'MARA_CLICK_COMMANDS', click.Command):\n if 'callback' in command.__dict__ and command.__dict__['callback']:\n package = command.__dict__['callback'].__module__.rpartition('.')[0]\n if package != 'flask':\n register_command(self, command, package)", "def register_cli_commands(app):\n app.cli.add_command(init_events_command)", "def samples():\n\n # get the total register count from the XML API\n try:\n r = requests.get(config.XML_API_URL_TOTAL_COUNT)\n no_of_items = int(r.content.decode('utf-8').split('<RECORD_COUNT>')[1].split('</RECORD_COUNT>')[0])\n\n page = request.values.get('page') if request.values.get('page') is not None else 1\n per_page = request.values.get('per_page') if request.values.get('per_page') is not None else 20\n items = _get_items(page, per_page, \"IGSN\")\n except Exception as e:\n print(e)\n return Response('The Samples Register is offline', mimetype='text/plain', status=500)\n\n r = pyldapi.RegisterRenderer(\n request,\n request.url,\n 'Sample Register',\n 'A register of Samples',\n items,\n [config.URI_SAMPLE_CLASS],\n no_of_items\n )\n\n return r.render()", "def call(self, **kwargs):\n self.w(u\"<h1>Welcome to the management system.</h1>\")\n #self.wview(\"registration\")", "def registerConsole(self, userID, key):\r\n self._endpoint.registerConsole(userID, key)", "def list(self, name=None):\n Console.ok(f\"LIST: Using {Registry.PROTOCOL_NAME} Protocol\")\n return self.protocol.list(name)", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. 
Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. 
If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def list_callables(self):\n self.logger.debug(\"List of callable API objects requested\")\n # Dict of subsystem object names to their callable methods.\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n # Filter out methods which are not explicitly flagged for export\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)", "def _register_ops(self):\n ops = []\n ops.append(BatchAppsOps.register(\"shared.home\",\n \"Home\",\n self._home))\n ops.append(BatchAppsOps.register(\"shared.management_portal\",\n \"Management Portal\",\n self._management_portal))\n return ops", "def register_command(self, func):\n self.commands[func.__name__] = func", "def _help_add_regs(self, output, handlers, plugin=None):\n for reg in handlers.handlers:\n if not reg.hidden and (plugin is None or plugin is reg.plugin):\n output.append(\" %s %s\" % (reg.command, reg.helphint))\n for _, hdlrs in sorted(handlers.subcommands.items()):\n self._help_add_regs(output, hdlrs, plugin)", "def upload():\n sh('python setup.py register sdist upload')", "def _register(self, comm, handler):", "def commands():", "def list_all_apis():\n app.logger.info('Request for api list')\n func_list = []\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n methods = ','.join(rule.methods)\n func_list.append(\n (rule.rule, methods, app.view_functions[rule.endpoint].__doc__))\n return make_response(jsonify(name='Promotion REST API Service',\n version='1.0',\n functions=func_list), status.HTTP_200_OK)", "def register(self):\n self._log.debug(\"Registering Nsr op data path %s as publisher\",\n NsrOpDataDtsHandler.XPATH)\n\n hdl = rift.tasklets.DTS.RegistrationHandler()\n with 
self._dts.group_create() as group:\n self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,\n handler=hdl,\n flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)", "def _register_to_mongodb(logger, summary: Dict = None):\n logger.info('registering image to Jina Hub database...')\n\n with resource_stream('jina', '/'.join(('resources', 'hubapi.yml'))) as fp:\n hubapi_yml = yaml.load(fp)\n\n hubapi_url = hubapi_yml['hubapi']['url'] + hubapi_yml['hubapi']['push']\n\n if not credentials_file().is_file():\n logger.error(f'user hasnot logged in. please login using command: {colored(\"jina hub login\", attrs=[\"bold\"])}')\n return\n\n with open(credentials_file(), 'r') as cf:\n cred_yml = yaml.load(cf)\n access_token = cred_yml['access_token']\n\n if not access_token:\n logger.error(f'user has not logged in. please login using command: {colored(\"jina hub login\", attrs=[\"bold\"])}')\n return\n\n headers = {\n 'Accept': 'application/json',\n 'authorizationToken': access_token\n }\n try:\n import requests\n response = requests.post(url=f'{hubapi_url}',\n headers=headers,\n data=json.dumps(summary))\n if response.status_code == requests.codes.ok:\n logger.info(response.text)\n elif response.status_code == requests.codes.unauthorized:\n logger.error(f'user is unauthorized to perform push operation. '\n f'please login using command: {colored(\"jina hub login\", attrs=[\"bold\"])}')\n elif response.status_code == requests.codes.internal_server_error:\n if 'auth' in response.text.lower():\n logger.error(f'authentication issues!'\n f'please login using command: {colored(\"jina hub login\", attrs=[\"bold\"])}')\n logger.error(f'got an error from the API: {response.text}')\n except Exception as exp:\n logger.error(f'got an exception while invoking hubapi for push {repr(exp)}')", "def get_registries(self):\n raise NotImplementedError(\"get_registries method is not implemented.\")", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "async def register(ctx, *args):\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. 
:blue_heart:\")", "def register_pc_blueprints(app):\n blueprints = [\n registration_page,\n spectrum_inquiry_page,\n grant_page,\n heartbeat_page,\n relinquishment_page,\n deregistration_page,\n ]\n register_blueprints(app, blueprints, app.config['API_PREFIX'])", "def registered_dtrajs(request):\n key = request.node.funcargs.get(\"registered_key\")\n\n return examples.registered[key]", "def registered(username: str):\n r = client.is_registered(username=username)\n print(json.dumps(r))", "def register (method, event):\n Publisher.subscribe (method, event)", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def register_topic(self, name, command):\n topic_name = command['topic_name']\n try:\n topic_type = self.get_interface_type(command['interface_type'], '.msg')\n self.pubs[topic_name] = self.create_publisher(topic_type, topic_name, 1)\n except JoyTeleopException as e:\n self.get_logger().error(\n 'could not register topic for command {}: {}'.format(name, str(e)))", "def register(name, value):\n\n return Plugins.register(name, value)", "def commands():\n pass" ]
[ "0.6083113", "0.6011665", "0.59751356", "0.5912697", "0.5612715", "0.55548126", "0.5551435", "0.5506128", "0.54874337", "0.53894794", "0.5374884", "0.5348596", "0.5333862", "0.53083724", "0.52938396", "0.52787286", "0.52458644", "0.5237489", "0.5214423", "0.52052796", "0.5195345", "0.51941556", "0.5175724", "0.51690984", "0.5162744", "0.51612574", "0.5149885", "0.5149885", "0.51403415", "0.5127455", "0.5123719", "0.5115077", "0.5096258", "0.50622463", "0.50534964", "0.50362015", "0.50342715", "0.50147134", "0.50142974", "0.50039613", "0.5001791", "0.4991709", "0.49848968", "0.49775082", "0.49764708", "0.4975991", "0.4970153", "0.4970153", "0.4958945", "0.4952692", "0.49393773", "0.49249414", "0.4921457", "0.49006322", "0.49003956", "0.4895619", "0.4876849", "0.48745424", "0.48644614", "0.48574796", "0.48391876", "0.48351112", "0.48328876", "0.4828029", "0.48260778", "0.48234293", "0.48213935", "0.48185435", "0.48162878", "0.47994208", "0.47975177", "0.47947785", "0.47864652", "0.47852916", "0.47788316", "0.47783208", "0.4776774", "0.47748297", "0.47627217", "0.47613317", "0.4759699", "0.47487473", "0.4740894", "0.47291443", "0.47284076", "0.47246128", "0.47197688", "0.4717535", "0.47174832", "0.4717191", "0.47038138", "0.47030765", "0.47014207", "0.46954066", "0.469325", "0.46930325", "0.46902555", "0.46850762", "0.4684583", "0.46770477" ]
0.6798816
0
Subscribe to the queue matching the instance's name. Pass the command to the process_command function.
def subscribe_to_commands(self): self.basic_consume(self.process_command, queue=self.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_command(self, cmd):\n self.command_queue.put(cmd)", "def Enqueue(self, command):\n\n self.queue.put(command)", "def command(self, command_string):\n self.__command_queue.append(command_string)", "def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def _process_command_queue(self, command_queue):\n while True:\n if len(command_queue) > 0:\n command_tuple = command_queue.pop()\n func, kwargs = command_tuple[0], command_tuple[1]\n getattr(self, func)(**kwargs)\n time.sleep(.5)", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def sendCommand(self, command:str=\"?\"):\n self.commandQueue.put(command)\n #self.queueLock.release()\n pass", "def subscribe(self, queue, action=None):\n if action:\n self.broker.subscribe(queue, action)\n else:\n self.broker.subscribe(queue)", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def enqueue(self, command):\n\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n q = []\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if command not in q:\n q.append(command)\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()", "def on_queued_command(self, event, index=None):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right now.\",\n )\n elif index is None:\n api_loop(\n event.channel.send_message,\n \"There are {} songs queued ({} minutes). 
To get a specific song's info, just do this command + index.\".format(\n len(self.get_player(event.guild.id).queue),\n self.minutes_format(self.get_player(\n event.guild.id,\n ).queue_length),\n ),\n )\n elif (index.replace(\"-\", \"\").strip(\" \").isdigit() and\n 0 <= (int(index.replace(\"-\", \"\").strip(\" \")) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[\n int(index.replace(\"-\", \"\").strip(\" \")) - 1\n ].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.\".format(\n int(index.replace(\"-\", \"\").strip(\" \")),\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n elif index.replace(\"-\", \"\").isdigit():\n api_loop(event.channel.send_message, \"Invalid index input.\")\n else:\n matched_list = dict()\n for item in self.get_player(event.guild.id).queue:\n ratio = partial_ratio(item.metadata[\"title\"], index)\n if ratio >= 70:\n matched_list[\"#{} ({}% match)\".format(\n self.get_player(event.guild.id).queue.index(item)+1,\n ratio,\n )] = item.metadata[\"title\"]\n if matched_list:\n embed = bot.generic_embed_values(\n title=\"Queue search results\",\n footer_text=\"Requested by {}\".format(event.author),\n non_inlines={\n k: matched_list[k] for k in list(matched_list)[-25:]\n },\n footer_img=event.author.get_avatar_url(size=32),\n timestamp=event.msg.timestamp.isoformat(),\n )\n api_loop(event.channel.send_message, embed=embed)\n else:\n api_loop(\n event.channel.send_message,\n \"No similar items found in queue.\",\n )", "def start_cron(message_queue, queue_name=settings.APN_SEARCH_QUEUE, handler_class=MessageHandler):\n logging.info('Starting search update script.')\n message_handler = handler_class().process_message\n with message_queue.open(queue_name) as queue:\n for message_body, message_id in queue:\n message_handler(message_body, message_id, queue)", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def _enqueue_server_command(self, command: ServerCommand) -> None:\n with self._subprocess_commands_lock:\n self._subprocess_commands.append(command)", "async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")", "def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def listen_commands(self):\n self._consumer_rabbit_connection = BlockingConnection(ConnectionParameters(self._rabbit_host))\n 
self._consumer_rabbit_channel = self._consumer_rabbit_connection.channel()\n\n # Listen buy/sell orders from external system\n self._listen_queue(QueueName.CMD_BUYSELL, self.on_cmd_buysell)\n self._listen_queue(QueueName.MSG_RAW, self.on_raw_msg)\n # self._logger.info(f\"Declaring rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.queue_declare(queue=QueueName.CMD_BUYSELL, durable=True, auto_delete=True)\n # self._logger.info(f\"Consiming to rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.basic_consume(QueueName.CMD_BUYSELL, self.on_cmd_buysell,\n # consumer_tag=\"WebQuikBroker\")\n self._consumer_rabbit_channel.start_consuming()", "def receive_key(self, key):\n self.queue.put(key)", "def send_msg(self, my_queue, my_msg):", "def add_command(self, name, command=None, scheduler=None, limit_queue=None, on_full_queue=\"skip_current\", priority=0):\n if name in self._commands:\n raise ValueError(\"command {} already exists\".format(name))\n if command is None:\n command=getattr(self,name)\n if scheduler is None:\n scheduler=callsync.QQueueLengthLimitScheduler(max_len=limit_queue or 0,on_full_queue=on_full_queue)\n self._commands[name]=(command,scheduler)\n self._add_scheduler(scheduler,priority)\n self._override_command_method(name)", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))", "def runQueueEnqueue(self):\n raise NotImplementedError", "def send(self, job_command, payload):\n self.work_queue_client.send(job_command, payload)", "def _listen_queue(self, queue, callback):\n # Listen buy/sell orders from external system\n self._logger.info(f\"Declaring rabbit queue {queue}\")\n self._consumer_rabbit_channel.queue_declare(queue=queue, durable=True, auto_delete=True)\n self._logger.info(f\"Declaring callback to rabbit queue: {queue}, callback: {callback}\")\n self._consumer_rabbit_channel.basic_consume(queue, callback,\n consumer_tag=queue)", "def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break", "def handle_command(message, slack_config):\n\n message.react(\"+1\")\n\n handler = {\n \"schedule_job\": handle_schedule_job,\n \"cancel_job\": handle_cancel_job,\n \"schedule_suppression\": handle_schedule_suppression,\n \"cancel_suppression\": handle_cancel_suppression,\n }[slack_config[\"type\"]]\n\n handler(message, slack_config)", "def __launch__(self,config,command=None,**kwargs):\n if command is None:\n command = ['sleep 30;','qsub']\n return SampleQsubProcess.__launch__(self,config,command=command,**kwargs)", "def callback_queue(self, data):\n\t\tglobal command_flag, current_command\n\t\t#grab action command\n\t\taction = data.action\n\t\t#Discards pending 
messages and null/invalid messages\n\t\tif action.split(' ')[0] in riu.valid_cmds and not data.pending:\n\t\t\tif action in riu.interrupts:\n\t\t\t\tself.process_interrupt(action)\n\t\t\telif action.split(' ')[0] not in self.ignored_commands:\n\t\t\t\tprint(\"Setting action\")\n\t\t\t\tcommand_flag = True\n\t\t\t\tcurrent_command = action", "def dispatch(self, event):\n self.queue.put(event)", "def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def __handle_action_queue(self, file_name: str):\n # value is double encoded\n file_name = unquote(file_name)\n\n command = Controller.Command(Controller.Command.Action.QUEUE, file_name)\n callback = WebResponseActionCallback()\n command.add_callback(callback)\n self.__controller.queue_command(command)\n callback.wait()\n if callback.success:\n return HTTPResponse(body=\"Queued file '{}'\".format(file_name))\n else:\n return HTTPResponse(body=callback.error, status=400)", "def register(self, command_name, command):\n self._commands[command_name] = command", "def _traverse_command(self, name, *args, **kwargs):\n # TODO: implement instance level cache\n if not name in self.available_commands:\n raise AttributeError(\"%s is not an available command for %s\" % (name, self.__class__.__name__))\n attr = getattr(self.connection, \"%s\" % name)\n key = self.key\n log.debug(u\"Requesting %s with key %s and args %s\" % (name, key, args))\n result = attr(key, *args, **kwargs)\n result = self.post_command(\n sender=self,\n name=name,\n result=result,\n args=args,\n kwargs=kwargs\n )\n return result", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def _command(self, *cmd, handler=None):", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()", "def cmd(self, command):\n self._commands.append(command)", "def spawn_process_for(self, gq):\n pipe_top, pipe_bottom = multiprocessing.Pipe()\n p = multiprocessing.Process(target=GridQueue.listen,args=(gq, pipe_bottom))\n p.start()\n self.pipes[gq.index] = pipe_top", "def __setitem__(self, command: TelnetCommand, expected_response: ExpectedResponse):\n self._queue[command.group][command] = expected_response", "def process_received_message(self, message):\n self.log.debug('Received \"%s\"', message)\n self.receive_queue.put(message)", "def on_next_command(self, event):\n self.pre_check(event)\n if not 
self.get_player(event.guild.id).queue:\n return event.channel.send_message(\"There aren't any songs queued.\")\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n event.channel.send_message(\n \"Next in queue is ``{}`` by ``{}`` with length ``{}`` minutes using ``{}``.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def amqp_process_for_nfvi_kpi(self):\n if self.amqp_client is None and self.enable:\n self.amqp_client = multiprocessing.Process(\n name=\"AmqpClient-{}-{}\".format(self.mgmt['ip'], os.getpid()),\n target=self.run_collectd_amqp)\n self.amqp_client.start()", "def enqueue(self, name):\n pass", "def send(self, bcp_command, callback=None, **kwargs):\n self.sending_queue.put(bcp.encode_command_string(bcp_command,\n **kwargs))\n if callback:\n callback()", "def event_queue_proc(self,event):\r\n event()", "def on_command(server, user, command, args):", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "async def _run_command(self, command, *args, **kwargs):\n pass", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))", "def put_message(cls, message):\n rp = cls.get()\n rp.queue_receive.put(message)", "def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)", "def on_queue_next_command(self, event, index):\n self.pre_check(event)\n self.same_channel_check(event)\n if 1 < index <= len(self.get_player(event.guild.id).queue):\n index -= 1\n self.get_player(event.guild.id).queue.insert(\n 0,\n self.get_player(event.guild.id).queue.pop(index),\n )\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"Moved ``{}`` to the front 
of the queue.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n else:\n api_loop(event.channel.send_message, \"Invalid index input.\")", "def command_callback(self, command):\n while not self.socket_available: # wait for socket to be available\n pass\n self.socket_available = False # block socket from being used in other processes\n if self.robot.is_in_error():\n self.robot.ResetError()\n self.robot.ResumeMotion()\n reply = self.robot.exchange_msg(command.data, decode=False)\n self.socket_available = True # Release socket so other processes can use it\n if reply is not None:\n self.reply_publisher.publish(reply)", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def addQueueEntry(*args):\n try:\n #A unique id for each command.\n self.cmd_seq = self.cmd_seq + 1\n #Create a new queu entry\n self.entries[self.cmd_seq] = _QueueEntry(self, name, args, self.cmd_seq, self.log)\n #append it to the command queue\n self.queue.append(self.cmd_seq)\n #Return handle to the new entry for setting callbacks on.\n return self.entries[self.cmd_seq]\n except Exception as ex:\n self.log.failure(\"Error in addQueueEntry {err!r}\",err=str(ex))", "def work():\n with rq.Connection(create_connection()):\n worker = rq.Worker(list(map(rq.Queue, listen)))\n worker.work()", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def receive_key(self, key):\n try:\n self.queue.put(key)\n except:\n raise #Just collecting possible exceptions for now", "def say(self, message):\n log.raw(\"Add {0} message to {1} queue\".format(message, self.name))\n if message == \"stop\":\n log.debug(show_trace())\n if not self.is_running() and message == \"stop\":\n log.error(\"CATCH AND AVOID stop BEFORE LAUNCED VLC\")\n return\n self._stdin_queue.put_nowait(message)", "async def queue(self, msg, song):\n title1 = await Downloader.get_info(self, url=song)\n title = title1[0]\n data = title1[1]\n # NOTE:needs fix here\n if data['queue']:\n await self.playlist(data, msg)\n # NOTE: needs to be embeded to make it better output\n return await msg.send(f\"Added playlist {data['title']} to queue\")\n self.player[msg.guild.id]['queue'].append(\n {'title': title, 'author': msg})\n return await msg.send(f\"**{title} added to queue**\".title())", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "def subscribe(ami, worker):\n\n if hasattr(worker, \"event\"):\n for event in worker.event:\n if isinstance(event, (str, unicode)):\n functionName = \"handle_\" + event\n if hasattr(worker, functionName):\n functionToCall = getattr(worker, functionName)\n ami.events.subscribe(event, functionToCall)\n return", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, 
self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def torque_job(cmd, pollpath, name, queue):\r\n qsub_call = \"qsub -k oe -N %s -q %s\" % (\"MOTU\", queue)\r\n to_submit = 'echo \"%s; echo $? > %s\" | %s' % (cmd, pollpath, qsub_call)\r\n\r\n return to_submit", "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def process_queue_item(self, job_details):\n raise NotImplementedError(\"Workers must implement run.\")", "def message(cls, user, message, context):\n q.enqueue(foo, args=(user, message, context), result_ttl=0)\n pass", "def publish(self, queue, message):\n\n # Instead of passing a queue to the constructor, the publish checks if\n # the target queue exists. If not, it declares the target queue\n if not self.queue:\n self.channel.queue_declare(queue=queue)\n self.queue = queue\n\n self.channel.basic_publish(\n exchange='', routing_key=queue, body=message)", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def _queue_job(jid):\n q.put(jid)", "def _queue_job(jid):\n q.put(jid)", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def send(self, item):\n self.input_queue.put(item)", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def run(self):\n while True:\n self.command = input(\"> cmd >>> \")\n self.invoker.run(self.command)", "def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def enqueue(self, fn):\n self.queue.put(fn)", "def launcher(i,q,cmd):\n while True:\n #grabs ip,cmd from queue\n ip = q.get()\n print \"Thread %s: Running %s to %s\" % (i,cmd,ip)\n host = \"root@%s\"%ip\n subprocess.call([\"ssh\", host, cmd])\n q.task_done()", "def _queue(self, timeout=None):\n while True:\n # Retry self._kq_control if the system call was interrupted\n try:\n events = self._kq.control(None, 16, timeout)\n break\n except OSError, e:\n if e.errno == errno.EINTR:\n continue\n raise\n for ev in events:\n event = (ev.ident, ev.filter)\n if event in self._kq_events:\n if (ev.filter == select.KQ_FILTER_PROC and\n ev.fflags == select.KQ_NOTE_EXIT):\n self._kq_events.pop(event).emit()\n else:\n self._kq_events[event].emit()", "def handle_scheduled_command(\n self, command, channel, user, 
msg_type, args=None):\n if args:\n command = \" \".join([command, args])\n\n response = self.handle_command(command, channel, user, msg_type)\n self.slack_client.response_to_client(response)" ]
[ "0.63098824", "0.62418693", "0.6216443", "0.6078142", "0.6037645", "0.5994807", "0.59508395", "0.5937547", "0.5917672", "0.5872839", "0.58456135", "0.58215594", "0.5785163", "0.5764144", "0.5762315", "0.5658088", "0.55472076", "0.55459744", "0.5537582", "0.5512194", "0.5503746", "0.54825854", "0.54805356", "0.54773724", "0.54315215", "0.5409387", "0.5345059", "0.53336084", "0.5333292", "0.53247607", "0.5320445", "0.5311855", "0.53018403", "0.52919406", "0.5286885", "0.52772856", "0.52738583", "0.5271689", "0.5267163", "0.5261219", "0.5261043", "0.5238044", "0.5238044", "0.5238044", "0.5225681", "0.5223151", "0.5220448", "0.52146757", "0.52133405", "0.52129215", "0.52107024", "0.52094936", "0.5200088", "0.51857144", "0.517162", "0.51709807", "0.5165542", "0.5161088", "0.51506376", "0.5148389", "0.5147521", "0.5146298", "0.51405585", "0.5137652", "0.5137167", "0.51228166", "0.51175326", "0.51138926", "0.51039934", "0.5102257", "0.50978076", "0.5097301", "0.5093013", "0.5084734", "0.50797796", "0.5072149", "0.5068875", "0.5067634", "0.50581324", "0.5052784", "0.5051023", "0.50423247", "0.50406814", "0.503818", "0.5033555", "0.50315005", "0.50274384", "0.50274384", "0.502658", "0.5012616", "0.5005564", "0.5004334", "0.5001379", "0.49867165", "0.49827015", "0.4974374", "0.4974104", "0.49713507", "0.49703336", "0.49667382" ]
0.75445706
0
Call the command(s) that correspond to the message
def process_command(self, ch, method, properties, body): body_json = json.parse(body) for key in body_json: if self.commands.get(key) is not None: self.commands[key](body_json[key])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd(self, message):\n pass", "def execute(self, irc_c, msg, cmd):", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def do_command(self, args):\n pass", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def exec_commands(com):\n reply = ''\n if com is not None:\n if com == commands[0]:\n tables = db.create_tables(houses, from_)\n if tables == True:\n for j in range(len(c_responses[0]) - 1):\n# can use join and split functions to create softer code?? at least in future instances\n bot.send_message(c_responses[0][j], from_)\n else:\n reply = c_responses[0][(len(c_responses[0])-1)]\n elif com == commands[1]:\n house_info = db.house_info(from_)\n # Add feautures to find highest scoring house and return number of members\n reply = \"Houses:\\n\"\n for house in house_info:\n reply += house[1] + \"\\n\"\n if house[2] != None:\n reply += f\"Score: {house[2]}pts\\n\\n\"\n else:\n reply += f\"Score: 0pts\\n\\n\"\n elif com.startswith(commands[2]):\n instructions = com.split()\n id = 0\n info = user_query()\n user_id = info['user']['id']\n check = db.check_admin(from_, user_id)\n if check and check != 'not sorted':\n for house in houses:\n id += 1\n if house == instructions[1]:\n score = db.update_house_score(id, instructions[2], from_)\n reply = f\"{instructions[1]} new score is {score}\"\n else:\n reply = \"You have no power over me! PS:(if you are an admin use the /appoint me command to be recognised as such)\"\n\n\n elif com == commands[3]:\n username = item['message']['from']['username']\n user_id = item['message']['from']['id']\n num = db.add_member_info(username, from_, user_id)\n if num[1]:\n reply = f\"Better be... {houses[num[0]-1]}\"\n else:\n print(num[0][0])\n reply = f\"I stand by my decision, {houses[num[0][0]-1]} will help you on the way to greatness!\"\n elif com == commands[4]:\n m_list = db.member_info(from_)\n reply = str(m_list)\n elif com == commands[5]:\n info = user_query()\n username = info['user']['username']\n m_info = db.member_info(from_, username)\n reply = f\"\"\"\n Username: {m_info[2]}\\nHouse: {houses[m_info[3]]}\\nStatus: {m_info[4]}\\nScore: {m_info[5]}\\n\n \"\"\"\n elif com == commands[6]:\n info = user_query()\n username = info['user']['username']\n user_id = info['user']['id']\n status_info = info['status']\n if status_info == 'creator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Headmaster')\n reply = f\"Rise Headmaster {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! 
if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Headmaster\"\n elif status_info == 'administrator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Professor')\n reply = f\"Hence forth you shall be known as Professor {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Professor\"\n else:\n reply = 'Desist pretender! Only the entitled may command me so!'\n elif com == commands[7]:\n for command in commands:\n reply += f'{command}\\n'\n print(reply)\n \n return reply", "def process_commands(self, commands: List[str]):", "def commands():", "def cmd(self):", "def _command(self, *cmd, handler=None):", "def execute(UserMessage,player):\n if UserMessage['Action'] == \"Kill\": #This list represents the commands file I had before, neater that way?\n SendKill(UserMessage,player)\n if UserMessage['Action'] == 'Vote1':\n Vote1(UserMessage)\n if UserMessage['Action'] == 'Vote2':\n Vote2(UserMessage)\n if UserMessage['Action'] == \"DataPLZ\":\n DataPLZ(UserMessage)\n pass", "def get_command_called(self, slack_message: str) -> _SingleCommand:\n for command in self:\n command_part, _ = command.split_message(slack_message)\n if command_part:\n return command", "def execute_cmd(self, raw_string, session=None):\n super(Bot, self).msg(raw_string, session=session)", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def on_dccmsg(self, c, e):\n\n args = e.arguments()[0].split(\" \", 1)\n if len(args) > 0:\n self.do_command(args[0], c, e)", "def command(self, cmd):\n self.lmp.command(cmd)", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def command():\n pass", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def execute_message_received(self, message_received):\n pass", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n 
self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "def handle_command(ARGS, CLIENT, command, channel):\n message = '''Commands I know:\n list teams\n scores <optional week number>\n does Brandon suck\n '''\n message = \"\"\n attachments = \"\"\n if command == \"list teams\":\n message = '\\n'.join(map(lambda x: x.team_name, ARGS.league.teams))\n elif command == \"does brandon suck\":\n message = 'yes'\n elif 'scores' in command:\n pieces = command.split(' ')\n if len(pieces) == 1:\n message = 'Current Scoreboard'\n matchups = ARGS.league.scoreboard(projections=True)\n else:\n message = 'Scoreboard for week ' + pieces[1]\n matchups = ARGS.league.scoreboard(pieces[1], projections=True)\n\n attachments = [{\n 'fallback': 'A textual representation of your table data',\n 'fields': [\n {\n 'title': 'Home',\n 'value': '\\n'.join(map(lambda x: x.home_team.team_abbrev + \" \" + str(x.home_score) + \" (\" + str(x.home_projection) + \")\", matchups)),\n 'short': True\n },\n {\n 'title': 'Away',\n 'value': '\\n'.join(map(lambda x: x.away_team.team_abbrev + \" \" + str(x.away_score) + \" (\" + str(x.away_projection) + \")\", matchups)),\n 'short': True\n }\n ]\n }]\n CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, attachments=attachments, as_user=True)\n\n # CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, as_user=True)", "def run(self, commands: list[str]):\n ...", "async def execute(self, client, message, arg):\n\t\treturn", "def execute(cmd, msg, private=False):\n cmd_dict = private_cmds if private else cmds\n if cmd in cmd_dict:\n return cmd_dict[cmd](msg)", "async def _list_commands(self):\n message_cmds = \"regular commands:\\n\"\n tts_cmds = \"tts commands:\\n\"\n cur = self.conn.cursor()\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is true;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n tts_cmds += invoke[0] + ', '\n tts_cmds = tts_cmds[0:-2]\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE 
istts is false;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n message_cmds += invoke[0] + ', '\n message_cmds = message_cmds[0:-2]\n cur.close()\n await self.bot.say(message_cmds)\n await self.bot.say(tts_cmds)", "def execute(self, devices, command_bytes):", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def message_callback(self, message):\n message_data = json.loads(message)\n\n if message_data.get('command') == 'error':\n return self.command_error(message_data)\n\n if 'device_type' in message_data and not message_data['device_type'].startswith(self.device_filter):\n return\n\n # Try to find a matching command and execute it\n command_name = message_data['command']\n command_data = message_data.get('data', {})\n device_name = message_data.get('name')\n\n command_handler_name = 'command_{}'.format(command_name)\n if not hasattr(self, command_handler_name):\n logging.info(\"{} does not support command {}\".format(\n self,\n command_name\n ))\n return\n\n command_handler = getattr(self, command_handler_name)\n return command_handler(device_name, command_data)", "async def yandexcmd(self, message):\r\n await self.say(message, None, utils.get_args_raw(message))", "async def send_commands(ans: Message):\n await ans.answer(all_commands)", "def send_message(self, the_message):\n the_command = \"python3 -m mycroft.messagebus.send '\"+the_message+\"'\"\n print(\"send_message(): running command: \"+the_command)\n try:\n result = subprocess.check_output(the_command, shell=True)\n except subprocess.CalledProcessError as e:\n self.mpc_rc = str(e.returncode)\n print(\"send_message(): command: \"+the_command+\" returned \"+str(e.returncode))", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, 
items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def receive(self, command_list):\n for cmd in command_list:\n self._handle_command(cmd)", "def _process_commands(self, pwd, cmds):\n if self.func_map.get(cmds[0]):\n func = self.func_map[cmds[0]]\n \n args, kwargs = self._get_args(cmds[1:]) \n err_msg = self._check_input(func, args, kwargs)\n if err_msg: return err_msg\n \n _, return_msg = func(*args, **kwargs)\n\n else:\n 
return_msg = '[error]: no cmd found.'\n return return_msg", "def ConsoleRun(self, command, sender):\n pass", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def send_and_parse(self, cmd):\n\n lines = self.__send(cmd)\n messages = self.__protocol(lines)\n return messages", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Person not found. Last name may need to be capitalized. Try *{}*.\".format(\"@Rog Smog find [Last name]\")\n\n # Finds and executes the given command, filling in response\n response = None\n\n #Score lookup\n if command.startswith(\"score\"):\n print(\"Score request recieved. Processing...\")\n wb = openpyxl.load_workbook('Feedback_Form.xlsx')\n sheet = wb.active\n for row in range(2, sheet.max_row +1):\n if sheet['B' + str(row)].value is not None:\n open(\"FeedbackEmails.txt\", \"a\").write(sheet['B' + str(row)].value)\n open(\"PriorTrainJUST.txt\", \"a\").close()\n if sheet['B' + str(row)].value is not None and eval('command').replace('score ','') in sheet['B' + str(row)].value:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= [\"FIX\", sheet['BX' + str(row)].value, \"VT\", sheet['AZ' + str(row)].value, \"Live\", sheet['AB' + str(row)].value]\n )\n print(\"Score request processed.\")\n if eval('command').replace('score ','') not in open(\"FeedbackEmails.txt\").read():\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= \"Email not found.\"\n )\n open('FeedbackEmails.txt', 'w').close()\n \n #Person Lookup\n if command.startswith(\"find\"):\n print(\"Info request recieved. 
Processing...\")\n wb = openpyxl.load_workbook('Database.xlsx')\n sheet = wb.active\n for row in range(2, sheet.max_row +1):\n if sheet['D' + str(row)].value is not None and eval('command').replace('find ','') in sheet['D' + str(row)].value:\n response = [sheet['B' + str(row)].value, sheet['D' + str(row)].value, sheet['G' + str(row)].value, sheet['L' + str(row)].value, sheet['H' + str(row)].value, sheet['R' + str(row)].value]\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )\n print(\"request processed.\")\n print(eval('command').replace('find ',''))", "def do_command(self, command):\n self.state.start_turn(command)\n messages = []\n\n for stage in ('before_turn', 'during_turn', 'after_turn'):\n messages += self.do_actions(stage)\n\n if self.game_over:\n messages += self.do_actions('after_game')\n break\n\n return messages", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def call_command(self, name, args):\n try:\n cmd = self.commands[name]\n except KeyError:\n self.answer(\"Unknown command\", success=False)\n else:\n try:\n result, success = cmd(*args)\n result = result or \"\"\n self.answer(result, success=success)\n except TypeError as e:\n self.answer(\n \"Error when calling function: {}\".format(e),\n success=False,\n )", "def multi_command_start(self):\n\n log.debug(\n 'Starting multi-command message for device \"{0}\"'.format(self.name))\n\n if self.driver not in [drivers.pyvisa, drivers.lgpib]:\n raise NotImplementedError(\n 'Unsupported driver: \"{0}\".'.format(self.driver))\n\n self.multi_command = []\n self.responses_expected = 0", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def show_commands(self, message):\n user = self.ts.get_user(message)\n web_view_link = self.spreadsheets['commands'][1]\n short_url = self.shortener.short(web_view_link)\n self._add_to_whisper_queue(user, 'View the commands at: {}'.format(short_url))", "def run_command(self, command, joy_state):\n cmd = self.command_list[command]\n if cmd['type'] == 'topic':\n self.run_topic(command, joy_state)\n elif cmd['type'] == 'action':\n if cmd['action_name'] in self.offline_actions:\n self.get_logger().error('command {} was not played because the action '\n 'server was unavailable. 
Trying to reconnect...'\n .format(cmd['action_name']))\n self.register_action(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_action(command, joy_state)\n elif cmd['type'] == 'service':\n if cmd['service_name'] in self.offline_services:\n self.get_logger().error('command {} was not played because the service '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['service_name']))\n self.register_service(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_service(command, joy_state)\n else:\n raise JoyTeleopException(\n 'command {} is neither a topic publisher nor an action or service client'\n .format(command))", "def handle_command(command, channel):\r\n # Default response is help text for the user\r\n default_response = \"Not sure what you mean\"\r\n default_food_response = \"I didn't quite catch that, but I see that you mentioned something about food. If you want me to order some food, try: @Starter Bot Order <<food>>\"\r\n\r\n # Finds and executes the given command, filling in response\r\n # This is where you start to implement more commands!\r\n response = None\r\n\r\n verb_list=['order','place','make']\r\n food_list = [line.rstrip('\\n') for line in open('food.txt')]\r\n\r\n print(\"Made the lists\")\r\n\r\n predictor = Predictor.from_path(\"srl-model-2018.05.25.tar.gz\")\r\n result=predictor.predict(command)\r\n print(result)\r\n\r\n for dictionary in result['verbs']:\r\n verb = dictionary['verb']\r\n if verb in verb_list:\r\n if verb=='order':\r\n try:\r\n response = dictionary['description']\r\n response=response.split('ARG1: ')[1].replace(']','')\r\n except:\r\n print(\"We did an oopsie here\")\r\n\r\n print(\"Went through the dictionaries\")\r\n\r\n if response == None:\r\n for word in command:\r\n if word in food_list:\r\n response=default_food_response\r\n break\r\n\r\n # Sends the response back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=response or default_response\r\n )", "def msg_cmd(cmd, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CMD, \"\", cmd, version, order)", "def execute(self, cmd=\"\", msg=\"\", speak=False, duration=0):\n\n self.speak = speak\n\n if self.server or not self.testing:\n if self.speak:\n self.say(msg)\n try:\n subprocess.Popen([\"notify-send\", \"Dragonfire\", msg])\n except BaseException:\n pass\n if cmd != \"\":\n time.sleep(duration)\n try:\n subprocess.Popen(cmd, stdout=FNULL, stderr=FNULL)\n except BaseException:\n pass\n return msg", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")", "def handle(self, msg):\n\n if msg.command == \"PING\":\n self._sendmsg(\"PONG :{}\".format(msg.args[0]))\n\n elif msg.command == \"JOIN\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has joined {}\".format(name, channel))\n\n elif msg.command == \"PART\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has left {}\".format(name, channel))\n\n elif msg.command == \"KICK\":\n name = msg.sendername\n channel = msg.args[0]\n victim = msg.args[1]\n print(\"{} has kicked {} from {}\".format(name, victim, channel))\n\n elif msg.command == \"QUIT\":\n name = msg.sendername\n print(\"{} has quit IRC\".format(name))\n\n elif msg.command == \"KILL\":\n name = msg.sendername\n victim = msg.args[0]\n print(\"{} has killed {}\".format(name, victim))\n\n elif msg.command == \"NICK\":\n name = msg.sendername\n newname = msg.args[0]\n 
print(\"{} is now known as {}\".format(name, newname))\n\n elif msg.command == \"MODE\":\n name = msg.sendername\n target = msg.args[0]\n mode = msg.args[1]\n print(\"{} has set the mode of {} to {}\".format(name, target, mode))\n\n elif msg.command == \"NOTICE\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}]! {}\".format(name, target, message))\n\n elif msg.command == \"PRIVMSG\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}] {}\".format(name, target, message))\n\n elif msg.command.isdigit():\n print(msg.args[-1])\n\n else:\n print(str(msg))\n\n hooks.handle(self, msg)", "async def command_proc(self, message):\n parser = DiscordArgumentParser(description=\"A Test Command\", prog=\">stats\")\n parser.set_defaults(message=message)\n sp = parser.add_subparsers()\n\n sub_parser = sp.add_parser('user',\n description='test something')\n sub_parser.add_argument(\n \"user_id\",\n action=ValidUserAction,\n help=\"Mention of the user in question\",\n metavar=\"@user\",\n nargs=\"?\",\n )\n sub_parser.set_defaults(cmd=self._cmd_user)\n\n sub_parser = sp.add_parser('global',\n description='test something')\n sub_parser.set_defaults(cmd=self._cmd_global)\n\n try:\n self.log.info(\"Parse Arguments\")\n results = parser.parse_args(shlex.split(message.content)[1:])\n self.log.info(results)\n if type(results) == str:\n self.log.info(\"Got normal return, printing and returning\")\n self.log.info(type(results))\n await self.client.send_message(message.channel, results)\n return\n elif hasattr(results, 'cmd'):\n await results.cmd(results)\n return\n else:\n msg = parser.format_help()\n await self.client.send_message(message.channel, msg)\n return\n except NoValidCommands as e:\n # We didn't get a subcommand, let someone else deal with this mess!\n self.log.error(\"???\")\n pass\n except HelpNeeded as e:\n self.log.info(\"TypeError Return\")\n self.log.info(e)\n msg = f\"{e}. You can add `-h` or `--help` to any command to get help!\"\n await self.client.send_message(message.channel, msg)\n return\n pass\n\n return", "def handle_command(command, channel):\n response = \"Not sure what you mean. 
\" + \\\n \"Try the following commands: \\n\" +\\\n \"@netsilbot alert list\\n\" +\\\n \"@netsilbot alert details <alertID>\\n\" +\\\n \"@netsilbot service list\\n\" +\\\n \"@netsilbot service details <serviceID>\\n\"+\\\n \"(You can add 'text' or 'raw' options for formatting the output)\"\n\n\n if command.startswith(COMMANDS[0]):\n #print command\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetAlertList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetAlertDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n\n elif(subcommand=='rule'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertRuleList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertRuleDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='template'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertTemplateList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertTemplateDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[1]):\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetServiceList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetServiceDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[2]):\n subcommand = command.split(' ')[1]\n if(subcommand=='run'):\n if(len(command.split(' '))>2):\n queryText = command.split('run')[1].strip()\n else:\n queryText=''\n\n print queryText\n\n response=''\n response = RunQuery(query=queryText)\n #print response\n\n sendSlackMessageWithAttactment(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)", "def execute_commands(self, commands):\n for cmd in commands:\n self.action_list[cmd](commands[cmd])\n if cmd == 'r':\n break", "def run(self):\n # To add a command to the command dispatch table, append here.\n self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, 2]})\n self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, 1]})\n self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, 2]})\n self.command_dispatch.update({self.DIGITAL_MESSAGE: 
[self.digital_message, 2]})\n self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, 3]})\n self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, 3]})\n self.command_dispatch.update({self.STRING_DATA: [self._string_data, 2]})\n self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, 2]})\n self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, 2]})\n self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, 2]})\n self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, 2]})\n self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, 2]})\n\n while not self.is_stopped():\n if len(self.pymata.command_deque):\n # get next byte from the deque and process it\n data = self.pymata.command_deque.popleft()\n\n # this list will be populated with the received data for the command\n command_data = []\n\n # process sysex commands\n if data == self.START_SYSEX:\n # next char is the actual sysex command\n # wait until we can get data from the deque\n while len(self.pymata.command_deque) == 0:\n pass\n sysex_command = self.pymata.command_deque.popleft()\n # retrieve the associated command_dispatch entry for this command\n dispatch_entry = self.command_dispatch.get(sysex_command)\n\n # get a \"pointer\" to the method that will process this command\n method = dispatch_entry[0]\n\n # now get the rest of the data excluding the END_SYSEX byte\n end_of_sysex = False\n while not end_of_sysex:\n # wait for more data to arrive\n while len(self.pymata.command_deque) == 0:\n pass\n data = self.pymata.command_deque.popleft()\n if data != self.END_SYSEX:\n command_data.append(data)\n else:\n end_of_sysex = True\n\n # invoke the method to process the command\n method(command_data)\n # go to the beginning of the loop to process the next command\n continue\n\n #is this a command byte in the range of 0x80-0xff - these are the non-sysex messages\n\n elif 0x80 <= data <= 0xff:\n # look up the method for the command in the command dispatch table\n # for the digital reporting the command value is modified with port number\n # the handler needs the port to properly process, so decode that from the command and\n # place in command_data\n if 0x90 <= data <= 0x9f:\n port = data & 0xf\n command_data.append(port)\n data = 0x90\n # the pin number for analog data is embedded in the command so, decode it\n elif 0xe0 <= data <= 0xef:\n pin = data & 0xf\n command_data.append(pin)\n data = 0xe0\n else:\n pass\n\n dispatch_entry = self.command_dispatch.get(data)\n\n # this calls the method retrieved from the dispatch table\n method = dispatch_entry[0]\n\n # get the number of parameters that this command provides\n num_args = dispatch_entry[1]\n\n #look at the number of args that the selected method requires\n # now get that number of bytes to pass to the called method\n for i in range(num_args):\n while len(self.pymata.command_deque) == 0:\n pass\n data = self.pymata.command_deque.popleft()\n command_data.append(data)\n #go execute the command with the argument list\n method(command_data)\n\n # go to the beginning of the loop to process the next command\n continue", "def mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def echo(self, messages):\n for msg in messages:\n 
self.env.process(self.instruct_transmission(msg, self.ports[0]))", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def call_all(self, msg_tag, message):\n return self.hub.call_all(self.get_private_key(), msg_tag, message)", "def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False", "def command(self, msg):\n if msg.startswith('PRINT '):\n console.ori_log(msg[6:])\n elif msg.startswith('EVAL '):\n self._global._ = eval(msg[5:])\n flexx.ws.send('RET ' + self._global._) # send back result\n elif msg.startswith('EXEC '):\n eval(msg[5:]) # like eval, but do not return result\n elif msg.startswith('DEFINE-JS '):\n eval(msg[10:])\n #el = document.createElement(\"script\")\n #el.innerHTML = msg[10:]\n #document.body.appendChild(el)\n elif msg.startswith('DEFINE-CSS '):\n # http://stackoverflow.com/a/707580/2271927\n el = document.createElement(\"style\")\n el.type = \"text/css\"\n el.innerHTML = msg[11:]\n document.body.appendChild(el)\n elif msg.startswith('TITLE '):\n if not self.nodejs:\n document.title = msg[6:]\n elif msg.startswith('ICON '):\n if not self.nodejs:\n link = document.createElement('link')\n link.rel = 'icon'\n link.href = msg[5:]\n document.head.appendChild(link)\n #document.getElementsByTagName('head')[0].appendChild(link);\n elif msg.startswith('OPEN '):\n window.win1 = window.open(msg[5:], 'new', 'chrome')\n else:\n console.warn('Invalid command: \"' + msg + '\"')", "def _command_processor(self, cmd: str) -> None:\n\n if cmd == \"translate\":\n oracion = self.session.prompt(\n \"... Texto en español: \",\n validator=TbSETValidator(\"text_max_len\"),\n complete_while_typing=False)\n\n self.translate(oracion)\n elif cmd == \"train\":\n confirmation = self.session.prompt(\"... This will take at least 30' with a GPU. Are you sure? 
(y/n): \",\n validator=TbSETValidator(\"yes_no\"))\n\n if confirmation in \"yY\":\n self.train()\n else:\n print(\"Wrong command, please try again.\\n\")", "def process_command(self, command):\n if not (type(command) is tuple and len(command) == 2):\n raise ValueError(\"Expected command to be a tuple of a string and a list\")\n\n action, channels = command\n self.logger.info(\"Received command %s (%s)\" % (action, ','.join(channels)))\n if action == 'join':\n for channel in channels:\n self.conn.join(channel)\n self.channels += channels\n elif action == 'part':\n for channel in channels:\n self.conn.part(channel)\n self.channels = [c for c in self.channels if c not in channels]", "def getCommands(self):", "def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)", "def processMessage(self, *args, **kwargs):\r\n pass", "def execute_cmd(client, server, msg):\n cmd = msg.strip().split(' ')[0]\n if cmd[0] == \".\":\n server.logger.info(\"BLACKLIST {} : {}\".format(client.ip, cmd))\n client.exit_status = 0\n return\n if cmd in SCRIPTED:\n server.logger.info(\"SCRIPTED CMD {} : {}\".format(client.ip, cmd))\n method = getattr(sys.modules[__name__], \"{}_cmd\".format(cmd))\n result = method(server, client, msg)\n elif cmd not in BLACK_LIST:\n server.logger.info(\"EXECUTING CMD {} : {}\".format(client.ip, cmd))\n response = client.run_in_container(msg)\n if \"exec failed\" not in response:\n if response == \"\\n\":\n return\n server.logger.info(\n \"RESPONSE {}: {}\".format(client.ip, response[:-1]))\n client.send(response)\n print(client.exit_status)\n else:\n not_found(client, server, cmd)", "def command(data):\n LOG.debug(f\"Received text from {data['user']['name']}: {data['command']}\")\n\n room_id = data[\"room\"]\n user_id = data[\"user\"][\"id\"]\n\n if user_id != self.user:\n timer = self.timers_per_room.get(room_id)\n timer.reset()\n\n message = data[\"command\"]\n for user in self.players_per_room[room_id]:\n if user[\"id\"] == user_id:\n user[\"msg_n\"] += 1\n # Let's do some message mangling, but only to every second message\n if user[\"msg_n\"] % 2 == 0:\n message = message[::-1]\n message = message.upper()\n\n # emit the message to all other users\n # (the user who sent will see the original; has already seen it)\n for user in self.players_per_room[room_id]:\n if user[\"id\"] != user_id:\n self.sio.emit(\n \"text\",\n {\n \"room\": data[\"room\"],\n \"receiver_id\": user[\"id\"],\n \"message\": message,\n \"impersonate\": user_id,\n },\n callback=self.message_callback,\n )", "def call_command_direct(self, name, args=None, kwargs=None):\n comm,_=self._commands[name]\n return comm(*(args or []),**(kwargs or {}))", "def remote_command(message):\n m = message.split(\"|\")\n if m[0][0] != \"/\":\n print \"something not right, command called with a non command\", message\n raise\n if m[0] in [\"/pp\", \"/print pic\", \"/print picture\"]:\n print_pic()\n if m[0] in [\"/wp\", \"/webpic\"]:\n print_remote_pic(m[1])\n if m[0] in [\"/roll\", \"/new-roll\"]:\n global print_counter\n print_counter = 0\n if m[0] in [\"/set\", \"/settings\"]:\n global brightness\n global exposure_mode\n global iso\n global mode_or_iso\n global print_direction\n global roll_length\n global each_print_length\n if len(m) > 1:\n pairs = m[1].split(\",\")\n for pair in pairs:\n p = pair.split(\":\")\n if p[0] == \"iso\":\n iso = int(p[1])\n if p[0] == \"exposure_mode\":\n exposure_mode = p[1]\n if p[0] == \"mode_or_iso\":\n mode_or_iso = p[1]\n if p[0] == \"print_direction\":\n print_direction 
= p[1]\n if p[0] == \"brightness\":\n brightness = p[1]\n if p[0] == \"roll_length\":\n roll_length = p[1]\n if p[0] == \"each_print_length\":\n each_print_length = p[1]\n print \"settings requested:\", m[0]\n print \"settings now:\"\n print \"iso {}, exposure_mode {}, mode_or_iso {}\".format(iso,\n exposure_mode,\n mode_or_iso)\n if m[0] in [\"/photo\", \"/pic\", \"/picture\"]:\n print \"taking a pic with settings:\"\n print \"iso {}, exposure_mode {}, mode_or_iso {}\".format(iso,\n exposure_mode,\n mode_or_iso)\n take_a_picture(filepath=\"live.jpg\",\n exposure_mode=exposure_mode,\n iso=iso,\n mode_or_iso=mode_or_iso)", "def receive(self, command_list):\n for cmd in command_list:\n self._send_cmd_with_mapped_ids(cmd)", "def test_sendMessage(self):\n self.p.sendMessage(\"CMD\", \"param1\", \"param2\")\n self.check(\"CMD param1 param2\\r\\n\")", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def on_command(self, session, cmd_list):\n assert cmd_list\n\n cmd = cmd_list[0]\n if cmd in self._commands:\n return self._commands[cmd].function(session, cmd_list)\n else:\n self.reply_text(session, \"NG:Unknown command [%s]\" % cmd)\n return True", "def dispatch(self) -> None:\n while True:\n body = self.general_queue.pop()\n if \"CMD$\" in body:\n cmd = [part for part in body[body.find(\"$\") + 1:].split(\";\") if part]\n try:\n module, func = cmd[0], cmd[1]\n except IndexError:\n self.send_through_aprs(f\"CMDERR: Unable to parse Commnd {cmd}\")\n continue\n if self.validate_func(module, func):\n try:\n getattr(self.modules[module], func)()\n self.send_through_aprs(f\"CMDSUC: Command {cmd} executed successfully\")\n except Exception as e:\n self.send_through_aprs(f\"CMDERR: Command {cmd} failed with {e}\")", "def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo", "def handle_command(command, channel):\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n response = None\n\n if command.startswith(EXAMPLE_COMMAND):\n response = \"Sure...write some code then I can do that!\"\n elif command.startswith(\"date\"):\n response = currentDate()\n elif command.startswith(\"time\"):\n response = currentTime()\n elif command.startswith(\"your my best friend\") or command.startswith(\"you are my best friend\"):\n response = \"Thanks so much, buddy!!! 
\\n Your the best!!\"\n elif command.startswith(\"hello\") or command.startswith(\"hi\") or command.startswith(\"hey\"):\n response = \"Hello, My name is BackTalker\"\n elif command.startswith(\"thanks\") or command.startswith(\"thank you\"):\n response = \"Your Welcome\"\n elif command.startswith(\"math\"):\n problem = command[4:]\n response = \"The answer for {} is {}\".format(problem, str(eval(problem)))\n elif command.startswith(\"say something\"):\n response = compliments() \n elif command.startswith(\"weather\"):\n response = currentWeather()\n elif command.startswith(\"cmpt371\"):\n word = command[8:]\n response = cmpt371(word)\n\n\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def do_command(self, args = ()):\n if len(args) == 0:\n self.do_overview()\n elif len(args) != 1:\n raise ValueError('Wrong number of arguments.')\n elif args[0] in self.base.commands.keys():\n self.do_command_help(args[0])\n else:\n raise ValueError('No such command.')", "def cmd(self, context, message):\r\n return True", "async def _run_command(self, command, *args, **kwargs):\n pass", "def parse_commands(message, client=None):\n if(message[0] != COMMAND_START_SYMBOL):\n return False\n\n components = message[1:].split(\" \")\n command = components[0]\n\n if(command in commands.keys()):\n permission_level = SERVER_PERMISSION_ALL\n if(client is not None):\n permission_level = client.client_info.permissions\n\n if(check_permission(permission_level, commands[command][\"perm_required\"])):\n commands[command][\"on_run\"](args=components, client=client)\n else:\n to_client_or_console(\"Access to \" + command + \" denied.\", client)\n # We did attempt to use a command, so don't say this in chat...\n # return False\n else:\n to_client_or_console(\"Command \" + command + \" not found.\", client)\n # We did attempt to use a command, so don't say this in chat...\n # return False\n\n return True", "def _act_on(self, message):\n if 'PING' in self.ts.get_human_readable_message(message): # PING/PONG silliness\n self._add_to_chat_queue(self.ts.get_human_readable_message(message.replace('PING', 'PONG')))\n\n db_session = self.Session()\n command = self._get_command(message, db_session)\n if command is not None:\n user = self.ts.get_user(message)\n user_is_mod = self.ts.check_mod(message)\n if self._has_permission(user, user_is_mod, command, db_session):\n self._run_command(command, message, db_session)\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {} you\\'re not authorized to use the command: !{}'\n .format(user, command[0]))\n db_session.commit()\n db_session.close()", "def handleMessage(msg):", "def call_command(self, name, args=None, kwargs=None, callback=None):\n return self._schedule_comm(name,args,kwargs,callback=callback)", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = 
command\n for i in range(times):\n await bot.process_commands(msg)", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def evecommands(self, irc, msg, args):\n desc = \"\\n\".join((\"EVESpai commands:\",\n \"{0} {1}\".format(ircutils.bold(\"'evecommands'\"), \"List available commands.\"),\n \"{0} {1}\".format(ircutils.bold(\"'pos [<system>]'\"), \"Lists all POSes.\"),\n \"{0} {1}\".format(ircutils.bold(\"'evetime'\"), \"Get current time on Tranquility.\"),\n \"{0} {1}\".format(ircutils.bold(\"'whereis <character>'\"), \"List the location and currently boarded ship of <character>.\"),\n \"{0} {1}\".format(ircutils.bold(\"'cache <calltype>'\"), \"List the cache time of given call type.\"),\n \"{0} {1}\".format(ircutils.bold(\"'whoat <system>'\"), \"List characters and their ships in <system>. 
If --all is given, ignore the max lines limitation.\"),\n \"{0} {1}\".format(ircutils.bold(\"'ship <shiptype>'\"), \"List characters in <shiptype>.\"),\n \"{0} {1}\".format(ircutils.bold(\"'chars <user>'\"), \"List all cha)racters belonging to <user>\"),\n \"{0} {1}\".format(ircutils.bold(\"'price [--location=(<solarsystem>|<region>)] <typeName>'\"), \"List buy/sell/volume of <type> in <location>, defaults to Jita.\"),\n \"{0} {1}\".format(ircutils.bold(\"'markets'\"), \"List all price indexed markets.\"),\n \"{0} {1}\".format(ircutils.bold(\"'player <character>'\"), \"List username of those who own *<character>*\")))\n\n for line in desc.splitlines():\n irc.reply(line.strip(), prefixNick=False)", "def process(supbot: Supbot, request_command: str):\n match = re.findall(r\"([^\\s\\\"']+|\\\"([^\\\"]*)\\\"|'([^']*)')\", request_command)\n parts = [x[0] if x[1] == \"\" else x[1] for x in match]\n try:\n if parts[0] == \"quit\":\n supbot.quit()\n elif parts[0] == \"send\":\n supbot.send_message(parts[1], parts[2])\n else:\n return \"Invalid command\"\n except IndexError:\n return \"Insufficient Arguments\"", "def process_cmd(config, cmd):\n # Separate command from arguments\n cmd_parts = cmd.split(' ', 1)\n head = cmd_parts[0]\n args = ''\n if len(cmd_parts) == 2:\n args = cmd_parts[1]\n\n # Call the command\n if not common.call_cmd(head, config, args):\n print(\"RabbitHole: Unknown command '{}'\".format(head))", "def command(self, msg):\n if msg.startswith('PRINT '):\n window.console.ori_log(msg[6:])\n elif msg.startswith('EVAL '):\n window._ = eval(msg[5:])\n window.flexx.ws.send('RET ' + window._) # send back result\n elif msg.startswith('EXEC '):\n eval(msg[5:]) # like eval, but do not return result\n elif msg.startswith('DEFINE-JS '):\n eval(msg[10:])\n #el = window.document.createElement(\"script\")\n #el.innerHTML = msg[10:]\n #window.document.body.appendChild(el)\n elif msg.startswith('DEFINE-CSS '):\n # http://stackoverflow.com/a/707580/2271927\n el = window.document.createElement(\"style\")\n el.type = \"text/css\"\n el.innerHTML = msg[11:]\n window.document.body.appendChild(el)\n elif msg.startswith('TITLE '):\n if not self.nodejs:\n window.document.title = msg[6:]\n elif msg.startswith('ICON '):\n if not self.nodejs:\n link = window.document.createElement('link')\n link.rel = 'icon'\n link.href = msg[5:]\n window.document.head.appendChild(link)\n #window.document.getElementsByTagName('head')[0].appendChild(link);\n elif msg.startswith('OPEN '):\n window.win1 = window.open(msg[5:], 'new', 'chrome')\n else:\n window.console.warn('Invalid command: \"' + msg + '\"')" ]
[ "0.77365535", "0.7425924", "0.70829546", "0.69705456", "0.6875241", "0.6782992", "0.67032224", "0.6695595", "0.6694858", "0.66467553", "0.664355", "0.66403383", "0.6640279", "0.6631685", "0.6593153", "0.65646577", "0.655279", "0.65517825", "0.6549944", "0.6543722", "0.6538904", "0.6536581", "0.65305996", "0.6503027", "0.64646196", "0.64552873", "0.64405733", "0.64342517", "0.6409162", "0.6409162", "0.6409162", "0.6409162", "0.63730836", "0.635548", "0.633568", "0.6321513", "0.629353", "0.62786096", "0.6270884", "0.6269109", "0.62665004", "0.6264533", "0.62498385", "0.6242568", "0.6242566", "0.6238235", "0.62343067", "0.622924", "0.6221515", "0.62101537", "0.62036735", "0.6202767", "0.62026256", "0.6180566", "0.61787385", "0.61681646", "0.6165902", "0.6161869", "0.6158337", "0.6137492", "0.6132609", "0.61277586", "0.61237305", "0.61230886", "0.6122751", "0.6119937", "0.6118072", "0.6108377", "0.6106636", "0.61040115", "0.6102476", "0.6093233", "0.608849", "0.6079115", "0.6075153", "0.6063385", "0.6061518", "0.6060227", "0.6053817", "0.60473603", "0.6045035", "0.60414535", "0.6040514", "0.6040236", "0.6036724", "0.6025061", "0.6024648", "0.6024616", "0.60220414", "0.6019658", "0.60192233", "0.60179687", "0.60157436", "0.6015335", "0.6014104", "0.6014104", "0.6014104", "0.60113525", "0.6007303", "0.6002946", "0.6002725" ]
0.0
-1
Publishes a created event object to the core.
def publish_event(self, event): self.channel.basic_publish(exchange='', routing_key='peripheral_event', body=json.dumps({self.name: dict(event)}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_event(self):\n pass", "def signal_creation_event(bb_object):\n global EVENTS\n EVENTS.append( (creation_event, bb_object.data) )", "def publishEvent(eventName,publisher, msg):", "async def createEvent(self, event: Event) -> None:", "def create_event(self, **kwargs):\n events = self.variables['events']\n events.append(kwargs)\n self.variables['events'] = events", "def on_created(self, event):\n\n # the absolute path of the event file/folder\n abs_path = event.src_path\n # replace the root path with a '.' to build a relative path to be sent to server\n relative_event_path = abs_path.replace(self.root_path, \".\")\n\n # retrieve event type and the flag for directory/folder\n event_type = event.event_type\n is_directory = event.is_directory\n\n # only propagate changes if there is a connection with the server\n if self.protocol.connected:\n self.protocol.send_event(event_type, is_directory, relative_event_path)\n else:\n logging.warning(\"Connection with server has not been established, 'create' changes will not be propagated.\")", "def create_and_add_event(self, event_data):\n event = event_from_dict(event_data)\n self.add_event(event)", "def event_create(tenant_id, user_id=None):", "def send(self, event):\r\n try:\r\n self.collection.insert(event, manipulate=False)\r\n except PyMongoError:\r\n # The event will be lost in case of a connection error.\r\n # pymongo will re-connect/re-authenticate automatically\r\n # during the next event.\r\n msg = 'Error inserting to MongoDB event tracker backend'\r\n log.exception(msg)", "def create_event():\n json_data = request.get_json()\n data, error = EventSchema().load(json_data)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent = Event.create(data)\n return make_response(jsonify(oEvent.as_dict()))", "def publish(self, event):\n self.pubsub_router.send(event)", "def create_event(wrapped, instance, args, kwargs, start_time, response,\n exception):\n event = PyMongoEvent(\n wrapped,\n instance,\n args,\n kwargs,\n start_time,\n response,\n exception\n )\n trace_factory.add_event(event)", "def send_notification (event):\n Publisher.sendMessage (event)", "async def send_event_created(self, action_id: int):\n async with self.pg.acquire() as conn:\n data = await conn.fetchrow(\n \"\"\"\n SELECT a.company AS company_id, u.role AS host_role, u.id AS host_user_id,\n full_name(u.first_name, u.last_name, u.email) AS host_name,\n e.id AS event_id, e.name AS event_name,\n (e.start_ts AT TIME ZONE e.timezone)::date AS event_date,\n cat.name AS cat_name, cat.slug AS cat_slug,\n event_link(cat.slug, e.slug, e.public, $2) AS event_link\n FROM actions AS a\n JOIN users AS u ON a.user_id = u.id\n JOIN events AS e ON a.event = e.id\n JOIN categories AS cat ON e.category = cat.id\n WHERE a.id=$1\n \"\"\",\n action_id,\n self.settings.auth_key,\n )\n\n link = f'/dashboard/events/{data[\"event_id\"]}/'\n ctx = dict(\n summary='{host_name} created an event \"{event_name}\"'.format(**data),\n details=(\n 'Event \"{event_name}\" ({cat_name}) created by \"{host_name}\" ({host_role}), '\n 'click the link below to view the event.'\n ).format(**data),\n action_label='View Event',\n action_link=link,\n )\n users = [\n UserEmail(id=r['id'], ctx=ctx)\n for r in await conn.fetch(\"SELECT id FROM users WHERE role='admin' AND company=$1\", data['company_id'])\n ]\n await self.send_emails.direct(data['company_id'], Triggers.admin_notification, users)\n if data['host_role'] != 'admin':\n ctx = {\n 'event_link': data['event_link'],\n 
'event_dashboard_link': link,\n 'event_name': data['event_name'],\n 'event_date': format_dt(data['event_date']),\n 'category_name': data['cat_name'],\n is_cat(data['cat_slug']): True,\n }\n await self.send_emails.direct(\n data['company_id'], Triggers.event_host_created, [UserEmail(data['host_user_id'], ctx)]\n )", "def create(self):\n o = self._create_impl()\n self.logger.debug(f\"created {o}\")\n self._notify(o)", "def createEvent(self):\n return _libsbml.Model_createEvent(self)", "def host_create_event():\n data = request.get_json(force=True)\n if not data:\n return jsonify(**{'succeed': False, 'data': []})\n new_event = Event()\n new_event.data = data\n new_event.data['registrants'] = {email: False for email in new_event.data['registrants']}\n all_event[data['event_name']] = new_event\n # create folder for this event\n folder_name = data['event_name'].replace(' ', '')\n absolute_folder_name = os.path.dirname(os.path.abspath(__file__)) + '/static/files/' + folder_name\n print absolute_folder_name\n if not os.path.exists(absolute_folder_name):\n os.makedirs(absolute_folder_name)\n for key,val in data['files'].iteritems():\n file_address = absolute_folder_name + '/' + key\n with open(file_address, \"w\") as text_file:\n text_file.write(val)\n data['files'][key] = '/static/files/' + folder_name + '/' + key\n return jsonify(**{'succeed': True, 'data': new_event.data})", "def publish(self, publisher):\n publisher._send(self.payload.event, self.info, *self.payload.args,\n **self.payload.kwargs)", "def publish(self, block, event_type, event_data):\n raise NotImplementedError(\"Runtime needs to provide publish()\")", "def publish():\n pass", "def fusion_api_create_events(self, body, api=None, headers=None):\n return self.event.create(body, api, headers)", "def publish_event(self, topic):\n topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(topic, qos=2)\n logger.info(\"Event published on topic %s\", topic)", "def event_create(event_id):\n schema = {\n \"type\": \"object\",\n\n \"definitions\": {\n \"traffic\": {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"enum\": [\"host\", \"az\", \"dc\"]},\n \"value\": {\"type\": \"string\"}\n },\n \"required\": [\"type\", \"value\"]\n }\n },\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"started_at\": {\"type\": \"string\"},\n \"finished_at\": {\"type\": \"string\"},\n \"traffic_from\": {\"$ref\": \"#/definitions/traffic\"},\n \"traffic_to\": {\"$ref\": \"#/definitions/traffic\"}\n },\n \"required\": [\"started_at\", \"name\"],\n \"additionalProperties\": False\n }\n try:\n data = flask.request.get_json(silent=False, force=True)\n jsonschema.validate(data, schema)\n\n except (ValueError, jsonschema.exceptions.ValidationError) as e:\n return flask.jsonify({\"error\": \"Bad request: %s\" % e}), 400\n\n db.get().event_create(event_id, data)\n return flask.jsonify({\"message\": \"Event created %s\" % event_id}), 201", "def on_create(self, payload):\n pass", "def create_event(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVm_CreateEvent', self.handle))", "def create_and_submit(cls, response, kwargs, event_name=None, alert=None, partial_event=None, magen_logger=None):\n if not PolicyState().production_mode:\n return\n\n p_events_ctrl = cls(app_name=\"magen-ps\", magen_logger=magen_logger)\n p_events_ctrl.send_event(\n event_name=event_name or 'Policy Event',\n event_data=partial_event(response, **kwargs)\n if partial_event else DDPolicyEventsWrapper.construct_event(response, **kwargs),\n alert=alert or 
'info'\n )", "def event(self, data: dict, source=None):\n ProjectEvent.objects.create(project=self, data=data, source=source)\n\n # TODO: Evaluate each project trigger\n # #for trigger in self.triggers.all():\n # trigger.evaluate(event=event, context=dict(event=event, source=source))", "def publish(self, name, **params):\n if self.__streaming:\n raise ImplementationError(\"Cannot publish event during stream.\")\n with self.stream() as publish:\n event = DTO(\n urn=\"%s:%s\" % (self.namespace, name.lower()),\n name=name,\n params=ImmutableDTO(params).as_dto(),\n version=self.__version,\n timestamp=sq.timezone.now()\n )\n publish(event)", "def create_event(data):\n event = EventModel(**data)\n db.session.add(event)\n db.session.commit()\n return event", "def publish(self):\n return", "def process_IN_CREATE(self, event):", "def send_event(event: dict):\n\n eventbridge.put_events(Entries=[event])", "def publish(self, message: str) -> None:", "def test_create_event(self):\n event_type = 'SERVICE NOTIFICATION'\n fields = EVENT_FIELDS.get(event_type, None)\n parts = [\n 'nagiosadmin',\n 'nagios4',\n 'Root Partition',\n 'CRITICAL',\n 'notify-service-by-email',\n 'DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]\n event = create_event(\n timestamp=1603813628, event_type=event_type, hostname='docker-desktop', fields=fields._make(parts)\n )\n\n assert event['timestamp'] == 1603813628\n assert event['event_type'] == 'SERVICE NOTIFICATION'\n assert event[\"msg_title\"] == 'Root Partition'\n assert event[\"source_type_name\"] == 'SERVICE NOTIFICATION'\n assert event[\"msg_text\"] == 'CRITICAL'\n assert event['tags'] == [\n 'contact:nagiosadmin',\n 'host:nagios4',\n 'check_name:Root Partition',\n 'event_state:CRITICAL',\n 'notification_type:notify-service-by-email',\n 'payload:DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]", "def registerEvent(eventName, publisher, msgInterface, exclusive=FALSE):", "def create(self, request):\n\n event = Event()\n event.title = request.data[\"title\"]\n event.datetime = request.data[\"datetime\"]\n event.cost = request.data[\"cost\"]\n event.location = request.data[\"location\"]\n event.address = request.data[\"address\"]\n event.description = request.data[\"description\"]\n event.hostname = request.data[\"hostname\"]\n\n try:\n event.save()\n hosts = request.auth.user\n event.hosts.add(hosts)\n # topics = Topic.objects.filter(pk__in=request.data[\"topicId\"])\n # event.topics.set(topics)\n serializer = EventSerializer(event, context={'request': request})\n return Response(serializer.data)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "def post(self):\n\n # we need a unique tx number so we can look these back up again\n # as well as for logging\n # FIXME: how can we guarantee uniqueness here?\n tx = int(time.time() * 100000) + random.randrange(10000, 99999)\n\n log.info(\"EVENTS [{}]: Creating events\".format(tx))\n\n try:\n user = self.jbody[\"user\"]\n if not EMAIL_REGEX.match(user):\n user += \"@\" + self.domain\n event_type_id = self.jbody.get(\"eventTypeId\", None)\n category = self.jbody.get(\"category\", None)\n state = self.jbody.get(\"state\", None)\n note = self.jbody.get(\"note\", None)\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n if not event_type_id and (not category and not state):\n raise exc.BadRequest(\n \"Must specify an event 
type id or both category and state\"\n )\n\n if event_type_id:\n event_type = self.session.query(EventType).get(event_type_id)\n else:\n event_type = self.session.query(EventType).filter(\n and_(\n EventType.category == category,\n EventType.state == state\n )\n ).one()\n\n if event_type is None:\n self.write_error(400, message=\"Bad event type\")\n return\n\n category = event_type.category\n state = event_type.state\n\n hostnames = (\n [self.jbody.get(\"hostname\", None)]\n if self.jbody.get(\"hostname\", None) else []\n )\n\n if \"hostnames\" in self.jbody:\n hostnames.extend(self.jbody.get(\"hostnames\"))\n\n log.info(\n \"EVENTS [{}]: Will create event {} {}\".format(\n tx, category, state\n )\n )\n\n log.info(\n \"EVENTS [{}]: Hostnames specified: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a host query was specified, we need to talk to the external\n # query server to resolve this into a list of hostnames\n if \"hostQuery\" in self.jbody:\n query = self.jbody[\"hostQuery\"]\n log.info(\"EVENTS [{}]: Running query {}\".format(tx, query))\n response = PluginHelper.request_get(params={\"query\": query})\n if response.json()[\"status\"] == \"ok\":\n hostnames.extend(response.json()[\"results\"])\n log.info(\n \"EVENTS [{}]: Hostnames after query: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a quest Id was given, look up the labors in that quest and\n # get all the hostnames for those labors.\n if \"questId\" in self.jbody:\n log.info(\"EVENTS [{}]: Looking up quest {}\".format(\n tx, self.jbody[\"questId\"])\n )\n quest = self.session.query(Quest).filter_by(\n id=self.jbody[\"questId\"]\n ).scalar()\n if not quest:\n raise exc.NotFound(\"No such Quest {} found\".format(id))\n for labor in quest.labors:\n hostnames.append(labor.host.hostname)\n\n log.info(\n \"EVENTS [{}]: Hostnames after quest expansion: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # We need to create a list of hostnames that don't have a Host record\n new_hosts_needed = set(hostnames)\n hosts = (\n self.session.query(Host).filter(Host.hostname.in_(hostnames)).all()\n )\n\n for host in hosts:\n new_hosts_needed.remove(str(host.hostname))\n\n # if we need to create hosts, do them all at once\n if new_hosts_needed:\n log.info(\"EVENTS [{}]: Creating hosts {}\".format(\n tx, \", \".join(new_hosts_needed)\n ))\n Host.create_many(self.session, new_hosts_needed)\n hosts = (\n self.session.query(Host).filter(\n Host.hostname.in_(hostnames)\n ).all()\n )\n\n if not hosts:\n raise exc.BadRequest(\"No hosts found with given list\")\n\n try:\n if len(hosts) > 1:\n # if we are supposed to create many events,\n # we want to do them as a giant batch\n log.info(\"EVENTS [{}]: Creating multiple events\".format(tx))\n events_to_create = []\n for host in hosts:\n events_to_create.append({\n \"host_id\": host.id,\n \"user\": user,\n \"event_type_id\": event_type.id,\n \"note\": note,\n \"tx\": tx\n })\n Event.create_many(self.session, events_to_create, tx)\n else:\n # if we are just creating one event, do it the simple way\n log.info(\"EVENTS [{}]: Creating 1 event\".format(tx))\n event = Event.create(\n self.session, hosts[0], user, event_type, note=note\n )\n\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"EVENTS [{}]: Flushing and committing\".format(tx))\n self.session.flush()\n log.info(\"EVENTS [{}]: Flushed\".format(tx))\n self.session.commit()\n log.info(\"EVENTS [{}]: 
Committed\".format(tx))\n\n if len(hosts) == 1:\n json = event.to_dict(self.href_prefix)\n json[\"href\"] = \"/api/v1/events/{}\".format(event.id)\n self.created(\n \"/api/v1/events/{}\".format(event.id), json\n )\n else:\n # if we created many events, we need to look them up by the TX\n # number to figure out what they were since the were created in bulk\n created_events = self.session.query(Event).filter(Event.tx == tx).all()\n self.created(\n data={\n \"events\": (\n [event.to_dict(self.href_prefix) for event in created_events]\n ),\n \"totalEvents\": len(created_events)\n }\n )\n\n log.info(\"EVENTS [{}]: Created event {} {} for {}\".format(\n tx, category, state,\n \", \".join(hostnames)\n ))", "def post_event(self, event):\r\n from evennia.scripts.models import ScriptDB\r\n\r\n if event.public_event:\r\n event_manager = ScriptDB.objects.get(db_key=\"Event Manager\")\r\n event_manager.post_event(event, self.owner.player, event.display())", "def create(self, validated_data):\n return Event.objects.create(**validated_data)", "def create_event() -> abc.Event:\n return get_asynclib().Event()", "def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )", "def send(self):\n event = gdata.calendar.CalendarEventEntry()\n event.title = atom.Title(text=self.title)\n event.content = atom.Content(text='')\n event.where.append(gdata.calendar.Where(value_string=self.location))\n # Set start time in 6 minutes\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 6 * 60))\n # Set end time in an hour\n end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n event.when.append(gdata.calendar.When(start_time=start_time,\n end_time=end_time))\n minutes = 5\n for a_when in event.when:\n if len(a_when.reminder) > 0:\n # Adding reminder in 5 minutes before event (start_time)\n a_when.reminder[0].minutes = 5\n else:\n a_when.reminder.append(\n gdata.calendar.Reminder(minutes=minutes))\n # Insert new event\n new_event = self.calendar_service.InsertEvent(event,\n self.calendar_link)\n return new_event", "def event_create(req):\n try:\n utils.assert_keys(req.form, ['creator_id']+_event_args)\n event_id = db_conn.event_new(**req.form)\n json = {'event_id': event_id}\n except Exception as e:\n json = {'errors': [str(e)]}\n return req.Response(json=json)", "def create_event(self, event_type):\n setattr(self, event_type, lambda *args, **kwargs: None)\n self.register_event_type(event_type)", "def createEvent(event):\n event = {\n 'summary': event.description,\n 'location': \"\",\n 'description': \"\",\n 'start': {\n 'dateTime': event.datetime_start,\n 'timeZone': \"America/Los_Angeles\"\n },\n 'end': {\n 'dateTime': event.datetime_end,\n 'timeZone': \"America/Los_Angeles\"\n },\n }\n\n event = service.events().insert(calendarId=SF_FUNCHEAP_CAL_ID, body=event).execute()", "def _send_event(self, title, text, tags, type, aggregation_key, severity='info'):\n event_dict = {\n 'timestamp': int(time()),\n 'source_type_name': self.SOURCE_TYPE_NAME,\n 'msg_title': title,\n 'event_type': type,\n 'alert_type': severity,\n 'msg_text': text,\n 'tags': tags,\n 'aggregation_key': aggregation_key,\n }\n self.event(event_dict)", "def _create_event(\n project,\n creator_id,\n datetime_start,\n datetime_end,\n description=\"Test Event\",\n location=\"test_location\",\n is_public=False,\n event_type=\"MN\",\n coordinator=None\n):\n event = Event(\n project=project,\n description=description,\n location=location,\n is_public=is_public,\n 
datetime_start=datetime_start,\n datetime_end=datetime_end,\n coordinator=coordinator,\n creator_id=creator_id\n )\n event.save()\n return event", "def _send_event(self, title, text, tags, type, aggregation_key, severity='info'):\n event_dict = {\n 'timestamp': int(time.time()),\n 'source_type_name': self.SOURCE_TYPE_NAME,\n 'msg_title': title,\n 'event_type': type,\n 'alert_type': severity,\n 'msg_text': text,\n 'tags': tags,\n 'aggregation_key': aggregation_key,\n }\n self.event(event_dict)", "def create_product(sender, instance, **kwargs):\n if kwargs.get(\"created\"): # True just for first time when obj created\n logger.info(f\"Emails send to user with new product <{instance}>\")", "def create_event(klass, form, creator):\n\n if form.is_recurring.data:\n # Series\n return klass.create_series(form, creator)\n # Single event\n return klass.create_single_event(form, creator)", "def create_event(self, name, date):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Event.objects.create(name=name, date=date, user_id=user.id)", "def _send_event(self, event_id, data, serialize=False, binding_key=None,\n key=None):\n\n ev = self.sc.new_event(id=event_id, data=data)\n ev.key = key\n ev.sequence = serialize\n ev.binding_key = binding_key\n self.sc.post_event(ev)", "def _create_event(self, ph, category, name, pid, tid, timestamp):\n event = {}\n event['ph'] = ph\n event['cat'] = category\n event['name'] = name\n event['pid'] = pid\n event['tid'] = tid\n event['ts'] = timestamp\n return event", "async def send_event(\n self,\n payload: bytes,\n exchange_name: Optional[str] = None,\n routing_key: Optional[str] = None,\n **kwargs,\n ) -> None:\n exchange_name = exchange_name or os.getenv(\n \"PUBLISH_EXCHANGE_NAME\", \"default.in.exchange\"\n )\n routing_key = routing_key or os.getenv(\"PUBLISH_ROUTING_KEY\", \"#\")\n try:\n await self.channel.publish(\n payload=payload,\n exchange_name=exchange_name,\n routing_key=routing_key,\n **kwargs,\n )\n except ChannelClosed as err:\n await self.configure()\n if err.message.find(\"no exchange\") > 0:\n raise ExchangeNotFound(exchange_name) # type: ignore", "def create_event(organizer, description, location, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Event.objects.create(event_organizer=organizer, event_desctiption=description, event_location=loaction, event_date = time)", "def publish_event(self, event):\n event_priority = getattr(event, \"event_priority\", 99999)\n heappush(self.sprite_events, (event_priority, event))", "def create_event(self):\n self.driver.get(f'{self.base_url}/event')\n\n enter_event_name = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'eventName')))\n enter_event_name.send_keys(self.random_string)\n\n # self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div[2]/div/div/div[1]/div/div[1]/div[1]/label[2]/span[1]').click()", "def create_single_event(klass, form, creator):\n\n # Generate the event and date data\n event_and_date_data = DataBuilder.event_and_date_data_from_form(form,\n creator=creator)\n event_and_date_data = klass._remove_none_fields(event_and_date_data)\n\n event = Event(**event_and_date_data)\n event.save()\n\n # Return the Google Calendar response\n return gcal_client.create_event(event)", "def broadcast_publisher_info(sender, instance, created, **kwargs):\n if created:\n # notify creation to generic channel\n broadcast('pulse', {\n 'status': instance.status,\n 'pk': instance.pk,\n 
'slug': instance.slug,\n 'date_last_crawled': '{0}'.format(instance.date_last_crawled)\n }, event_type=settings.OBER_EVENTS_CREATE_PUBLISHER)\n else:\n # notify updates, with basic serialized instance\n broadcast('pulse', {\n 'status': instance.status,\n 'status_changed': instance.status != instance._original.get('status'),\n 'pk': instance.pk,\n 'slug': instance.slug,\n 'date_last_crawled': '{0}'.format(instance.date_last_crawled)\n }, event_type=settings.OBER_EVENTS_UPDATE_PUBLISHER)", "def on_publish(client, userdata, mid):\n print(\"Message Published.\")", "def add_event(self, event_data):\n event_data['_datetime'] = datetime.datetime.today()\n self.event_list.append(event_data)\n self._notify_all(event_data)", "def __publish_event(self, event_type, registration):\n # Get the import or export reference\n if event_type in rsa_beans.EXPORT_TYPES:\n reference = registration.get_export_reference()\n else:\n reference = registration.get_import_reference()\n\n # Prepare the event bean\n event = rsa_beans.RemoteServiceAdminEvent(event_type,\n self._context.get_bundle(),\n reference,\n registration.get_exception())\n\n # Call listeners in the current thread\n for listener in self._listeners[:]:\n listener.remoteAdminEvent(event)", "def CreateNewEvent(arguments: List[Tuple[str, type]] = [], event_name: str = '') -> Event:\n pass", "def log_create(sender, instance, created, **kwargs):\n if created:\n stracks.user(instance).log(\"? has been created\")", "def mark_as_new(self, message=None):\n from noc.core.service.pub import publish\n\n data = {\"source\": self.source}\n data.update(self.raw_vars)\n msg = {\n \"id\": str(self.id),\n \"ts\": time.mktime(self.timestamp.timetuple()),\n \"object\": self.managed_object.id,\n \"data\": data,\n }\n stream, partition = self.managed_object.events_stream_and_partition\n publish(\n orjson.dumps(msg),\n stream=stream,\n partition=partition,\n )\n\n self.delete()", "def write_event(self, event):\n self.events_written.append(event)", "async def create(\n self,\n ctx,\n title: typing.Union[str],\n role: typing.Union[str],\n description: typing.Union[str],\n color: typing.Union[str]\n ):\n\n # Skipping non-eventer users\n if not self._have_permission(ctx.author, ctx.guild):\n await ctx.send(embed=decoration.embeds.ERRORS[\"NO_PERM\"])\n return\n\n # Creating role\n event_role = await ctx.guild.create_role(name=role, colour=decoration.colors.NAMED[color])\n\n # Save new event-type in database\n created_at = datetime.now()\n connector.createNewEventType(title=title, role_id=event_role.id, created_at=created_at, description=description, role_color=color, enabled=False, guild_id=ctx.guild.id)\n\n # Send back info message\n message_payload = [\n {\"Название\": title, \"Описание\": description, \"Роль\": event_role},\n created_at,\n ctx.author.name,\n ctx.author.avatar_url\n ]\n await ctx.send(embed=decoration.embeds.INFO[\"EVENT_TYPE_CREATED\"](*message_payload))", "def post_event(\n api_key=None,\n app_key=None,\n title=None,\n text=None,\n date_happened=None,\n priority=None,\n host=None,\n tags=None,\n alert_type=None,\n aggregation_key=None,\n source_type_name=None,\n):\n _initialize_connection(api_key, app_key)\n if title is None:\n raise SaltInvocationError(\"title must be specified\")\n if text is None:\n raise SaltInvocationError(\"text must be specified\")\n if alert_type not in [None, \"error\", \"warning\", \"info\", \"success\"]:\n # Datadog only supports these alert types but the API doesn't return an\n # error for an incorrect alert_type, so we can do 
it here for now.\n # https://github.com/DataDog/datadogpy/issues/215\n message = 'alert_type must be one of \"error\", \"warning\", \"info\", or \"success\"'\n raise SaltInvocationError(message)\n\n ret = {\"result\": False, \"response\": None, \"comment\": \"\"}\n\n try:\n response = datadog.api.Event.create(\n title=title,\n text=text,\n date_happened=date_happened,\n priority=priority,\n host=host,\n tags=tags,\n alert_type=alert_type,\n aggregation_key=aggregation_key,\n source_type_name=source_type_name,\n )\n except ValueError:\n comment = (\n \"Unexpected exception in Datadog Post Event API \"\n \"call. Are your keys correct?\"\n )\n ret[\"comment\"] = comment\n return ret\n\n ret[\"response\"] = response\n if \"status\" in response.keys():\n ret[\"result\"] = True\n ret[\"comment\"] = \"Successfully sent event\"\n else:\n ret[\"comment\"] = \"Error in posting event.\"\n return ret", "def ticket_created(self, ticket):\n if 'ticket' not in self.sources:\n return\n gnp = GrowlNotificationPacket(notification='ticket',\n title='Ticket #%d created' % ticket.id,\n description=self._ticket_repr(ticket))\n gs = GrowlSender(self.env)\n gs.notify(self._get_hosts('ticket'), gnp)", "def __init__(self,\n event_id: str,\n event_type: str,\n event_data: dict = None,\n event_origin: str = None,\n event_timestamp: datetime.datetime = None,\n object_type: str = None,\n object_id: str = None,\n object_key: str = None):\n if event_timestamp is None:\n event_timestamp = datetime.datetime.utcnow().isoformat()\n self._event = dict(\n id=event_id,\n type=event_type,\n data=event_data,\n origin=event_origin,\n timestamp=event_timestamp,\n object_type=object_type,\n object_id=object_id,\n object_key=object_key\n )", "def add_object_created_notification(self, dest: \"IBucketNotificationDestination\", *filters: \"NotificationKeyFilter\") -> None:\n return jsii.invoke(self, \"addObjectCreatedNotification\", [dest, *filters])", "def create_event(self, title, start, airtable_record_id, duration=1, timezone=TIMEZONE) -> Dict:\n event_body = {\n 'summary': title,\n 'description': airtable_record_id + \" webhook\",\n 'start': {\n 'dateTime': start.isoformat(),\n 'timeZone': timezone,\n },\n 'end': {\n 'dateTime': (start + timedelta(hours=duration)).isoformat(),\n 'timeZone': timezone,\n }\n }\n\n created_event = self.service.events().insert(calendarId=self.calendar_id, body=event_body).execute()\n print('Event created: %s' % (created_event.get('htmlLink')))\n\n return created_event", "def register_to_event(request):\n pass", "def send(self, event):\r\n self.events.append(event)", "def create(self, *args, **kwargs):\n pass", "def kind(self) -> EventKind:\n return EventKind.CREATE", "def _make_event(self, event_type, code, value):\n secs, msecs = convert_timeval(time.time())\n data = struct.pack(EVENT_FORMAT,\n secs,\n msecs,\n event_type,\n code,\n value)\n self._write_device.write(data)\n self._write_device.flush()", "def create_subscription(self, client_URI_endpoint, event_destination_id,\n name, subscription_context):\n self.client_URI_endpoints[client_URI_endpoint] = \\\n Event(event_destination_id, name, subscription_context)\n self.write_subscriptions_to_tmp(self.client_URI_endpoints)", "def onNewEvent(self, event):\n self._logger.debug('Received event: %s' % event)", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def 
createEvent(self, aid, time, bz, location):\n\n event = Event()\n\n a_id = EventId()\n a_id.setHashed(aid)\n admin = User.getById(a_id)\n event.admin = admin\n\n date = EventDatetime()\n date.fromString(time)\n event.datetime = date\n\n event.description = bz\n\n event.location = location\n\n event.create()\n\n return event.getAsDict()", "def create_event(self, event, idempotency_key=None):\n if len(event.get(\"metadata\", {})) > METADATA_LIMIT:\n raise ValueError(\n \"Number of metadata keys exceeds {}.\".format(METADATA_LIMIT)\n )\n\n headers = {\n \"idempotency-key\": idempotency_key,\n }\n\n self.request_helper.request(\n EVENTS_PATH,\n method=REQUEST_METHOD_POST,\n params=event,\n headers=headers,\n token=workos.api_key,\n )\n\n return True", "def test_new_general_event(client, transactional_db, mocker):\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n settings.SNS_ARN = arn\n mock = mocker.patch('coordinator.api.models.boto3.client')\n assert Event.objects.count() == 0\n\n ev = Event(event_type='error', message='test error event')\n ev.save()\n assert Event.objects.count() == 1\n assert mock().publish.call_count == 1\n message = {\n 'default': json.dumps({\n 'event_type': 'error',\n 'message': 'test error event',\n 'task_service': None,\n 'task': None,\n 'release': None\n })\n }\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n mock().publish.assert_called_with(Message=json.dumps(message),\n MessageStructure='json',\n TopicArn=arn)\n settings.SNS_ARN = None", "def test_publish_as_publisher(self):\n e = self.make_event()\n e.save()\n event_id = e.id\n resp = self.request_with_role('/admin/events/publish/%s' % event_id, role='publisher',\n method='POST', follow_redirects=True)\n self.assertIn('Event published', resp.data)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(Event.objects(published=True).count(), 1)", "def _InsertSingleEvent(self, title='One-time Tennis with Beth',\n content='Meet for a quick lesson', where='On the courts',\n start_time=None, end_time=None):\n\n new_event = self._InsertEvent(title, content, where, start_time, end_time,\n recurrence_data=None)\n\n print 'New single event inserted: %s' % (new_event.id.text,)\n print '\\tEvent edit URL: %s' % (new_event.GetEditLink().href,)\n print '\\tEvent HTML URL: %s' % (new_event.GetHtmlLink().href,)\n\n return new_event", "def event(name, callback=None):\n try:\n # event() is expected to be very rare -- e.g., called from\n # migrate_from_wordpress or wp_newCategory. If it becomes more common,\n # this will need work.\n try:\n # Size is in bytes; event documents are rare and very small\n yield motor.Op(\n _db.create_collection, 'events', size=100 * 1024, capped=True)\n logging.info(\n 'Created capped collection \"events\" in database \"%s\"',\n _db.name)\n except pymongo.errors.CollectionInvalid:\n # Collection already exists\n collection_options = yield motor.Op(_db.events.options)\n if 'capped' not in collection_options:\n logging.error(\n '%s.events exists and is not a capped collection,\\n'\n 'please drop the collection and start this app again.' 
%\n _db.name\n )\n\n result = yield motor.Op(_db.events.insert,\n {'ts': datetime.datetime.utcnow(), 'name': name},\n manipulate=False) # No need to add _id\n\n if callback:\n callback(result, None)\n except Exception, e:\n if callback:\n callback(None, e)", "def package_created_event(order, event_bus_name):\n\n return {\n \"Time\": datetime.datetime.now(),\n \"Source\": \"ecommerce.warehouse\",\n \"Resources\": [order[\"orderId\"]],\n \"DetailType\": \"PackageCreated\",\n \"Detail\": json.dumps(order),\n \"EventBusName\": event_bus_name\n }", "def send_to_external_api(instance, created, *args, **kwargs):\n if created:\n instance.submit_to_myswissalps()", "def _output_event(self, event, thumbnail_key, triggered):\n timestamp = event.content[\"timestamp\"]\n # Create event message\n message = {\n \"analyzerId\": self._anal_id,\n \"timestamp\": timestamp,\n \"type\": \"intrusion_detection.alert\",\n \"content\": {\n \"video\": event.content[\"video_key\"],\n \"metadata\": event.content[\"metadata_key\"],\n \"thumbnail\": thumbnail_key,\n \"triggered\": triggered\n }\n }\n\n # Save event to database\n date_obj = (datetime.datetime\n .utcfromtimestamp(timestamp)\n .replace(tzinfo=timezone(\"UTC\")))\n message.update({\"date\": date_obj})\n self._database.save_event(message)\n\n # Push notification\n mlsec = repr(timestamp).split(\".\")[1][:3]\n date_str = (datetime.datetime\n .utcfromtimestamp(timestamp)\n .replace(tzinfo=timezone(\"UTC\"))\n .strftime(\"%Y-%m-%dT%H:%M:%S.{}Z\".format(mlsec)))\n message.update({\"date\": date_str})\n self._notification.push(\"Analyzer\", message)", "def publish(self, domain_event: DomainEvent | list[DomainEvent]) -> None:\n raise NotImplementedError", "def buildEvent(data):", "def send(self, event, message):\n pass", "def post(self):\n required_keys = [\"event_name\", \"timestamp\"]\n\n if request.headers.get('Content-Encoding', '') == 'gzip':\n try:\n data = gzip.decompress(request.data)\n events = json.loads(data)\n except JSONDecodeError as e:\n log.info(f\"failed to decode compressed event data: {e.msg}\")\n abort(http_client.BAD_REQUEST, \"failed to decode compressed event data\")\n else:\n events = request.json\n\n verify_log_request(events, required_keys)\n\n # The event log API should enforce the player_id to the current player, unless\n # the user has role \"service\" in which case it should only set the player_id if\n # it's not passed in the event.\n player_id = current_user[\"player_id\"]\n is_service = \"service\" in current_user[\"roles\"]\n\n for event in events:\n if is_service:\n event.setdefault(\"player_id\", player_id)\n else:\n event[\"player_id\"] = player_id # Always override!\n eventlogger.info(\"eventlog\", extra={\"extra\": event})\n\n if request.headers.get(\"Accept\") == \"application/json\":\n return jsonify(status=\"OK\"), http_client.CREATED\n else:\n return \"OK\", http_client.CREATED", "def new_event(self, subject=None):\n return self.event_constructor(parent=self, subject=subject)", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def publish(event: dict):\n return kinesis.put_record(\n StreamName=DATA_STREAM,\n Data=json.dumps(event).encode('utf-8'),\n PartitionKey=randomize_arn(INVENTORY_ARN)\n )", "def 
customer_created_handler(event):\n obj = event.obj\n\n # submit customer after creation\n obj.workflow.submit()", "def ProduceEventWithEventData(self, event, event_data):\n # type: (dict, dict) -> None\n print(\"event produced\")\n print(\"Event:\")\n print(pprint(vars(event)))\n print(\"Event data:\")\n print(pprint(vars(event_data)))", "def test_create_event(\n event_manager: EventManager, subscriber: Mock, input: bytes, expected: tuple\n) -> None:\n event_manager.handler(input)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.topic == expected[\"topic\"]\n assert event.source == expected[\"source\"]\n assert event.id == expected[\"source_idx\"]\n assert event.group == expected[\"group\"]\n assert event.state == expected[\"state\"]\n assert event.is_tripped is expected[\"tripped\"]" ]
[ "0.71787494", "0.698229", "0.692631", "0.66697675", "0.66172796", "0.6506185", "0.6419534", "0.6369318", "0.6355999", "0.63556665", "0.6323316", "0.63051665", "0.6303744", "0.6295329", "0.6221811", "0.6215113", "0.6196844", "0.61157924", "0.6028132", "0.60246474", "0.6015976", "0.6010348", "0.60055953", "0.59568", "0.5949049", "0.5917762", "0.59168625", "0.5911279", "0.59006107", "0.5891738", "0.5885339", "0.5882285", "0.5868619", "0.5859514", "0.58554965", "0.5853897", "0.58468467", "0.5843162", "0.5836493", "0.58286214", "0.5821397", "0.5812196", "0.5808608", "0.579809", "0.5791616", "0.5788517", "0.5786836", "0.57854867", "0.5776421", "0.57673806", "0.57597363", "0.57479304", "0.57432765", "0.5735895", "0.57324547", "0.572299", "0.5718751", "0.5710702", "0.57053125", "0.5694178", "0.5687026", "0.5686502", "0.5679257", "0.5676436", "0.56725353", "0.56719136", "0.5666645", "0.5666336", "0.56479603", "0.56449175", "0.5642589", "0.5637111", "0.5617584", "0.5611762", "0.56090415", "0.55983174", "0.559793", "0.5592379", "0.55909055", "0.5590723", "0.558402", "0.5576027", "0.5574516", "0.55734456", "0.5546128", "0.55396056", "0.5529022", "0.55252504", "0.55196226", "0.5518303", "0.55131066", "0.55112004", "0.5503165", "0.55023223", "0.54965055", "0.5494113", "0.5492343", "0.54901993", "0.54873097", "0.5481743" ]
0.60949755
18
Run the chat client application loop. When this function exits, the application will stop
def run_chat_client():
    while must_run:
        print_menu()
        action = select_user_action()
        perform_user_action(action)
    print("Thanks for watching. Like and subscribe! 👍")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")", "def run_message_loop(self):\n raise NotImplementedError", "def startListening(self):\n \n self.listener_thread = threading.Thread(target=self.listening, daemon=True)\n self.listener_thread.start()\n\n # stateupdate = threading.Thread(target=self.showStatus, daemon=True)\n # stateupdate.start()\n\n # Main App Loop (Keeps the Client opened)\n while self.listener_thread.is_alive():\n time.sleep(1)\n else:\n print('Shutting Main Thread-1')\n sys.exit()", "def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "async def loop(self):\n\t\twhile self.active:\n\t\t\ttry:\n\t\t\t\tawait self.process_data(await self.websocket.recv())\n\t\t\texcept exceptions.ClientError as e:\n\t\t\t\tawait self.send(0, str(e))\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tawait self.unregister()", "def run(self):\n\n try:\n while True:\n self.log.info(\"Waiting for a connection...\")\n self.mc.events.post('client_disconnected')\n self.connection, client_address = self.socket.accept()\n\n self.log.info(\"Received connection from: %s:%s\",\n client_address[0], client_address[1])\n self.mc.events.post('client_connected',\n address=client_address[0],\n port=client_address[1])\n\n # Receive the data in small chunks and retransmit it\n while True:\n try:\n data = self.connection.recv(4096)\n if data:\n commands = data.split(\"\\n\")\n for cmd in commands:\n if cmd:\n self.process_received_message(cmd)\n else:\n # no more data\n break\n\n except:\n if self.mc.config['mediacontroller']['exit_on_disconnect']:\n self.mc.shutdown()\n else:\n break\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])", "def loop_start( self ):\n self.client.loop_start()", "def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()", "def run(self):\n if not self.running:\n self.loop.run_forever()", "def _receive_message_loop(self):\n\n while True:\n try:\n message = self.connection_socket.recv(4096)\n if len(message) > 0:\n self.add_message_to_chat(message.decode('utf-8'))\n sleep(0.2)\n\n except ConnectionResetError:\n # messagebox.showerror(\"Client dropped\", \"The other person has dropped from the 
connection.\")\n self.root.destroy()", "def Listen(self):\n while True:\n time.sleep(1)", "def start(self):\n self.logger.debug(\"Starting loop\")\n self.client.loop_start()", "def run(self):\n while True:\n msg = self.recv()", "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def listening(app, conn_or_socket):\n time.sleep(1)\n conn_or_socket.settimeout(None)\n while app.connected:\n if app.quit:\n conn_or_socket.close()\n break\n try:\n # Receive Message from Partner\n data = conn_or_socket.recv(1024)\n except Exception as msg:\n #print(msg)\n #print(\"!!!!!!!!!!!!!!!!!\")\n break\n # Parse Message \n message = str(data, \"utf-8\")\n if not message:\n conn_or_socket.close()\n print(\"Partner disconnected.\")\n app.chat_content = app.chat_content + \"\\n\" + f\"{app.friend_name}: {message}\"\n app.gui.setMessage(\"chat_output\", app.chat_content)\n #print(f\"{app.friend_name}: {message}\")\n #print(\"Chat not listening anymore\")\n app.connected = False\n app.chat_content = \"Partner Disconnected\"\n try:\n app.gui.setMessage(\"chat_output\", app.chat_content)\n except:\n pass", "def run(self):\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5) # Allows up to 5 waiting clients\n\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n self.myView.updateStatus('... connected from ' + str(address))\n handler = ClientHandler(client, self.bank, self.myView)\n handler.start()\n\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus(\"Server shutting down.\")", "def useChat(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n txt = input(\"Talk to me! 
(Type 'q' to quit) \").lower()\n if txt == 'q':\n proc_stdin.write('q\\n')\n quit()\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')", "def run(self):\n self.connect()\n self.run_forever()", "def run(self):\n if self._main_loop:\n return\n self._main_loop = GObject.MainLoop()\n self._disconnect_all()\n self._register()\n logger.info(\"--- Mainloop started ---\")\n logger.info(\"Hub is ready for onboarding\")\n try:\n self._main_loop.run()\n except KeyboardInterrupt:\n # ignore exception as it is a valid way to exit the program\n # and skip to finally clause\n pass\n except Exception as e:\n logger.error(e)\n finally:\n logger.info(\"--- Mainloop finished ---\")\n self._unregister()\n self._main_loop.quit()\n self._main_loop = None", "async def run(self):\n self.add_msg(\"Type your nickname\")\n # Start the new thread that will listen to responses, while the main thread is sending answers\n start_new_thread(self.listenToRespone, ())", "def handle_chat(self):\n while True:\n if self.chat_breakout:\n return\n\n time.sleep(1)\n messages = \"\"\n for i in range(5):\n try:\n messages += f\"{self.queue.popleft()}\\n\"\n except IndexError:\n # Queue is empty but no worries\n continue\n\n if messages != \"\":\n self.loop.create_task(\n self.ingame_cog.send_chat_to_discord(\n self.bot, self.channel, messages\n )\n )", "def run(self):\n print \"Starting HumanGreeter\"\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print \"Interrupted by user, stopping HumanGreeter\"\n self.face_detection.unsubscribe(\"HumanGreeter\")\n # stop\n sys.exit(0)", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def listen(self):\n\n\t\tprint(\"Connected to the room\")\n\n\t\t#: Watch for messages coming from the server.\n\t\twhile self.joined:\n\n\t\t\t#: Wait for a message to be recieved from the server.\n\t\t\ttry:\n\t\t\t\t#: Store a most recent message for testing purposes.\n\t\t\t\tself.most_recent_message = self.client.recv(1024).decode()\n\t\t\t\tself.messages.append(self.most_recent_message)\n\t\t\texcept OSError:\n\t\t\t\tprint(\"Connection to the server has been lost.\")\n\n\t\t\t\t#: Quit from the server to do cleanup.\n\t\t\t\tself.quit(False)", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def run():\n listen_active_email_channel()", "def mainloop(self):\n self.app.mainloop()", "def run(self):\n try:\n while True:\n self.__listen()\n except (ConnectionResetError, ConnectionAbortedError):\n self.session.close()\n return", "def main_loop(self) -> None:\n while True:\n # Log a message to say that Wheatley is waiting for 'Look To!'\n self.logger.info(\"Waiting for 'Look To!'...\")\n # Sit in an infinite loop whilst we're not ringing, and exit Wheatley if enough time\n # has passed\n self._last_activity_time = time.time()\n while 
not self._is_ringing:\n time.sleep(0.01)\n if self._server_mode and time.time() > self._last_activity_time + INACTIVITY_EXIT_TIME:\n self.logger.info(f\"Timed out - no activity for {INACTIVITY_EXIT_TIME}s. Exiting.\")\n return\n\n self.logger.info(f\"Starting to ring {self.row_generator.summary_string()}\")\n if self._server_mode:\n self._tower.set_is_ringing(True)\n\n while self._is_ringing:\n self.tick()\n time.sleep(0.01)\n\n self.logger.info(\"Stopping ringing!\")\n if self._server_mode:\n self._tower.set_is_ringing(False)", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def run(self):\n\n self.dbg_state = \"running\"\n\n while self.active:\n try:\n sel_in, sel_out, sel_err = \\\n select.select(self.sockets(), [], self.sockets(), 1)\n except:\n print( sys.exc_info())\n self.logger.error(\"Select error, disconnecting\")\n self.disconnect()\n\n for s in sel_err:\n self.logger.error(\"Got socket error on: \" + str(s) + \", disconnecting\")\n self.disconnect()\n\n for s in sel_in:\n if self._socket_ready_handle(s) == -1:\n self.disconnect()\n\n # End of main loop\n self.dbg_state = \"closing\"\n self.logger.info(\"Exiting controller thread\")\n self.shutdown()", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def run(self):\n t = Thread(target=self._listen)\n t.start()", "def listen(self):\n while self.active:\n Quartz.CFRunLoopRunInMode(\n Quartz.kCFRunLoopDefaultMode, 5, False)", "def listen(self):\n if not self._logged_in:\n raise Exception('The bot is not logged in yet')\n log.debug('Starting the timeout daemon...')\n timeout_daemon = threading.Thread(\n target=self._timeout_daemon,\n name='TimeoutThread',\n daemon=True\n )\n timeout_daemon.start()\n log.info('Starting listening...')\n self.fbchat_client.listen()", "def _do_start(self, chat_id, user_id, args, update):\n \n self.tclient.send_message('Hallo! 
Ich bin ein Bot, um dir zu helfen, dir deine Nasensprüche zu merken!', chat_id)", "def loop(self):\n disconnect = False\n\n while not disconnect:\n print(\"[CONNECTION] waiting for new connection\")\n # To use accept(), server must be bound to an address and listening for connections\n # conn is a new socket object usable to send and receive data\n # addr is address bound to socket on other side of the connection\n conn, addr = self.server.accept() # accept() is blocking method untill client connects\n print(\"[CONNECTION] new connection:\", addr[0], \"accepted.\")\n client_thread = threading.Thread(target=self.__manage_client_thread, args=(conn, addr), daemon=True)\n client_thread.start()", "def receive():\r\n while True:\r\n try:\r\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\r\n msg_list.insert(tkinter.END, msg)\r\n except OSError: # Possibly client has left the chat.\r\n break", "def run(self):\n if self.polling:\n self.updater.start_polling()\n\n else:\n # webhook\n webhook_url = f\"{self.host.rstrip('/')}:{self.port}/{self.token}\"\n print(f\"Starting webhook, sending {webhook_url} to telegram servers\")\n self.updater.start_webhook(listen='0.0.0.0',\n port=self.port,\n url_path=self.token,\n key=self.key,\n cert=self.cert,\n webhook_url=webhook_url)\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n self.logger.info(f\"Bot started running, polling={self.polling}, number of threads={self.num_threads}, \"\n f\"port={self.port}\")\n self.logger.info(f\"current timezone is {datetime.datetime.now()}\")\n self.updater.idle()", "def on_start(self):\n self.run_in_background(self.__run_client)", "async def _run(self) -> None:\n\n while True:\n # The \"Exiting event loop\" checks are a bit ugly. They're in place\n # so that the event loop exits on its own at predefined positions\n # instead of randomly getting thrown a CancelledError.\n #\n # Now that I think about it, the whole function looks kinda ugly.\n # Maybe one day (yeah, right), I'll clean this up. 
I want to get it\n # working first though.\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n if self._ws is not None:\n try:\n logger.debug(\"Receiving ws packets\")\n async for packet in self._ws:\n logger.debug(f\"Received packet {packet}\")\n packet_data = json.loads(packet)\n self._process_packet(packet_data)\n except websockets.ConnectionClosed:\n logger.debug(\"Stopped receiving ws packets\")\n else:\n logger.debug(\"No ws connection found\")\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n logger.debug(\"Attempting to reconnect\")\n while not await self._reconnect():\n logger.debug(\"Reconnect attempt not successful\")\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n logger.debug(f\"Sleeping for {self.RECONNECT_DELAY}s and retrying\")\n await asyncio.sleep(self.RECONNECT_DELAY)", "def run(self):\n self.cmdloop()", "def run(self):\n\n listen_port = DEBUGGER_PORT if \"RENPY_DEBUGGER_PORT\" not in os.environ else os.environ[\"RENPY_DEBUGGER_PORT\"]\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((\"0.0.0.0\", listen_port))\n server.listen(0)\n\n while True:\n client, client_address = server.accept()\n self.attach_one_client(client)", "def run_chat(self, auto_send_receipts=False):\n signal.signal(signal.SIGINT, self._stop_handler)\n signal.signal(signal.SIGQUIT, self._stop_handler)\n messages_iterator = self.receive_messages()\n while self._run:\n try:\n message = next(messages_iterator)\n\n except ConnectionResetError as e:\n self.logger.exception(\"Got an error attempting to get a message from signal!\")\n raise\n except Exception as e:\n self.logger.exception(\"Got an error attempting to get a message from signal!\")\n continue\n\n self.logger.info(f\"Receiving message {message}\")\n\n\n if message.payment:\n for func in self._payment_handlers:\n func(message.source, message.payment)\n continue\n\n if not message.text:\n continue\n\n for _, regex, func in self._chat_handlers:\n match = re.search(regex, message.text)\n if not match:\n continue\n\n try:\n reply = func(message, match)\n except Exception as e: # noqa - We don't care why this failed.\n self.logger.exception(f\"Failed to process message {message}\")\n continue\n\n if isinstance(reply, tuple):\n stop, reply = reply\n else:\n stop = True\n\n # In case a message came from a group chat\n group_id = message.group_v2 and message.group_v2.get(\"id\") # TODO - not tested\n\n # mark read and get that sweet filled checkbox\n try:\n if auto_send_receipts and not group_id:\n self.send_read_receipt(recipient=message.source['number'], timestamps=[message.timestamp])\n\n if group_id:\n self.send_group_message(recipient_group_id=group_id, text=reply)\n else:\n self.send_message(recipient=message.source['number'], text=reply)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n if stop:\n # We don't want to continue matching things.\n break\n return", "def receive():\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.insert(tkinter.END, msg)\n \n except OSError: # Possibly client has left the chat.\n break", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n self.loop = 
IOLoop.instance()\n self.loop.start()", "def reactor_loop():\n def on_running():\n \"\"\"\n called when the twisted reactor is running\n \"\"\"\n log.msg('reactor_loop Starting')\n try:\n conn = client.connect(reactor)\n si446x_do = Si446xComponent(conn)\n conn.addCallback(si446x_do.start)\n conn.addErrback(si446x_do.on_error)\n except error.DBusException, e:\n log.msg('reactor_loop Setup Error: {}'.format(e))\n reactor.stop()\n\n signal.signal(signal.SIGINT, SIGINT_CustomEventHandler)\n signal.signal(signal.SIGHUP, SIGINT_CustomEventHandler)\n reactor.callWhenRunning(on_running)\n reactor.run()", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n self._app.processEvents()\n try:\n while not self._stop:\n # GRobot._app.processEvents()\n while self._app.hasPendingEvents():\n self._app.processEvents()\n gevent.sleep(0.01)\n except Exception, e:\n logger.error(e)\n logger.debug('Goodbye GRobot')", "def start():\n if not cfg.irc:\n logging.warning(\"Skipping IRC module: no configuration provided\")\n return\n\n server = cfg.irc.server\n port = cfg.irc.port\n ssl = cfg.irc.ssl\n nick = cfg.irc.nick\n channels = cfg.irc.channels\n\n logging.info(\n \"Starting IRC client: server=%r port=%d ssl=%s nick=%r \" \"channels=%r\",\n server,\n port,\n ssl,\n nick,\n channels,\n )\n\n bot = Bot(cfg.irc)\n utils.DaemonThread(target=bot.start).start()\n\n evt_target = EventTarget(bot)\n events.dispatcher.register_target(evt_target)\n utils.DaemonThread(target=evt_target.run).start()", "def main():\n\n bus_controller = BusController()\n steve = TelegramController(\"990223452:AAHrln4bCzwGpkR2w-5pqesPHpuMjGKuJUI\")\n message_sender = MessagesSender()\n db = DBManager()\n gui = GUI()\n\n message_sender.connect(bus_controller=bus_controller)\n bus_controller.connect(telegram_bot=steve, message_sender=message_sender)\n steve.connect(bus_controller=bus_controller, gui=gui, message_sender=message_sender, data_base=db)\n gui.connect(bus_controller=bus_controller, telegram_controller=steve, message_sender=message_sender, data_base=db)\n\n message_sender.start()\n bus_controller.start()\n steve.start()\n gui.start()", "def run_forever(self):\n reactor.run()", "def run_forever(self):\n reactor.run()", "def start(self):\n\n self.keep_running = True # Set running flag to true\n self.th = threading.Thread(target=self.listenSocket)\n self.th.daemon = True # Thread will terminate with the main\n self.th.start()\n self.th.join(0)", "def run(self):\n # I want to reload the list of scripts on each\n # disconnect/reconnect: it's easier to debug. 
:)\n scripts = self.list_scripts()\n while True:\n if 0 != self.wm.state['buttons']:\n [self.run_handler(script) for script in scripts]\n if self.omg_please_stop:\n self.omg_please_stop = False\n break\n # On timeout, close wiimote connection\n if self.inactive():\n self.wm.close()\n sleep(5)\n break\n sleep(0.1)", "def run(self):\n try:\n self.eventloop.run_forever()\n except KeyboardInterrupt:\n self.logger.info(\"Interrupt received, shutting down.\")\n except Exception:\n self.logger.exception(\"Unhandled exception raised, shutting down.\")\n finally:\n self._shutdown()\n self.logger.debug(\"Closing event loop\")\n self.eventloop.close()\n if self._restarting:\n self.logger.info(f\"Restarting with command line: {sys.argv}\")\n os.execl(sys.executable, sys.executable, *sys.argv)", "def main():\n # Clear the terminal before a new run\n os.system('cls') \n\n # Create the server_socket object and bind it to the desired address\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(SERVER_ADDRESS)\n \n # Start listening for new connections\n server_socket.listen()\n print(f\"[LISTENING] SERVER IS NOW LISTENING FOR NEW CONNECTIONS ON {SERVER_ADDRESS}\")\n\n while True:\n # Accept a new connection\n conn, addr = server_socket.accept()\n # Start a new thread handling the new connection\n client_thread = threading.Thread(target=handle_client, args=(conn, addr))\n client_thread.start()", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def run(self):\n ioloop.IOLoop.current().start()", "def on_running():\n log.msg('reactor_loop Starting')\n try:\n conn = client.connect(reactor)\n si446x_do = Si446xComponent(conn)\n conn.addCallback(si446x_do.start)\n conn.addErrback(si446x_do.on_error)\n except error.DBusException, e:\n log.msg('reactor_loop Setup Error: {}'.format(e))\n reactor.stop()", "def __loop(self):\n\n self.__update_table()\n self.__update_labels()\n if self.remote_stop:\n self.__stop(\"remote telegram admin\")\n else:\n self.__main_window.after(1000, self.__loop)", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def main():\n usage = \"usage: %prog [options] channels\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n return 2\n\n # do stuff\n # This runs the program in the foreground. We tell the reactor to connect\n # over TCP using a given factory, and once the reactor is started, it will\n # open that connection.\n reactor.connectTCP(HOST, PORT, MyFirstIRCFactory(args))\n # Since we're running in the foreground anyway, show what's happening by\n # logging to stdout.\n log.startLogging(sys.stdout)\n # And this starts the reactor running. 
This call blocks until everything is\n # done, because this runs the whole twisted mainloop.\n reactor.run()", "def run(self):\n run1=0\n while (run1==0):\n Publisher().sendMessage(\"updatetext\", \"\")\n time.sleep(3)", "def run(self):\n\n if reactor.running:\n misc.formatted_print('RosBridgeWebSockComms\\t|\\tTwisted reactor is already running', None, 'error')\n return\n\n self._thread = threading.Thread(target=reactor.run, args=(False,))\n self._thread.daemon = True\n self._thread.start()", "def mainloop(self):\n self.master.mainloop()", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def run(self):\n # self.root.mainloop()\n # self.root.destroy()", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def run_in_background(self):\n threading.Thread(target=self._run_loop).start()", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def run(self):\n receiver = threading.Thread(target=self.receive_data)\n # Setting daemon to True means that this Thread will be terminated when the main program ends.\n receiver.daemon = True\n receiver.start()", "def start(self):\n self.running = True\n while self.running:\n self.update_prompt()\n with exception_handler(self.engine):\n self.cmdloop()\n self.engine.reset()", "def main_loop(self) -> None:\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self._impl.main_loop()", "def run(self):\n while True:\n self.connect_lock.acquire()\n if self.stopped():\n return\n self.__connect()\n self.connect_lock.release()\n self.ws.run_forever()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n IOLoop.instance().start()", "def startLoop(self):\n if(self.loop is not None):\n raise Exception(\"Event loop is already started!\")\n self.loop = asyncio.new_event_loop()\n self.thread = Thread(target=start_thread_loop, args=(self.loop,))\n self.thread.setDaemon(True)\n self.thread.start()", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def run(self):\n if has_GUI:\n self.GUI(self.buffer)\n else:\n while True:\n message = input(\"Write your command:\\n\")\n # print(message)\n self.buffer.append(message)", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. 
Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def run(self):\n self.root.mainloop()\n #self.root.destroy()", "def run_main_loop():\n mainloop = GObject.MainLoop()", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "def run(self):\n\n print('Listening for client connections...')\n\n while not self.shutdownEvent.is_set():\n readyToRead, readyToWrite, inputError = select.select(self._socketList, [], [], self._selectTimeout)\n\n # Iterate over input sockets\n for sock in readyToRead:\n # Received new connection request\n if sock is self._serverSocket:\n print('Received connection request. Establishing connection with client.')\n\n # Accept the connection and append it to the socket list\n clientSocket, address = self._serverSocket.accept()\n\n #TODO: Add this if there's a timeout blocking issue, or make the sockets non-blocking\n #clientSocket.settimeout(0.5)\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.append(clientSocket)\n finally:\n self._socketListMutex.release()\n # Received message from client\n else:\n # Read a message off of the socket\n msgData = MessageHandler.recvMsg(sock)\n\n # Process the message\n if msgData is not None:\n self.__processMsg(sock, msgData)\n # The client disconnected\n else:\n print('Client disconnected')\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.remove(sock)\n finally:\n self._socketListMutex.release()\n\n sock.close()\n\n # Cleanup\n self.__shutdown()", "def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)", "def main():\n global discuss_bot_id, discussion_chat_id\n r = requests.get('https://slack.com/api/rtm.connect', {'token': bot_token})\n discuss_bot_id = r.json()['self']['id']\n url = r.json()['url']\n r = requests.get('https://slack.com/api/conversations.list',\n {'token': bot_token})\n for channel in r.json()['channels']:\n if channel['name'] == 'discussion':\n discussion_chat_id = channel['id']\n print(discussion_chat_id)\n ws = websocket.WebSocketApp(\n url=url, on_message=on_message, on_error=on_error, on_close=on_close)\n ws.on_open = on_open\n ws.run_forever()", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "async def pubsub_loop(self) -> None:\n logged_method = \"pubsub_loop\"\n\n while 1:\n have_message = await self.pubsub_channel.wait_message()\n if not have_message:\n break\n msg = await self.pubsub_channel.get(encoding=\"utf-8\", decoder=loads)\n self.logger.debug(logged_method, f\"got message {msg}\")\n if msg[\"cmd\"] == \"start\":\n await self.add_browser(msg[\"reqid\"])\n elif msg[\"cmd\"] == \"stop\":\n await self.remove_browser(msg[\"reqid\"])\n self.logger.debug(logged_method, \"waiting for another message\")\n\n self.logger.debug(logged_method, \"stopped\")" ]
[ "0.6931434", "0.688994", "0.68419933", "0.6826242", "0.6802301", "0.67320764", "0.6727489", "0.668575", "0.6628602", "0.6600071", "0.656014", "0.6525707", "0.6512084", "0.6486234", "0.6479219", "0.64297473", "0.6423571", "0.64088786", "0.6408701", "0.6398141", "0.6387875", "0.6324894", "0.63012457", "0.62996036", "0.6294083", "0.62831503", "0.6280414", "0.62797695", "0.6252174", "0.62505287", "0.62500674", "0.6241509", "0.62340486", "0.62336475", "0.6215228", "0.62048304", "0.61978996", "0.6194384", "0.61876976", "0.6176623", "0.61664575", "0.6154829", "0.61458975", "0.61318874", "0.61250037", "0.6123706", "0.61168915", "0.61167604", "0.61133134", "0.6111742", "0.61069745", "0.61069745", "0.60962373", "0.6079575", "0.6077177", "0.60721236", "0.6069323", "0.60689706", "0.60626024", "0.60626024", "0.60558254", "0.6045382", "0.60400724", "0.60393256", "0.6037627", "0.60336006", "0.6028566", "0.6027352", "0.602398", "0.60176075", "0.6008894", "0.6007809", "0.6003055", "0.59978247", "0.5992172", "0.5987762", "0.5981285", "0.59791803", "0.59779316", "0.5975824", "0.5971101", "0.5970877", "0.5970521", "0.5966957", "0.5960023", "0.5956529", "0.59550714", "0.5945903", "0.59419465", "0.5939492", "0.5934947", "0.5923729", "0.59224814", "0.59182084", "0.5915832", "0.59109056", "0.59109056", "0.5910519", "0.590931", "0.59087867" ]
0.78608006
0
Print the menu showing the available options
def print_menu():
    print("==============================================")
    print("What do you want to do now? ")
    print("==============================================")
    print("Available options:")
    i = 1
    for a in available_actions:
        if current_state in a["valid_states"]:
            # Only hint about the action if the current state allows it
            print(" %i) %s" % (i, a["description"]))
            i += 1
    print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))", "def print_menu():\r\n \r\n print('Menu: \\n\\n[1] Load Inventory from File\\n[2] Add CD\\n[3] Display Current Inventory')\r\n print('[4] Delete CD from Inventory\\n[5] Save Inventory to file\\n[0] Exit Program\\n')", "def print_menu():\r\n\r\n print('Menu\\n\\n[l] load Inventory from file\\n[a] Add CD\\n[i] Display Current Inventory')\r\n print('[d] Delete CD from Inventory\\n[s] Save Inventory to file\\n[x] exit\\n')", "def print_menu():\r\n\r\n print('Menu\\n\\n[l] load Inventory from file\\n[a] Add CD\\n[i] Display Current Inventory')\r\n print('[d] delete CD from Inventory\\n[s] Save Inventory to file\\n[x] exit\\n')", "def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r", "def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))", "def print_menu():\n print(\"\\nMenu:\")\n print(\"\\t\" + colored('+', 'red') + \" for adding a complex number\")\n print(\"\\t\" + colored('s', 'red') + \" for showing the list of all complex numbers\")\n print(\"\\t\" + colored('f', 'red') + \" for filtering the list\")\n print(\"\\t\\t-the new list will contain only the numbers between indices `start` and `end`\")\n print(\"\\t\" + colored('u', 'red') + \" to undo the last operation\")\n print(\"\\t\" + colored('x', 'red') + \" to close the calculator\")", "def print_menu():\n print()\n print(\"Main Menu\")\n print(\"---------\")\n print(\"1 - Process a new data file\")\n print(\"2 - Choose units\")\n print(\"3 - Edit room filter\")\n print(\"4 - Show summary statistics\")\n print(\"5 - Show temperature by date and time\")\n print(\"6 - Show histogram of temperatures\")\n print(\"7 - Quit\")\n print()", "def print_menu_Tasks():\r\n print(\"\"\"\r\n Menu of Options\r\n 1) Add a new keyboard\r\n 2) Save Keyboards to File\r\n 3) Show current keyboard list\r\n 4) Exit Program\r\n \"\"\")", "def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')", "def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')", "def printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Cargar Catalogo de peliculas\")\n print(\"3- Buscar productoras\")\n print(\"0- Salir\")", "def 
printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Contar los elementos de la Lista\")\n print(\"3- Contar elementos filtrados por palabra clave\")\n print(\"4- Consultar elementos a partir de dos listas\")\n print(\"5- Consultar buenas peliculas\")\n print(\"0- Salir\")", "def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display", "def print_menu(title, list_options, exit_message):\n print((\"\\n\" + title + \":\"))\n for i in range(1, len(list_options) + 1):\n print(\"(\" + str(i) + \") \" + list_options[i - 1])\n print(\"(0) \" + exit_message)", "def display_menu():\n print(\"\"\"\\nChoose option:\n (1) List statistics\n (2) Display 3 cities with longest names\n (3) Display county's name with the largest number of communities\n (4) Display locations, that belong to more than one category\n (5) Advanced search\n (0) Exit program\"\"\")", "def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()", "def menu_eng(self):\n intro = \"Here are the options available for you to choose from\"\n option1 = \"[1] UNLOCK BY CREDENTIALS\"\n option2 = \"[2] UNLOCK BY QR CODE\"\n option3 = \"[3] UNLOCK WITH BLUETOOTH\"\n option4 = \"[4] BACK\"\n print(intro, option1, option2, option3, option4, sep='\\n')", "def display_menu_options(length):\r\n print('\\n***********************************************\\nVeuillez choisir une option entre 1 et', str(length))", "def display_menu(self):\n print(\"\"\"\nLogistic System Menu\n1. Add Vehicles\n2. Add Item To The Cart\n3. Complete The Order\n4. Track The Order\n5. Quit \"\"\")", "def options_menu(title, options):\n\tprint width_screen * \"-\"\n\tprint(title.center(width_screen))\n #\tprint '{:^{width_screen}}'.format(title,width_screen)\n\tprint width_screen * \"-\"\n\tfor x in range(len(options)):\n\t\tprint str(x+1) + \". {}\".format(options[x])\n\tprint width_screen * \"-\"\n\treturn(options)", "def print_product_menu():\r\n print(\"\"\"\r\n Menu\r\n 1 - Display Product Price Inventory\r\n 2 - Add New Product\r\n 3 - Save Session\r\n 4 - Exit Session \r\n \"\"\")", "def henhouseDisplayMenu () :\r\n print('1.Predict egg production')\r\n print('2.Display needs')\r\n print('0.Exit henhouse management')\r\n print()\r\n print('Please choose an option from the above menu')", "def menu():\n\tprint (\"\\n\\tSeleccionar una opcion\")\n\n\tprint (\"\\t1.- Resistencia en un Alambre \")\n\n\tprint (\"\\t2.- Voltaje\")\n\n\tprint (\"\\t3.- Corriente\")\n\n print (\"\\t4.- Resistencia\")\n\n\tprint (\"\\t5.- salir\")", "def display_menu():\n print()\n print(\"Commands:\")\n print(\" quit - Quit\")\n print(\" new - Create new account\")\n print(\" display - Display account information\")\n print(\" deposit - Desposit money\")\n print(\" check - Write a check\")", "def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)", "def showMenu():\n print( \"1. Create New User\" )\n print( \"2. 
Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. Exit\" )", "def display_menu(self):\n print(\"\\n{}\".format(self.message))\n for i, h in enumerate(self.menu_handlers):\n # iterate through handlers and display menu text\n print(\"\\t{}. {}\".format(i+1, h.get_display_text()))\n # add option for exiting the program\n print(\"\\t{}. {}\".format(0, \"Exit\"))", "def DisplayMenu():\n print(\"\\n\\t\\t\\t Video Store \\n\")\n print(\"\"\" 1. Add new Title\n 2. Check out a video\n 3. Return a video\n 4. Receive a rating\n 5. Delete title\n 6. List Inventory\n E. Exit\n \"\"\")", "def main_menu():\n print('\\n', '='*50, sep='')\n print(\"Choose an option by number: \")\n print(\"\\t 1 = Create or Connect to a new file database\")\n print(\"\\t 2 = Create a new memory database\")\n print('Type exit to quit program!')\n print('='*50, '\\n', sep='')", "def menu(self):\n print(f\"{str(self)}\")", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu", "def printMenu():\n print (\"Calculator menu:\")\n print (\" + for adding a rational number\")\n print (\" c to clear the calculator\")\n print (\" u to undo the last operation\")\n print (\" x to close the calculator\")", "def display_menu(self):\n\t\t\n\t\tmenu = {\n\t\t\t'1' : self.jouerMusique,\n\t\t\t'2' : self.enregistrerMusique,\n\t\t\t'3' : self.notesCmd,\n\t\t\t'4' : self.notesGraphical,\n\t\t\t'5' : self.changeInstrument,\n\t\t\t'6' : self.switchDisplay,\n\t\t\t'7' : self.stop\n\t\t}\n\t\t\n\t\tif self.display:\n\t\t\tstatut = \"activé\"\n\t\telse:\n\t\t\tstatut = \"désactivé\"\n\t\t\n\t\tprint \"################\"\n\t\tprint \"##### MENU #####\"\n\t\tprint \"################\"\n\t\tprint\n\t\tprint \"1. Jouer une musique écrite\"\n\t\tprint \"2. Enregistrer une musique écrite\"\n\t\tprint \"3. Jouer des notes en ligne de commande\"\n\t\tprint \"4. Jouer des notes sur un clavier graphique\"\n\t\tprint \"5. Changer d'instrument\"\n\t\tprint \"6. Activer/désactiver les affichages et enregistrements. (actuellement \" + statut + \")\"\n\t\tprint \"7. Quitter\"\n\t\tprint\n\t\t\n\t\tn = str(raw_input('Choix : '))\n\t\tmenu.get(n,self.default)()", "def display_options(self):\n print()\n options = list(self.get_commands().values())\n options.sort(key=lambda op: int(op.name))\n\n for option in options:\n print(f'{\"%3d\" % int(option.name)}. {option.description}')", "def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])", "def Infor_menu():\n \n import sys\n d = ''\n msg = '' \n while d == '':\n print('\\nINFORMATION MENU')\n print('1. Display coordinate sequence')\n print('2. Display SEQRES sequence')\n print('3. Display Alignment sequence')\n print('4. Display all non-water ligands in the protein(if any)')\n print('q. 
Quit')\n option = input('Select an option: ')\n if option.lower() == 'q':\n sys.exit()\n elif option == '1':\n msg = 'Option 1'\n d = display_cord_seq()\n elif option == '2':\n msg = 'Option 2'\n d = display_seqres_seq()\n elif option == '3':\n msg = 'Option 3'\n d = display_algn_seq()\n elif option == '4':\n msg = 'Option 4'\n d = display_all_nonwater_L()\n else:\n print ('Invalid selection!')\n return msg, d", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"c\": (\"Calibrate\", self.calibrate),\n \"t\": (\"test restore\", self.calibrate),\n \"s\": (\"Check status\", self.status),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"Your selection: \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def main_menu(self):\n welcome = \"\"\"\n ************************\n * WELCOME TO CARSHARE! *\n ************************\n \"\"\"\n intro = \"Are you a USER or an ENGINEER?\"\n option1 = \"[1] USER\"\n option2 = \"[2] ENGINEER\"\n print(welcome, intro, option1, option2, sep='\\n')", "def menuItem(*args):\n\toptionsWindow()", "def print_menu(self):\n for i,x in enumerate(self.menu):\n print(\"%i. %s\"%(i+1,x))\n return self.get_int()", "def menuPrint(self):\n print(self.menu[['name', 'review_count', 'rating']].sort_values(['rating'], ascending = False).sort_index())\n \n menuCheck = str(input(\"\\nIs this what you're looking for? (Yes (y) or No (n)) \"))\n #If no, prompt ask question again\n while menuCheck not in ['yes', 'y']:\n menuPrompt.ask(self)\n print(self.menu[['name', 'review_count', 'rating']].sort_values(['rating'], ascending = False).sort_index())\n menuCheck = str(input(\"\\nIs this what you're looking for? (Yes (y) or No (n)) \"))", "def display(self):\n while (True):\n self.print()\n choice = self.get_choice()\n if (choice == len(self.options)):\n break\n else:\n self.options[choice].function()", "def get_all_menu():", "def print_main_menu():\n print(\"\\nWelcome to the Zendesk Ticket Viewing System!\\nInstructions:\")\n print(\"~ Enter '1' to view all tickets\")\n print(\"~ Enter '2' to view a certain ticket\")\n print(\"~ Enter '3' to view these options again\")\n print(\"To exit the ticketing system enter 'quit'\")", "def menu():\n ########################## HELLO #############################\n print(\"\"\" \n \n mm db 7MM db \n MM MM \n`7MMpdMAo. `7M' `MF' ,pW\"Wq. mmMMmm `7MM ,M\"\"bMM ,pP\"Ybd ,6\"Yb. 7MM \n MM `Wb VA ,V 6W' `Wb MM MM ,AP MM 8I `\" 8) MM MM \n MM M8 VA ,V 8M M8 MM MM 8MI MM `YMMMa. ,pm9MM MM \n MM ,AP VVV YA. ,A9 MM MM `Mb MM L. I8 8M MM MM \n MMbmmd' ,V `Ybmd9' `Mbmo .JMML. `Wbmd\"MML.M9mmmP' `Moo9^Yo..JMML.\n MM ,V \n.JMML. OOb\" \n\nVersion: 1.1 Autor: Paloma Sánchez y Juan Pablo Egido OS: Linux/Debian\n\"\"\")\n\n ######################### MENU ############################\n print(\"\\n [!] Bienvenid@ a Pyotidsai.\")\n print('\\n [!] Introduce la opción deseada '\n '\\n [!] (1) Crear reglas'\n '\\n [!] (2) Detectar malware(SNORT)'\n '\\n [!] (3) Machine Learning Classifier'\n '\\n [!] 
(0) Salir')", "def menu():\n print('''\nMenu for deposits:\n 'n' - deposit a nickel\n 'd' - deposit a dime\n 'q' - deposit a quarter\n 'o' - deposit one dollar\n 'f' - deposit five dollars\n 'c' - cancel the purchase\n''')", "def present_menu(self, options, title='Menu:'):\n output = ''\n for count, option in enumerate(options):\n output += '%d) %s\\n' % (count+1, option)\n output += '\\nh) Help\\ns) Status\\nq) Quit\\n'\n user_input = 0\n while user_input <= 0 or user_input > len(options):\n header_print(title)\n print(output)\n print(\n \"Select an option from above (1-%d, h, s, or q):\" % len(\n options\n ),\n end=''\n )\n user_input = raw_input()\n if user_input.isdigit():\n user_input = int(user_input)\n elif user_input == 'h':\n header_print(self.data['help'])\n elif user_input == 's':\n self.present_status()\n elif user_input == 'q':\n sys.exit()\n else:\n print(\"Not a valid option\")\n return user_input - 1", "def callMenu():\n print(\"Menu: \\\n \\n Area of a triangle (enter 'triangleArea') \\\n \\n Area of a square (enter 'squareArea') \\\n \\n Area of a parallelogram (enter 'paraArea') \\\n \\n Area of an ellipse (enter 'ellipseArea')\\\n \\n Area of a circle (enter 'circleArea')\\\n \\n Circumference of a circle (enter 'circleCirc')\\\n \\n Enter 'quit' to quit.\\\n \\n Enter 'menu' to show the menu again.\")", "def displaymenu(option):\n DebugMessage(f\"\"\"def:displaymenu | option={option}\"\"\")\n print(question)\n print(\"Options:\" + str(option))\n response = input(\"$> \")\n\n for opt in option:\n if response.lower() == opt.lower():\n DebugMessage(f\"User selected a valid option:{opt}\")\n if opt == 'Quit':\n exit(0)\n return opt\n print(f\"{response}, is not a valid option\")\n print(gui_bar)\n displaymenu(option)", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def menus(self):\r\n return []", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tt = self.options['tab']\r\n\t\tp = self.options['sep']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.text)\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to the main menu\n ------------------------------------------------\"\"\")", "def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tp = self.options['sep']\r\n\t\tt = self.options['tab']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.abbrev+s+p+s+self.text)", "def display_menu(self):\n # Gets the number of habits that exist in the habits table\n number_of_habits = len(self.analytics.habits_table())\n # Gets the number of trackings that exist in the trackings table\n number_of_trackings = len(self.analytics.trackings_table())\n\n print(\n \"\"\"\n ________________________________________\n\n WELCOME TO YOUR HABITSBOX\n ________________________________________\n\n Everything can be achieved\n with perseverance and commitment\n\n ---------- Let's get started -----------\n\n Choose a number\n\n 0. Exit\n ----------------------------------------\n 1. Add a new habit\n ----------------------------------------\n \"\"\")\n\n if number_of_habits >= 1:\n print(\n \"\"\"\n 2. Check a habit off\n 3. 
Delete a habit\n ----------------------------------------\n \"\"\")\n if (number_of_habits == 1):\n print(\"\"\"\n Analysis -------------------------------\n\n 4. See my habit\n ----------------------------------------\n \"\"\")\n elif (number_of_habits > 1): #or (number_of_trackings >= 0):\n print(\n \"\"\"\n Analysis -------------------------------\n\n 4. See a habit\n 5. See all habits registered\n 6. See habits with same periodicity\n \"\"\")\n if (number_of_trackings > 0):\n print(\"\"\" \n 7. See my longest streak of all habits\n ----------------------------------------\n \"\"\")", "def show(self):\n # Display the menu.\n self._print_menu()\n\n # Wait for input.\n selection = None\n while selection not in self.__options:\n selection = input(\"(Choose an option): \")\n\n # Perform the command.\n _, command = self.__options[selection]\n return command(selection)", "def menu_principal(self):\r\n print('=== Bem Vindo ao Sistema de Cadastro de Clientes ===')\r\n print('\\nEscolha uma das opções abaixo.')\r\n print('1. Visualizar contatos')\r\n print('2. Busca contatos')\r\n print('3. Adicionar contato')\r\n print('4. Remover contato')\r\n print('5. Alterar contato')\r\n print('6. Carregar lista Parte I') \r\n print('7. Visualizar grupo')\r\n print('8. Gestão de grupos')\r\n print('9. Sair')", "def create_menu():", "def help_menu():\n print('\\n##################################################')\n print('################ Help Menu ###############') \n print('##################################################')\n print(' Type move or examine for each turn') \n print(' If moving, type up, down, left, or right')\n print(' If examining, you may need to answer yes or no')\n print('##################################################\\n')\n title_screen_selections()", "def main_menu():\n print(\"{} albums loaded\".format(len(inform)))\n print(menu)\n choice = input(\">>>\").upper()\n while choice != \"Q\" and \"4\":\n if choice == \"L\" or choice == \"1\":\n lists()\n elif choice == \"A\" or choice == \"2\":\n add()\n elif choice == \"M\" or choice == \"3\":\n mark()\n else:\n print(\"Invalid menu choice\")\n print(menu)\n choice = input(\">>>\").upper()\n print(\"{} albums saved to albums.csv\".format(len(inform)))", "def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)", "def Write_menu():\n \n import sys\n d = ''\n msg = '' \n while d == '':\n print('\\nW R I T E M E N U')\n print('1. Write out coordinate file')\n print('2. Write out sequence(Fasta format)')\n print('q. 
Quit')\n option = input('Select an option: ')\n if option.lower() == 'q':\n sys.exit()\n elif option == '1':\n msg = 'Option 1'\n d = submenu1()\n elif option == '2':\n msg = 'Option 2'\n d = submenu2()\n else:\n print ('Invalid selection!')\n return msg, d", "def print_choice_msg(self) -> None:\n pass", "def basic_menu(dict_of_options, back=False):\n choose = True\n dict_of_options = final_option(dict_of_options, back)\n list_of_options = list(dict_of_options.keys())\n\n while choose:\n print('The following options are available:\\n')\n for option in enumerate(list_of_options):\n print('\\t{} - {}'.format(option[0], option[1]))\n pick = input('\\nType the numeric code you wish to run\\n\\n')\n if pick in [str(i) for i in range((len(dict_of_options)))]:\n choose = dict_of_options[list_of_options[int(pick)]]()\n else:\n print('{} is not currently an option!\\n'.format(pick))", "def display_menu(self):\n return ', '.join(menu.name for menu in self.menu.all()[:3])", "def printOptions():\n\n # For each group, create a group option\n print(\"default\")", "def main_menu_input(self):\n if self.choice_menu == \"1\":\n self.category_choice()\n elif self.choice_menu == \"2\":\n print(fr.FR[9])\n for element in self.substitution_table.get_substitution():\n for substitution in element:\n sub_prod = self.product_table.get_product(substitution)\n print(sub_prod[0][1] + \" - \"\n + sub_prod[0][2] + \" - \"\n + sub_prod[0][3] + \" - \"\n + sub_prod[0][4])\n print(\"\\n\")\n elif self.choice_menu == \"3\":\n self.initialise_bdd()\n self.save_product_bdd()\n elif self.choice_menu == \"4\":\n self.leave_main_menu -= 1", "def display_collected():\n os.system('clear') # clearscreen\n print('BS4 widget generator')\n print('-' * 20)\n print('options selected:')\n for col in collected:\n print(col)\n\n print('-' * 20)\n\n return", "def display_menu(self, title: str, subtitle: str = \"\\n\", question: dict = None):\n self.clean()\n print(f\"{title}\")\n print(f\"{subtitle}\\n\")\n for key, value in question.items():\n print(f\"\\t{key} - {value[1]}\")\n print(\"\\n\" * 2)", "def get_items(self):\n options = \"\"\n for item in self.menu:\n options += f\"{item.name}/\"\n return options", "def _print_enum_opt(self, option, choices):\n for key in choices:\n if key == self.conf[option]:\n print(\"* %s\" % key)\n else:\n print(\" %s\" % key)", "def menu():\n print('\\n----------------------------- MENU ------------------------------')\n print('0 - EXIT PROGRAM | 10 - Create user')\n print('1 - Read temperature | 11 - Check users info')\n print('2 - Read air humidity | 12 - Update user infos')\n print('3 - Read soil humidity | 13 - Remove user')\n print('4 - Visualize the last record | 14 - Read both (temp. and umid.) ')\n print('5 - Visualize all record | 15 - Delete record from collection by id')\n print('6 - Delete last record | 16 - *')\n print('7 - Delete all record | 17 - *')\n print('8 - Visualize insertions by user | 18 - *')\n print('C - CLEAR SCREEN | 19 - *')\n print('-----------------------------------------------------------------\\n')\n # * to be implemented", "def get_one_menu_option():", "def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()", "def show_menu(menulist):\n text = \"0 ... Cancel\\n\"\n for item in menulist:\n text += \"{} ... 
{}\\n\".format(menulist.index(item)+1, item)\n return text", "def menu():\n ferme_fenetre()\n Menu()", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for something\\n\"\n menu_string += \"\\t4. Get a statistic\\n\"\n menu_string += \"\\t5. Undo/Redo\\n\"\n menu_string += \"\\t0. Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {'0': self.__no_command,\n '1': self.__modify_submenu,\n '2': self.__grade_submenu,\n '3': self.__search_submenu,\n '4': self.__statistics_submenu,\n '5': self.__undo_submenu\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def drawMenu(self):\n try:\n for key in self.order_of_keys:\n print(\"\\r[key {:8}] : {}\".format(key, self.keybindings[key][self.KEY_DESCRIPTION]))\n except KeyError:\n print(\"Error: Keys found GoPiGo3WithKeyboard.order_of_keys don't match with those in GoPiGo3WithKeyboard.keybindings.\")", "def help_opt(self):\n print(OPTIONS)", "def menu_system(self):\n # main menu and user interaction\n while True:\n # print menu\n print()\n print(\"Welcome to your Smart Plant. Please select an option.\")\n print(\"1. Begin local data collection.\")\n print(\"2. Cloud configuration center.\")\n print(\"3. Exit.\")\n # print(\"2. Calibrate moisture sensor.\") - would be an extra feature\n select = input(\"Selection: \")\n\n # local collection\n if select == \"1\":\n period = input(\"Please type a sample rate (in seconds): \")\n try:\n period_t = float(period)\n self.start_sampling(period_t)\n # notify of error\n except ValueError:\n print(\"Invalid number. Try again.\")\n # cloud config\n elif select == \"2\":\n self.configure_cloud()\n # exit\n elif select == \"3\":\n self.clean_exit()\n else:\n # retry\n print(\"Invalid selection. 
Try again.\")", "def help_select(self):\n print(SELECT)", "def menu_select_option(self, app: object) -> None:\n while True:\n self.back = False\n print(\"-\" * 50)\n for key, element in self.cmd_select_option.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner l'option correspondante : \"\n )\n if entry == \"1\":\n self.menu_categories(app)\n elif entry == \"2\":\n save = app.view_save()\n print(\"-\" * 50 + \"\\nSubstitut(s) enregistré(s) :\\n\")\n for prod, sub in save.items():\n print(f\"Produit {prod} substitué par {sub} \")\n elif entry == \"0\":\n break\n else:\n print(\"\\nCommande incorrecte\")", "def main_menu(ftp):\n print(\"What would you like to do?\")\n for key in sorted(MAIN_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, MAIN_MENU_SELECTIONS[key][0]))\n choice = raw_input(\"> \")\n while choice not in list(MAIN_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n handle_main_menu_choice(choice, ftp)", "def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return", "def print_help_menu():\n\n print(\"\"\"This program contains two different tools for calculating values\n [--rerolls, -r] p n t\n Find the expected number of rolls/rerolls to achieve all successes\n p is the probability of success on a given die roll\n n is the number of objects we are rolling\n t is the number of trials we are performing to find the average\n [--success, -s] p, r, n\n Directly calculates the probability of achieving all successes\n p is the probability of success on a given die roll\n r is the number of rolls/rerolls we are allowed to perform\n n is the number of objects we are rolling\n \"\"\")", "def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])", "def showMenu():\n print '''\\nIndica una opció:\n 1 Afegir contacte\n 2 Modificar contacte\n 3 Eliminar contacte\n 4 Cercar contacte\n 5 Info de l'agenda\n 0 Sortir\\n'''\n\n try:\n global menu_option\n menu_option = int(raw_input('Opció escollida: '))\n except ValueError:\n print 'Error al escollir l\\'opció'", "def get_menus():\n\n pass", "def menu_display1(filename):\n file = filename\n x1 = '*' \n x2 = '*' + \" \"\n print(x1 * 80)\n print('\\033[1;47m* PDB FILE ANALYZER \\033[1;m{0:58s}*'.format(\"\"))\n print(x1 * 80)\n print('*\\033[1;30m Select an option from below:\\033[1;m{0:49s}*'.format(\"\"))\n print('\\033[1;30m* \\033[1;m{0:76s}*'.format(\"\"))\n print('\\033[1;30m* 1) \\033[1;m Open a PDB File{0:26s}(O){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 2) \\033[1;m Information{0:29s} (I){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 3) \\033[1;m Show histogram of amino acids{0:11s} (H){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 4) \\033[1;m Display Secondary Structure{0:13s} (S){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 5) \\033[1;m Export PDB File{0:25s} (X){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 6) \\033[1;m Exit{0:36s} (Q){0:25s}*'.format(\"\"))\n print('\\033[1;30m* 
\\033[1;m{0:76s}*'.format(\"\"))\n print('* {0:54s}Current PDB:\\033[1;31m %s \\033[1;m *'.format(\"\")%file)\n print(x1 * 80)\n print(':\\033[1;31m O\\033[1;m')", "def create_menus( self ):", "def main_menu(self):\n\n clear_screen()\n print('\\nWork Log With Database\\n')\n\n options = {'1': 'Add a new task', '2': 'Find a task', '3': 'Quit'}\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n while True:\n print()\n user_choice = input(\"Please enter the number of choice: \").lower().strip()\n\n if user_choice == '1':\n task = self.get_task_info()\n self.task.add_task(task)\n print('Task successfully added')\n self.main_menu()\n elif user_choice == '2':\n search_method_choice = self.search_method_menu()\n self.search_tasks(search_method_choice)\n elif user_choice == '3':\n print(\"\\nExiting Work Logger\")\n exit()\n else:\n print(\"\\nInvalid choice, please try again.\")", "def display_main(self):\n self.clear_terminal()\n self.main_menu()\n self.handle_selection_main()", "def main(self):\n while self.leave_main_menu:\n print(fr.FR[4], fr.FR[5], fr.FR[6], fr.FR[7])\n self.choice_menu = input(fr.FR[8])\n self.main_menu_input()", "def printhelp():", "def do_show(self, args):\n eprint(colorize('\\nAvailable exploits:', 'green'))\n for key in sorted(ACsploit.exploits):\n eprint(colorize(' ' + key, 'green'))\n eprint()" ]
[ "0.83484006", "0.80100524", "0.79167485", "0.79120266", "0.7911782", "0.7821441", "0.7816", "0.7809606", "0.7785762", "0.77756435", "0.77276397", "0.7672742", "0.7655172", "0.75879997", "0.7581685", "0.7477574", "0.74568194", "0.73829365", "0.73222256", "0.7274187", "0.7227577", "0.7220892", "0.7205434", "0.7184923", "0.7176142", "0.71540016", "0.71343493", "0.7099784", "0.7087489", "0.70736736", "0.7064931", "0.70565265", "0.70483", "0.70483", "0.7041977", "0.70392364", "0.69702744", "0.69529855", "0.6952046", "0.6930696", "0.6912638", "0.68969667", "0.6891327", "0.68912244", "0.68891144", "0.6862813", "0.68511236", "0.6835489", "0.68071896", "0.67914116", "0.6780432", "0.675906", "0.6755053", "0.67530173", "0.6731058", "0.6718742", "0.6663191", "0.66231585", "0.66014886", "0.6574912", "0.65678835", "0.65664667", "0.6558209", "0.6535941", "0.65147096", "0.6507484", "0.64986885", "0.6493069", "0.6490379", "0.64893407", "0.6474903", "0.64470625", "0.64174306", "0.64135253", "0.64096135", "0.64044666", "0.6401719", "0.6397091", "0.63891196", "0.6385495", "0.6367894", "0.63620514", "0.63528854", "0.6352884", "0.6345091", "0.63386047", "0.63361824", "0.6330495", "0.633006", "0.63279957", "0.63145703", "0.6314216", "0.6295155", "0.6290992", "0.6288769", "0.62834567", "0.6280732", "0.62611115", "0.6254544", "0.6241839" ]
0.828377
1
Ask the user to choose an action by entering the index of the action
def select_user_action():
    number_of_actions = len(available_actions)
    hint = "Enter the number of your choice (1..%i):" % number_of_actions
    choice = input(hint)
    # Try to convert the input to an integer
    try:
        choice_int = int(choice)
    except ValueError:
        choice_int = -1
    if 1 <= choice_int <= number_of_actions:
        action = choice_int - 1
    else:
        action = None
    return action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_action(self):\r\n pass", "def get_action(player):\n print_action(player)\n chosen_action_index = int(input('Please indicate your selection from the following list by inputting the number: '))\n return player.available_actions[chosen_action_index]", "def select_action(self):\n pass", "def choose_action(self, board, possible_actions):\r\n self._print_board(board)\r\n while True:\r\n user_choice = int(input(\"Which Field?\"))\r\n if user_choice in possible_actions:\r\n return user_choice\r\n else:\r\n print('Action not possible!')", "def obtain_action(self, timestep):\r\n\t\t# Loops constantly until a valid input is obtained.\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\t# Tries to obtain a valid input manually and convert it to an\r\n\t\t\t\t# integer.\r\n\t\t\t\taction = int(input('Please provide an input action index between 0 and (number of actions - 1): %i: ' % (self.num_actions-1)))\r\n\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint('Invalid input detected, try again.')\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Checks if the input is within the acceptable range of action\r\n\t\t\t# index values.\r\n\t\t\tif 0 <= action < self.num_actions:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint('Action should be an index between 0 and (number of actions - 1): %i' % (self.num_actions-1))\r\n\r\n\t\treturn action", "def askForAction(self, role, index, currPos, possActions):\r\n message = str(role) + \" -> \" + str(index)\r\n if role == AgentRole.COP or Settings.isDebug():\r\n message += \"\\nCurrent position : \" + str(currPos)\r\n message += \"\\nPossible actions :\\n \" + \"\\n \".join([a.__repr__() for a in possActions])\r\n self.showMessage(message, surface=self.infoPanel, bg_color=gu.INFO_PANEL_COLOR)\r\n request = raw_input(\"Destination, TicketType = \")\r\n while \",\" not in request:\r\n print \"You missed the comma!\"\r\n request = raw_input(\"Destination, TicketType = \")\r\n fields = request.split(\",\")\r\n dest, ticket = int(fields[0]), fields[1].strip().upper()\r\n return dest, ticket", "def choose_action(self, board, possible_actions):\r\n pass", "def askForAction(self, role, index, currPos, possActions):\r\n print str(role) + \" -> \" + str(index)\r\n print \"Current position : \" + str(currPos)\r\n print \"Possible actions : \" + str(possActions)\r\n # request = raw_input(\"Destination, TicketType = \").split(',')\r\n request = raw_input(\"Destination, TicketType = \")\r\n while \",\" not in request:\r\n print \"You missed the comma!\"\r\n request = raw_input(\"Destination, TicketType = \")\r\n fields = request.split(\",\")\r\n dest, ticket = int(fields[0]), fields[1].strip().upper()\r\n return dest, ticket", "def get_next_action(self):\n chosen_action = None\n while chosen_action is None:\n choice = input('Action: ')\n chosen_action = KEY_MAP.get(choice)\n\n return chosen_action", "def select_action(self, state):", "def choose_action(self, valid_list):\n \n action_str = input(\"Choose action: \").lower()\n print()\n \n if action_str in valid_list:\n return action_str\n \n else:\n print(\"Invalid action!\")\n return False", "def main_menu_selection():\n action = input('''\n Pleaes select one:\n\n a - Send a thank you\n b - Create a report\n c - Quit\n >''')\n\n return action.strip()", "def choose_action(self, obs, **kwargs):\n pass", "def action(self,input,session,context):\n #index = int(input) - 1\n #if index < 0:\n # raise IndexError('Menu option can not be less than 1')\n def make_index(elt):\n idx, item = elt\n if item.custom_index is not None: return 
str(item.custom_index)\n else: return str(idx)\n\n valid_inputs = map(make_index, enumerate(self.menu_items))\n index = valid_inputs.index(input)\n\n return self.menu_items[index].next_screen", "def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]", "def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention", "def chooseAction(self):\n print \"nothing\"\n pass", "def action(self, option):\n try:\n i = int(option) - 1\n try:\n task = self.tasks[i]\n print(\"\\n*** Steps for\", task.name, \"P\" + str(task.priority), \"***\")\n s = 0\n for step in task.steps:\n s += 1\n print(\"\\t\", s, \". \", step)\n input()\n print(\"*********************\" + len(task.name)*\"*\")\n except IndexError as e:\n print(\"\\n\\\"\" + str(i) + \"\\\" is not a valid task index.\", type(e))\n except ValueError:\n if option in (\":A\", \"A\"):\n self.assign()\n elif option in (\":D\", \"D\"):\n self.delete()\n elif option in (\":Q\", \"Q\"):\n pass\n else:\n print(\"\\n\\\"\" + option + \"\\\" is not a valid option.\")", "def action(self,input,session,context):\n index = int(input) - 1\n if index < 0:\n raise IndexError('Menu option can not be less than 1')\n return self.menu_items[index].next_screen", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def choose_action(self, game_state):\n util.raise_not_defined()", "def select_action(self, state):\n pass", "def choose_action(self, board):\n raise NotImplementedError", "def choose_action(self, state, task=0):\n pass", "def onActionChosen(self, agent, action):\n\n pass", "def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]", "def _take_action(self, action_idx: ActionType) -> None:\n raise NotImplementedError(\"\")", "def choose_action(self, *args, **kwargs):\n return NotImplementedError", "def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def choice_stay_return(self, text, action):\n while True:\n print(\"\"\"\n 0. Back to the main menu\n 1. 
{}\n \"\"\".format(text))\n choice = pyip.inputNum('Enter a number: ')\n if choice == 0:\n # Clean up the console\n self.clear_console()\n # Gives the options that can be selected in the menu\n self.run()\n elif choice == 1:\n action()\n else:\n print('Please, choose number 0 or 1')", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def select_action(self, **kwargs):\n raise NotImplementedError('This method should be overriden.')", "def action():\n while True:\n act = input('Enter O - to open cell / F - to mark as FLAG').upper()\n if act == 'O':\n return 'O'\n elif act == 'F':\n return 'F'\n elif act == 'test':\n return 'test'\n else:\n continue", "def perform_user_action(action_index):\r\n if action_index is not None:\r\n print()\r\n action = available_actions[action_index]\r\n if current_state in action[\"valid_states\"]:\r\n function_to_run = available_actions[action_index][\"function\"]\r\n if function_to_run is not None:\r\n function_to_run()\r\n else:\r\n print(\"Internal error: NOT IMPLEMENTED (no function assigned for the action)!\")\r\n else:\r\n print(\"This function is not allowed in the current system state (%s)\" % current_state)\r\n else:\r\n print(\"Invalid input, please choose a valid action\")\r\n print()\r\n return None", "def getAction1(self, state):\n # Pick Action\n ############################################################################################################ Eric Changed state to self.index\n\n legalActions = state.getLegalActions(self.index)\n #print \"LEGAL ACTIONS IN GETACTION1 IN QLEARNINGAGENT: \", legalActions\n action = None\n \"*** YOUR CODE HERE ***\"\n if len(legalActions) == 0:\n return None\n coinTruth = util.flipCoin(self.epsilon)\n if coinTruth:\n acToReturn = random.choice(legalActions)\n #print \"GETACTION1 IN QLEARNINGAGENT COINTRUTH IS TRUE, ACTION IS : \", acToReturn\n return acToReturn\n\n\n\n #util.raiseNotDefined()\n acToReturn = self.computeActionFromQValues(state)\n #print \"GETACTION1 IN QLEARNINGAGENT COINTRUTH IS FALSE< ACTION IS : \", acToReturn\n return acToReturn", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def step(self, action):\n # Call the webdriver and perform the action\n if type(action) is str:\n cmd = action\n else:\n action = int(action)\n cmd = self.action_number_to_cmd[action]\n #print(\"action type:\", type(action))\n #raise Exception(\"Wrong the action type\")\n #sys.exit()\n\n print(\"cmd:\", cmd)\n #cmd = self.action_number_to_cmd[action]\n reward = 0\n discovered_elements = self.driver.get_discovered_elements()\n current_element = None\n\n if cmd == \"WAIT\":\n pass\n\n elif cmd in {\"CHOOSE_FIRST_CLICK\", \"CHOOSE_FIRST_SELECT\", 
\"CHOOSE_FIRST_ENTER\"}:\n cmd_to_chosen_type = {\n \"CHOOSE_FIRST_CLICK\": \"clickables\",\n \"CHOOSE_FIRST_SELECT\": \"selectables\",\n \"CHOOSE_FIRST_ENTER\": \"enterables\"\n }\n self.chosen_type = cmd_to_chosen_type[cmd]\n if len(discovered_elements[self.chosen_type]) > 0:\n #current_element = discovered_elements[self.chosen_type][0]\n self.chosen_number = 0\n else:\n reward = wrong_movement()\n\n elif cmd == \"NEXT\":\n if self.chosen_type:\n if len(discovered_elements[self.chosen_type]) > self.chosen_number + 1:\n self.chosen_number += 1\n #current_element = discovered_elements[self.chosen_type][self.chosen_number]\n else:\n reward = wrong_movement()\n else:\n reward = wrong_movement()\n\n elif cmd in {\"CLICK\", \"ENTER\", \"SELECT\"}:\n\n if not (self.chosen_type and self.chosen_number < len(discovered_elements[self.chosen_type])):\n reward = wrong_movement()\n else:\n current_element = discovered_elements[self.chosen_type][self.chosen_number]\n if cmd == \"CLICK\":\n self.driver.click(current_element)\n elif cmd == \"ENTER\":\n self.driver.enter(current_element, data=\"Hello world\")\n elif cmd == \"SELECT\":\n pass\n\n done = self.have_winner() or len(self.legal_actions()) == 0\n\n #reward = 1 if self.have_winner() else 0\n if self.have_winner():\n reward = 5\n\n return self.get_observation(), reward, done, {}", "def executeAction(self,**kwargs):\n try:\n action = kwargs[\"fname\"]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n entries = {}\n pose_offset = 'empty'\n if action in self.bl.getAllSavedActions():\n pose_offset = self.bl.baxter_actions[str(action)]['joint_position']\n entries['Show action only'] = [self.moveBy, pose_offset]\n entries['Show pick up action'] = [self.pickUpActionColour, pose_offset]\n# entries['Add condition'] = self.addEmptyCondition\n# entries['Rename '+str(action)] = [self.renameAction, action]\n entries['Learn '+str(action)] = getattr(self.bl, 'demoAction')\n\n self.mm.addGenericMenu(\"learnMenu\", self.mm.cur_page,\"Action saved as: %s\" % (str(pose_offset)),entries)\n self.mm.loadMenu(\"learnMenu\")", "def chooseAction(self, gameState):\n\n '''\n You should change this in your own agent.\n '''\n problem = foodsearchproblem(gameState,self)\n return self.astarsearch(problem,gameState,self.foodhuristic)[0]", "def action_menu(player):\n\n\tclear_screen()\n\n\trun_game_log(player, game_log)\t# runs the run_game_log function which gets output from game_log class\n\n\tprint('1. Move')\n\tprint('2. Show Map')\n\tprint('3. Show Player Inventory')\n\tprint('4. Show Player Stats')\n\tprint('5. Exit to Main Menu')\n\n\tpossible_choices = ['1','2','3','4','5']\n\tactive = True\n\n\twhile active:\n\t\tselection = input('\\nNow I shall... ')\n\t\tif selection in possible_choices:\n\t\t\tactive = False\n\t\t\treturn int(selection)\t# return always exits a function, right? so active = False is redundant?\n\t\telse:\n\t\t\tprint('That\\'s not one of the menu options!')", "def getAction(self, gameState):\r\n \"*** YOUR CODE HERE ***\"\r\n util.raiseNotDefined()", "def getAction(self, gameState):\r\n \"*** YOUR CODE HERE ***\"\r\n util.raiseNotDefined()", "def actions():\n pass", "def choose_action(self):\n\n def is_random_exploration():\n\n # 5. Return whether do random choice\n # hint: generate a random number, and compare\n # it with epsilon\n if random.random() < self.epsilon:\n return True\n else:\n return False\n\n final_action = ''\n if self.learning:\n if is_random_exploration():\n # 6. 
Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n else:\n # 7. Return action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n elif self.testing:\n # 7. choose action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n else:\n # 6. Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n\n return final_action", "def get_human_action(self):\n choice = input(\n \"Ingrese la acción a realizar por el jugador {}: \".format(self.get_current_player())\n )\n while choice not in [str(action) for action in self.get_legal_actions()]:\n choice = input(\"Acción no válida. Ingrese otra opción: \")\n return int(choice)", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n return self.lastAction", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n return self.lastAction", "def find_next_action(self, obs, agents, i):\n return None", "def act(self, x):\n return self.action", "def process_menu_page(self):\r\n self.print_options(self.menu,1)\r\n\r\n \"\"\"\r\n Asks for user input. Then redirects to the appropriate function.\r\n \"\"\"\r\n n = (input(\"What would you like to do? 
Please input the correpsonding integer:\"))\r\n\r\n if n == str(1):\r\n self.file_import()\r\n elif n == str(2):\r\n self.view_data()\r\n elif n == str(3):\r\n self.analysis()\r\n elif n == str(4):\r\n self.save()\r\n elif n == str('q'):\r\n quit()\r\n else:\r\n raise InputError(\"Please input a valid digit or 'q'\")", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"c\": (\"Calibrate\", self.calibrate),\n \"t\": (\"test restore\", self.calibrate),\n \"s\": (\"Check status\", self.status),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"Your selection: \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def act(self, action):\n action_name = action.op\n args = action.args\n list_action = first(a for a in self.actions if a.name == action_name)\n if list_action is None:\n raise Exception(\"Action '{}' not found\".format(action_name))\n if not list_action.check_precond(self.kb, args):\n raise Exception(\"Action '{}' pre-conditions not satisfied\".format(action))\n list_action(self.kb, args)", "def process_user_choice():\n msg = \"\\033[1m\" + \"选择一个小屋进去,请输入 (1-5): \" + \"\\033[0m\"\n user_choice = input(\"\\n\" + msg)\n idx = int(user_choice)\n return idx", "def main_menu():\n\n # Determines action taken by application.\n action = input(\"Would you like to check your balance (b), make a deposit (d) or make a withdrawal (w)? Enter b, d, or w. \\n\")\n return action", "def choose_from(self,index_list):\r\n\r\n if len(index_list)==1:\r\n return index_list[0]\r\n\r\n if len(index_list)==2:\r\n while True:\r\n imp_temp = input('< >')\r\n if imp_temp in ['<','>',EMPTYCHAR]:\r\n return index_list[{'<':0,\r\n '>':1,\r\n EMPTYCHAR:1}[imp_temp]]\r\n\r\n showtext = []\r\n for counter,index_temp in enumerate(index_list):\r\n if index_temp in self.indexes():\r\n showtext.append(str(counter+1)\\\r\n +' '+str(index_temp)+' : '\\\r\n +abridge(nformat\\\r\n .format_keys(self.get_keys_from_note(index_temp))))\r\n display.noteprint(('/C/NOTES',EOL.join(showtext)))\r\n\r\n choice_temp = input('?')\r\n if choice_temp.isnumeric() \\\r\n and 1 <= int(choice_temp) <= len(index_list):\r\n return index_list[int(choice_temp)-1]\r\n return index_list[-1]", "def action(self):\n pass", "def action(self):\n pass", "def get_action(self, action):\n actions = {\n self.GO_ACTION: self.go,\n self.CLICK_ACTION: self.click,\n self.CHECK_ACTION: self.check,\n self.WAIT_ACTION: self.wait,\n self.FILL_FORM_ACTION: self.fill,\n self.SELECT_FORM_ACTION: self.select\n }\n try:\n return actions[action]\n except KeyError:\n raise Exception('{0} is not a valid action, the valid actions are: {1}'.format(action,\n \", \".join(actions.keys())))", "def get_action(self, state):\n time.sleep(2.0)\n return random.choice(state.get_legal_actions(self.index))", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n\n action_choose = uct_for_pacman(gameState, self)\n if action_choose not in actions:\n return random.choice(actions)\n print(\"此步来自UCT\")\n self.action_last = action_choose\n return action_choose", "def getAction1(self, state):\n util.raiseNotDefined()", "def _interpret_action(self, action_idx: int, team: int):\n if action_idx < 4:\n return action_idx\n\n 
noops = self._allow_noops[team]\n diags = self._allow_diagonals[team]\n assert noops or diags\n\n if noops and not diags:\n assert action_idx == 4\n return NOOP\n\n if not noops and diags:\n assert action_idx < 8\n return action_idx\n\n if noops and diags:\n assert action_idx < 9\n return action_idx", "def get_action_from_user(self) -> ActionType:\n if self.wait_for_explicit_human_action:\n while len(self.renderer.pressed_keys) == 0:\n self.renderer.get_events()\n\n if self.key_to_action == {}:\n # the keys are the numbers on the keyboard corresponding to the action index\n if len(self.renderer.pressed_keys) > 0:\n action_idx = self.renderer.pressed_keys[0] - ord(\"1\")\n if 0 <= action_idx < self.action_space.shape[0]:\n return action_idx\n else:\n # the keys are mapped through the environment to more intuitive keyboard keys\n # key = tuple(self.renderer.pressed_keys)\n # for key in self.renderer.pressed_keys:\n for env_keys in self.key_to_action.keys():\n if set(env_keys) == set(self.renderer.pressed_keys):\n return self.action_space.actions[self.key_to_action[env_keys]]\n\n # return the default action 0 so that the environment will continue running\n return self.action_space.default_action", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def user_action():\n\t### This is the function that takes and executes the users choices\n\twhile battle_on:\n\t\tchoosing = True\n\t\twhile choosing:\n\t\t\tmenu(\"general\")\n\t\t\tanswer()\n\t\t\tif ans == \"attack\":\n\t\t\t\tattack(my_pokemon, enemy)\n\t\t\t\tcalc_hp(enemy, \"attack\")\n\t\t\t\tshow_hp(enemy)\n\t\t\t\tprint \" \"\n\t\t\t\treturn\n\t\t\telif ans == \"flee\":\n\t\t\t\tchance = uniform(0, 100)\n\t\t\t\tif chance > 90:\n\t\t\t\t\twin(\"flee\")\n\t\t\t\telse:\n\t\t\t\t\tprint \"You failed to escape!\"\n\t\t\t\t\treturn\n\t\t\telif ans == \"potion\":\n\t\t\t\tuse_potion(my_pokemon)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint \"i dont know what you mean :)\"\n\t\t\t\tprint \"lets try again!\"\n\t\t\t\tchoosing = True", "def do_action_for_input(self, user_input):\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()", "def _action(self):\n pass", "def action_type(self):", "def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")", "def select_action(self, state,allowExploration=True):\n actions = self.environment.all_actions(forExploration=allowExploration)\n #Returns any action\n return actions[0]", "def chooseAction(self, gameState):\n\n return \"Stop\"\n\n actions = gameState.getLegalActions(self.index)\n\n return random.choice(actions)", "def getAction(self, gameState):", "def choose_action(self, action, params, event):\n if REQ_AND_PARAMS.get(action) != len(params):\n print(\"invalid request\")\n self.send_message(INVALID_REQ)\n return\n if action == STREAM_ACTION:\n path = choose_song(params[0])\n self.stream_song(path, event)\n elif action == LOGIN_ACTION:\n self.login_check(params[0], params[1])\n elif action == ADD_ACTION:\n self.add_check(params[0], params[1])\n elif action == DOWNLOAD_ACTION:\n self.download_song(params[0])\n elif action == PAUSE_ACTION:\n self.pause = True\n event.clear()\n elif action == UN_PAUSE_ACTION:\n self.pause = False\n event.set()\n elif action == FORWARD_ACTION:\n self.skip_q.put(FORWARD_ACTION)\n elif action == BACKWARD_ACTION:\n self.skip_q.put(BACKWARD_ACTION)\n elif action == STOP:\n self.skip_q.put(STOP)\n elif action == CREATE_PL_ACTION:\n self.create_new_pl(params)\n elif action == GET_ALL_SONGS:\n self.get_all_songs()\n elif action == GET_ALL_PLS_OF_USER:\n self.get_all_pls_of_user(params[0])\n elif action == GET_SONGS_IN_PL:\n self.get_all_songs_in_pl(params[0])\n elif action == REMOVE_SONG_FROM_PL:\n self.remove_song_from_pl(params[0], params[1])\n elif action == ADD_SONG_TO_PL:\n 
self.add_song_to_pl(params[0], params[1])\n elif action == UNLINK_PLAYLIST:\n self.delete_pl(params[0], params[1])", "def actions() -> None:\n pass", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def choose_action(self, observation):\n observation = observation[np.newaxis, :]\n \n prob_weights = self.sess.run(\n self.all_act_prob,\n feed_dict={self.tf_obs: observation})\n\n action = npr.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())\n\n return action", "def act(self, infoset):\n assert self.action in infoset.legal_actions\n return self.action", "def next_action():\n while True:\n next = input('Enter Q to quit programme. M to return to main menu \\n')\n if next.lower() == 'q':\n logout()\n elif next.lower() == 'm':\n hr_main()\n is_invalid()", "def choose_action():\n\n def find_suma(a,b):\n return a+b\n\n def find_rizn(a,b):\n return a - b \n \n def find_ostacha(a,b):\n return a % b\n\n def find_stepin(a,b):\n return a ** b\n \n def find_sqrt(a):\n return math.sqrt(a)\n \n def find_factorial(a):\n return math.factorial(a) \n\n def find_dobutok(a,b):\n \n return a * b\n \n def find_chastku(a,b):\n if b == 0 :\n print('Dilennia na \"0\" nemozluve!')\n else:\n return a/b\n \n if x == '+':\n res = find_suma(a,b)\n return res\n \n elif x == '-':\n res = find_rizn(a,b)\n return res\n\n elif x == '*':\n res = find_dobutok(a,b)\n return res\n\n elif x == '/':\n res = find_chastku(a,b)\n return res\n\n elif x == '%':\n res = find_ostacha(a,b)\n return res\n\n elif x == '&':\n res = find_sqrt(a)\n return res\n\n elif x == '!':\n res = find_factorial(a)\n return res\n\n elif x == '^':\n res = find_stepin(a,b)\n return res", "def do_step(self, action_ind):\n action_ind = action_ind.item()\n if len(self.last_actions) < self.last_action_capacity:\n self.last_actions.append(action_ind)\n self.last_actions[self.last_action_ind] = action_ind\n self.last_action_ind = (\n self.last_action_ind + 1) % self.last_action_capacity\n robot_max_vel = self.sim.getAgentMaxSpeed(self.robot_num)\n # Decode the action selection:\n # 0 => do nothing\n # 1-16 => set velocity to `robot_max_vel/2` at angle\n # `(action_ind-1) * 2pi/16`\n # 17-32 => velocity to `robot_max_vel` at angle\n # `(action_ind-17) * 2pi/16`\n # 33-34 => change heading by\n # else => do nothing\n vel = (0, 0)\n angle = self.headings[self.robot_num]\n if 1 <= action_ind <= 16:\n angle += (action_ind - 1)*(math.pi / 8)\n vel = (\n (robot_max_vel/2) * math.cos(angle),\n (robot_max_vel/2) * math.sin(angle)\n )\n elif 17 <= action_ind <= 32:\n angle += (action_ind - 17)*(math.pi / 8)\n vel = (\n robot_max_vel * math.cos(angle),\n robot_max_vel * math.sin(angle)\n )\n elif action_ind == 33:\n self.headings[self.robot_num] += self.rot_speed\n elif action_ind == 34:\n self.headings[self.robot_num] -= self.rot_speed\n self.headings[self.robot_num] = normalize(self.headings[\n self.robot_num])\n # Set the robot's goal given the action that was selected\n ts = self.sim.getTimeStep()\n pos = self.sim.getAgentPosition(self.robot_num)\n self.goals[self.robot_num] = (\n pos[0] + vel[0] * ts, pos[1] + vel[1] * ts\n )\n self.advance_simulation()", "def obtain_action(self):\r\n\t\treturn", "def get_action(self, state):\r\n if len (state.actions()) == 1:\r\n # dbstate = DebugState.from_state(state)\r\n # print (dbstate)\r\n self.queue.put(state.actions()[0])\r\n return\r\n if state.ply_count < 2:\r\n action = 
random.choice(state.actions())\r\n else:\r\n action = self.uct_search(state).action\r\n # dbstate = DebugState.from_state(state)\r\n # print (dbstate)\r\n if action is None:\r\n print(\"Incorrect action\")\r\n action = random.choice(state.actions())\r\n self.queue.put(action)", "def choose_action(self, state):\n if random.random() < self.explore:\n action = random.choice(list(self.Q[state].keys()))\n else:\n action = self._best_action(state)\n\n # learn from the previous action, if there was one\n self._learn(state)\n\n # remember this state and action\n self.prev = (state, action)\n\n return action", "def get_user_input():\n while True:\n try:\n user_action = int(input(\"Enter (1) Search Books (2) Move Books (3) Exit\\n\"))\n for index, value in enumerate(USER_INPUT_SERVICE(), 1):\n if user_action == index:\n return value\n if user_action > 3:\n raise IndexError(\"Please enter 1, 2, or 3.\")\n except ValueError:\n print(\"Please enter the number.\")", "def _do_studio_prompt_action(intent, action):\r\n assert intent in ['warning', 'error', 'confirmation', 'announcement',\r\n 'step-required', 'help', 'mini']\r\n assert action in ['primary', 'secondary']\r\n\r\n world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))\r\n\r\n action_css = 'li.nav-item > a.action-{}'.format(action)\r\n world.trigger_event(action_css, event='focus')\r\n world.browser.execute_script(\"$('{}').click()\".format(action_css))\r\n\r\n world.wait_for_ajax_complete()\r\n world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))", "def act(self):\n self.features = self.next_features\n self.choose_random = np.random.choice(2,p=(1-self.epsilon,self.epsilon)) # Chooses whether to explore or exploit with probability 1-self.epsilon\n # Selects the best action index in current state\n if self.choose_random:\n self.chosenA = np.random.choice(4)\n else:\n self.chosenA = self.argmaxQsa(self.features)\n # Records reward for printing and performs action\n self.action = self.idx2act[self.chosenA]\n # Execute the action and get the received reward signal\n self.reward = self.move(self.action)\n self.total_reward += self.reward\n # IMPORTANT NOTE:\n # 'action' must be one of the values in the actions set,\n # i.e. 
Action.LEFT, Action.RIGHT, Action.ACCELERATE or Action.BRAKE\n # Do not use plain integers between 0 - 3 as it will not work", "def get_action():\n print(\"What do you do next?\")\n print(\" m) move\")\n print(\" a) fire an arrow\")\n action = input(\"> \")\n if action == \"m\" or action == \"a\":\n return action\n else:\n print(action + \"?\")\n print(\"That's not an action that I know about\")\n return False", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def action(self, gstate, actions=None):\n raise NotImplementedError", "def pickUpActionColour(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n\n try:\n action = kwargs[\"fname\"]\n except:\n rospy.logwarn(\"Could not get the current action selection\")\n\n# position = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colours = self.locator.tetris_blocks.keys() \n entries = {}\n\n for block in colours:\n entries[str(block)] = [self.pickUpAction, pose_offset]\n entries['any'] = [self.pickUpActionAny, pose_offset]\n self.mm.addGenericMenu(\"colourMenu\",self.mm.cur_page,\"Select the block colour for %s\" %action, entries)\n self.mm.loadMenu(\"colourMenu\")" ]
[ "0.77464926", "0.7433682", "0.72635955", "0.7202781", "0.71235913", "0.70796835", "0.70719695", "0.70426613", "0.69958633", "0.69911724", "0.69026655", "0.69006485", "0.68855083", "0.6853275", "0.68034893", "0.67918736", "0.67697215", "0.6714882", "0.6669915", "0.66696125", "0.66655326", "0.6625247", "0.6599461", "0.65930176", "0.65028065", "0.64664567", "0.64221084", "0.64196086", "0.6401485", "0.6369605", "0.63613856", "0.6309435", "0.6309435", "0.63069576", "0.6291224", "0.6288939", "0.6262262", "0.6235667", "0.6235667", "0.6235667", "0.6235667", "0.6235667", "0.6235667", "0.6235667", "0.6230544", "0.6227225", "0.6222436", "0.621997", "0.61865854", "0.61865854", "0.61763024", "0.61706793", "0.6168011", "0.61619747", "0.614293", "0.614293", "0.61153156", "0.6110383", "0.61053693", "0.61021656", "0.6095632", "0.6084927", "0.60809577", "0.6075898", "0.6070545", "0.6070545", "0.60676676", "0.6061477", "0.60606414", "0.6055384", "0.60529196", "0.6036755", "0.6034004", "0.6034004", "0.6022596", "0.6013391", "0.6005518", "0.5996795", "0.59863317", "0.59859383", "0.5978624", "0.5974664", "0.5974108", "0.5973245", "0.5963301", "0.5954987", "0.5952504", "0.595194", "0.5950693", "0.5949313", "0.59489995", "0.5934575", "0.59328234", "0.5925182", "0.59222776", "0.59180623", "0.59132755", "0.5900468", "0.5899256", "0.58844686" ]
0.77177477
1
Perform the desired user action
def perform_user_action(action_index):
    if action_index is not None:
        print()
        action = available_actions[action_index]
        if current_state in action["valid_states"]:
            function_to_run = available_actions[action_index]["function"]
            if function_to_run is not None:
                function_to_run()
            else:
                print("Internal error: NOT IMPLEMENTED (no function assigned for the action)!")
        else:
            print("This function is not allowed in the current system state (%s)" % current_state)
    else:
        print("Invalid input, please choose a valid action")
    print()
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_action(self, action_data):\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def take_action(self, *args, **kwargs):\r\n pass", "def do_action_for_input(self, user_input):\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()", "def user_action():\n\t### This is the function that takes and executes the users choices\n\twhile battle_on:\n\t\tchoosing = True\n\t\twhile choosing:\n\t\t\tmenu(\"general\")\n\t\t\tanswer()\n\t\t\tif ans == \"attack\":\n\t\t\t\tattack(my_pokemon, enemy)\n\t\t\t\tcalc_hp(enemy, \"attack\")\n\t\t\t\tshow_hp(enemy)\n\t\t\t\tprint \" \"\n\t\t\t\treturn\n\t\t\telif ans == \"flee\":\n\t\t\t\tchance = uniform(0, 100)\n\t\t\t\tif chance > 90:\n\t\t\t\t\twin(\"flee\")\n\t\t\t\telse:\n\t\t\t\t\tprint \"You failed to escape!\"\n\t\t\t\t\treturn\n\t\t\telif ans == \"potion\":\n\t\t\t\tuse_potion(my_pokemon)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint \"i dont know what you mean :)\"\n\t\t\t\tprint \"lets try again!\"\n\t\t\t\tchoosing = True", "def perform_action(self, action_id: int) -> None:\r\n ...", "def take_action(self, action):\n\t\traise NotImplementedError", "def perform_actual_action(self, action):\n self.game.perform_action(action)", "async def perform_action(self) -> None:", "def do_action(self):\n func = self._get_action_func()\n func(self)", "def perform_action(self, action):\n method_name = action.text().lower()\n method_name = method_name + \"_action\"\n action_method = getattr(self, method_name)\n action_method()", "def do_action(self, action, a=None, b=None):\n pass", "def call_action(self, action):\n pass", "def perform_step(self, action):\n pass", "def action(self):\n pass", "def action(self):\n pass", "def action_run(self):\n pass", "def choose_action(self):\r\n pass", "def _action(self):\n pass", "def act(self):\n pass", "def run(self):\n\n self._action.execute()", "def execute(self, user):\n pass", "def execute_action(self, agent, action):\n abstract", "def act(self) -> None:\n pass", "def processUserAction(self, user_action):\n self.history[\"user_action\"] = user_action\n dialogue_act = user_action[\"action\"]\n self.current_function = None\n self.dont_know = False\n\n\n def provideQuery():\n self.query = user_action[\"query\"]\n self.query_vector = self.dataset.getVectorForQuery(self.query)\n self.dataset.updateResults(query = self.query)\n self.result_index=0\n self.list_current = False\n return user_action\n\n def provideKw():\n self.keywords[\"provided\"].add(user_action[\"keyword\"])\n self.keywords[\"rejected\"].discard(user_action[\"keyword\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n self.result_index=0\n return user_action\n\n def rejectKws():\n self.keywords[\"provided\"].difference_update(user_action[\"keywords\"])\n self.keywords[\"rejected\"].update(user_action[\"keywords\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n return user_action\n\n def rejectFunctions():\n self.functions_rejected.update(user_action[\"functions\"])\n self.dataset.updateResults(not_functions = self.functions_rejected)\n self.list_current = False\n return 
user_action\n\n def eliSugg():\n return user_action\n\n def eliInfo():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def eliInfoAll():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def changePage():\n return user_action\n\n def dontKnow():\n self.dont_know = True\n\n\n switcher = {\n 'provide-query':provideQuery,\n 'provide-kw':provideKw,\n 'reject-kws':rejectKws,\n 'reject-functions':rejectFunctions,\n 'eli-sugg':eliSugg,\n 'eli-sugg-all':eliSugg,\n 'eli-info':eliInfo,\n 'eli-info-all':eliInfo,\n 'change-page':changePage,\n 'dont-know':dontKnow\n }\n\n if dialogue_act in switcher:\n return switcher[dialogue_act]()\n else: return user_action", "def execute_action(self, agent, action):\n raise NotImplementedError", "def execute_action(self, agent, action):\n raise NotImplementedError", "def perform_action(self, action):\n if action[0] == 10: # Query\n return self.process_query(action)\n elif action[0] == 20: # Look at a document\n return self.examine_document(action)", "def test_user_actions_post(self):\n pass", "def _execute_action(self, action):\n if action['type'] == 'http':\n self._execute_action_http(action)\n elif action['type'] == 'mail':\n self._execute_action_mail(action)\n elif action['type'] == 'chat':\n pass\n elif action['type'] == 'printer':\n self._execute_action_printer(action)\n elif action['type'] == 'smb':\n self._execute_action_smb(action)\n\n # Wait for a randomized interval.\n time.sleep(random.randint(1, 5))", "def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()", "def do_action(self, action, **kwargs):\r\n print(action)\r\n action_method = getattr(self, action._method.__name__)\r\n if action_method:\r\n action_method(**kwargs)", "def _do_action(self, handler: 'Handler') -> CanDo:\n pass", "def chooseAction(self):\n print \"nothing\"\n pass", "def perform ( self, action, action_event = None ):\r\n getattr( self.editor, action.action )()", "def action_done(self):", "def process_action(*args, **kwargs):\n raise NotImplementedError()", "def act(self):\n raise NotImplementedError", "def select_action(self):\n pass", "def act(self, x):\n return self.action", "def _act(self, action):\n self._set_action(action)", "def take_action(self, state):", "def call_actions_for_users():\n cur.execute(\"SELECT * FROM LoginStatus WHERE loggedin = true;\")\n logged_in_users = cur.fetchall()\n logged_no = len(logged_in_users)\n assert(logged_no <= 1)\n if args.logout:\n if logged_no != 1:\n print \"No one is logged in! Can't logout!\"\n exit()\n logout(logged_in_users[0][0])\n exit()\n if logged_no == 1:\n if args.signup:\n print \"User already logged in. Please logout and then signup.\"\n exit()\n username = welcome_user(logged_in_users[0][0])\n else:\n if args.signup:\n username = signup_and_login()\n else:\n username = login_user()\n if username == None:\n print \"Exiting. 
If you wish to sign up use -su command line option.\"\n exit()\n if args.deleteacc:\n print \"Are you sure you want to delete your account(y/n).(All your info including all your activities will be permanently lost.):\",\n reply = get_reply([\"y\",\"n\"])\n if reply == \"y\":\n del_user(username)\n exit()\n else:\n print \"Be careful next time! Bye\"\n exit()\n return username", "def actions():\n pass", "def __handle_act_as_user(self, conduit_proxy_data):\n act_as_user = None\n if conduit_proxy_data:\n # Note that we may throw here if conduit_proxy_data is not a dict,\n # this is ok because the BaseHTTPRequestHandler will handle it for\n # us.\n # TODO: check assumption that it handles this for us\n act_as_user = conduit_proxy_data.get('actAsUser', None)\n if act_as_user:\n self.__conduit.set_act_as_user(act_as_user)\n else:\n if self.__conduit.get_act_as_user():\n self.__conduit.clear_act_as_user()", "def choose_action(self, *args, **kwargs):\n return NotImplementedError", "def execute_action(self, action, values=None):\r\n raise NotImplementedError('Subclass must override execute_action method')", "def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def __call__(self,action=None):\n raise NYI", "def perform(self):\n pass", "def act(self, action):\n action_name = action.op\n args = action.args\n list_action = first(a for a in self.actions if a.name == action_name)\n if list_action is None:\n raise Exception(\"Action '{}' not found\".format(action_name))\n if not list_action.check_precond(self.kb, args):\n raise Exception(\"Action '{}' pre-conditions not satisfied\".format(action))\n list_action(self.kb, args)", "def action(self):\n current_action = self.get_script_entry()\n if current_action[\"type\"] == \"request\":\n self._handle_request(current_action)\n elif current_action[\"type\"] == \"event\":\n self._handle_event(current_action)\n elif current_action[\"type\"] == \"response\":\n self._handle_response(current_action)\n else:\n raise AttributeError(\"Wrong action type!\" +\n \" Scenario: \" + str(self._loaded_sc[\"name\"]) +\n \" Action: \" + str(self._scenario_script_cur))", "def obtain_action(self):\r\n\t\treturn", "def executeAction(self,**kwargs):\n try:\n action = kwargs[\"fname\"]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n entries = {}\n pose_offset = 'empty'\n if action in self.bl.getAllSavedActions():\n pose_offset = self.bl.baxter_actions[str(action)]['joint_position']\n entries['Show action only'] = [self.moveBy, pose_offset]\n entries['Show pick up action'] = [self.pickUpActionColour, pose_offset]\n# entries['Add condition'] = self.addEmptyCondition\n# entries['Rename '+str(action)] = [self.renameAction, action]\n entries['Learn '+str(action)] = getattr(self.bl, 'demoAction')\n\n self.mm.addGenericMenu(\"learnMenu\", self.mm.cur_page,\"Action saved as: %s\" % (str(pose_offset)),entries)\n self.mm.loadMenu(\"learnMenu\")", "def post(self, request, *args, **kwargs):\n getattr(self, kwargs['action'])()\n return HttpResponse()", "def _take_action(self, action_idx: ActionType) -> None:\n raise NotImplementedError(\"\")", "def action(self,item):\r\n pass", "def actions() -> None:\n pass", "def proceed_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # Store user entries into computer object.\n self._store_user_entries()\n # 
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # Set proceed flag\n self.proceed = True\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # Destroy the view.\n self.entry_view.destroy()", "def dispatch(self, request, *args, **kwargs):\n messages.success(request, GOODBYE_MSG.format(request.user.username))\n return super().dispatch(request, *args, **kwargs)", "def performCommand(self, game, command):\r\n game.currentTurn.perform(command)", "def _execute_action_chat(self, action):\n raise NotImplementedError(\"This function is not implemented yet.\")", "def perform_action(self, current_player, action):\n self.inputs_[action] = current_player\n if Config.USER['debug']['enabled']:\n print \"---\"\n print str(self.inputs_[0:3])\n print str(self.inputs_[3:6])\n print str(self.inputs_[6:9])", "def get_player_action(self) -> None:\n print(f\"\\nYou have: {self.user.hand.cards} totalling to {self.user.hand.value}\")\n while not self.get_game_ending_hands():\n action = self.validate_input(\"Do you want to 1. hit or 2. stand?\", ('1', '2'))\n if action == '1':\n self.action_hit()\n elif action == '2':\n self.action_stand()\n break", "def choose_action(self, obs, **kwargs):\n pass", "def execute(self):\r\n for pilot in self._game.pilots_by_skill():\r\n pilot.active = True\r\n\r\n # Apply this pilot's maneuver\r\n pilot.chosen_maneuver.apply(pilot)\r\n\r\n # Choose an action to perform\r\n if pilot.can_perform_action():\r\n chosen_action = self._game.player(pilot.faction).choose_action(pilot)\r\n\r\n # TODO: Do something with this\r\n\r\n pilot.active = False", "def action_done(self):\n pass", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def action():\n if str(value).lower() == \"taxi\":\n self.taxi()\n elif str(value).lower() == \"fly\":\n self.pre_fly()\n self.fly()\n self.post_fly()\n elif str(value).lower() == \"return\":\n self.motor.move(self.return_position)", "def do_action(self, _action: action.Action) -> None:\n if isinstance(_action, action.Attack):\n self.do_attack_action(_action)\n elif isinstance(_action, action.Move):\n self.do_move_action(_action)\n else:\n raise NotImplementedError(f\"Action {type(_action)} not implemented!\")", "def action(self, target, text):\n raise NotImplementedError", "def onActionTaken(self, agent):\n\n pass", "def act(self, infoset):\n assert self.action in infoset.legal_actions\n return self.action", "def step(self, action):", "def main():\n user_interaction()", "def takeAction(self, action):\n return self.env.step(action)", "def execute_action(self, agent, action):\n agent.bump = False\n agent.performance_measure -= 1\n \n if action == 'TurnRight':\n agent.heading = self.turn_heading(agent.heading, -1)\n elif action == 'TurnLeft':\n agent.heading = self.turn_heading(agent.heading, +1)\n elif action == 'Forward':\n self.move_to(agent, vector_add(self.heading_to_vector(agent.heading),\n agent.location))\n elif action == 'Grab':\n if 
self.some_things_at(agent.location, tclass=Gold):\n try:\n gold = self.list_things_at(agent.location, tclass=Gold)[0]\n agent.has_gold = True\n self.delete_thing(gold)\n except:\n print \"Error: Gold should be here, but couldn't find it!\"\n print 'All things:', self.list_things_at(agent.location)\n print 'Gold?:', self.list_things_at(agent.location, tclass=Gold)\n sys.exit(-1)\n\n elif action == 'Release':\n if agent.location == self.entrance:\n if agent.has_gold:\n agent.performance_measure += 1000\n self.done = True\n elif action == 'Shoot':\n if agent.has_arrow:\n agent.has_arrow = False\n agent.performance_measure -= 10\n self.shoot_arrow(agent)\n elif action == 'Stop':\n self.done = True\n \n print '\\nCurrent Location: ', agent.location\n print 'Heading: ', self.heading_to_str(agent.heading)\n print 'Reminder- Start Location:', self.entrance\n print ''\n print 'Percepts:'", "def message_user_results(self, request, successes, failures, action):\n\n self.message_user_success(request, successes, action)\n self.message_user_failure(request, failures, action)", "def get_action(self, context):\n pass", "def click(cls, user, link):\r\n pass", "def perform(self):\n return", "def perform(self):\n return", "def _do_studio_prompt_action(intent, action):\r\n assert intent in ['warning', 'error', 'confirmation', 'announcement',\r\n 'step-required', 'help', 'mini']\r\n assert action in ['primary', 'secondary']\r\n\r\n world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))\r\n\r\n action_css = 'li.nav-item > a.action-{}'.format(action)\r\n world.trigger_event(action_css, event='focus')\r\n world.browser.execute_script(\"$('{}').click()\".format(action_css))\r\n\r\n world.wait_for_ajax_complete()\r\n world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))", "def test_action_called(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n\n self.view(req)\n\n model_get = self.mock_model._base_manager.get\n model_get.assert_called_with(pk=\"3\")\n\n instance = model_get.return_value\n instance.doit.assert_called_with(user=req.user)", "def dispatch(self, request, *args, **kwargs):\n if self.same_user_or_shiftleader(request.user):\n return super(UpdateRun, self).dispatch(request, *args, **kwargs)\n return redirect_to_login(\n request.get_full_path(), login_url=reverse(\"admin:login\")\n )", "def change(login):\n try:\n manager = Actions()\n manager.change_user(login)\n except Exception as e:\n print(e)", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def 
execute(UserMessage,player):\n if UserMessage['Action'] == \"Kill\": #This list represents the commands file I had before, neater that way?\n SendKill(UserMessage,player)\n if UserMessage['Action'] == 'Vote1':\n Vote1(UserMessage)\n if UserMessage['Action'] == 'Vote2':\n Vote2(UserMessage)\n if UserMessage['Action'] == \"DataPLZ\":\n DataPLZ(UserMessage)\n pass", "def performAction(self, action):\n self.action = action\n self.t += self.dt \n self.step()", "def result(self, state, action):\n print \"Ashish\"\n return 1", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def process(self, do_print=True):\n\n target = self.result()\n # Run own action here\n\n self.reset()\n pass", "def home_edituser():\n\tpass", "def choose_action(self, board):\n raise NotImplementedError", "def apply_action(self, cmd_name, *args):\n\n action = Action(self.tahoma_device.url)\n action.add_command(cmd_name, *args)\n self.controller.apply_actions(\"HomeAssistant\", [action])" ]
[ "0.77129984", "0.7649776", "0.7649776", "0.7479478", "0.7274967", "0.71987295", "0.7164105", "0.70513093", "0.70129836", "0.69782907", "0.69473976", "0.6920116", "0.6919698", "0.69076014", "0.6898974", "0.6837431", "0.6837431", "0.6832801", "0.6799796", "0.6762275", "0.67328244", "0.6730584", "0.6677561", "0.66706973", "0.667049", "0.66618", "0.66068417", "0.66068417", "0.6525126", "0.6504823", "0.6479538", "0.64371043", "0.6420156", "0.64171624", "0.6400158", "0.6390543", "0.63800484", "0.637038", "0.63393396", "0.63048995", "0.6302198", "0.62712914", "0.6249768", "0.6213721", "0.61948097", "0.61750275", "0.6169205", "0.6163278", "0.6135676", "0.61172646", "0.61172646", "0.61172646", "0.61172646", "0.61025935", "0.608915", "0.60876703", "0.608469", "0.6080374", "0.6064903", "0.60645103", "0.60420734", "0.60417664", "0.6036055", "0.60293204", "0.6028841", "0.60248125", "0.60072964", "0.6005379", "0.59947366", "0.59939826", "0.59910125", "0.5982289", "0.5981231", "0.59703374", "0.5958173", "0.5947169", "0.5946006", "0.59277505", "0.5916075", "0.5911092", "0.5903496", "0.5899864", "0.58905923", "0.58894974", "0.58894515", "0.58807737", "0.58807737", "0.5880525", "0.5880129", "0.58770615", "0.5876362", "0.58718", "0.58713466", "0.5869024", "0.5862046", "0.5846907", "0.58464456", "0.58436054", "0.58394307", "0.5835454" ]
0.7465346
4
Expects a `config` with the settings found in pertestcoverage/configs/config_fixed_by_commit_rawdata.yml. Throws errors if something is missing; all the settings are listed at the top of the script.
def run(args=None, config=None):
    if args:
        parser = AnalysisParser('config')
        args = parser.parse_analysis_args(args)
        config = args.config
    if not config:
        raise Exception("Missing `config` dict argument.")

    numpatches = config['numpatches']
    changesets_list = config['changesets']
    outputdir = config['outputdir']
    analyze_all = config['analyze_all'] if 'analyze_all' in config else False
    mozcentral_path = config['mozcentral_path'] if 'mozcentral_path' in config else None
    runname = config['runname'] if 'runname' in config else None
    include_guaranteed = config['include_guaranteed'] if 'include_guaranteed' in config else False
    use_active_data = config['use_active_data'] if 'use_active_data' in config else False
    skip_py = config['skip_py'] if 'skip_py' in config else True
    suites_to_analyze = config['suites_to_analyze']
    platforms_to_analyze = config['platforms_to_analyze']
    from_date = config['from_date']

    timestr = str(int(time.time()))

    custom_script = config['custom_scheduling']
    custom_classname = config['custom_classname']
    custom_class = import_class(custom_script, custom_classname)
    custom_class_obj = custom_class(config)

    failed_tests_query_json = {
        "from": "unittest",
        "where": {
            "and": [
                {"eq": {"repo.changeset.id12": None}},
                {"eq": {"repo.branch.name": None}},
                {"eq": {"task.state": "failed"}},
                {"eq": {"result.ok": "false"}},
                {"or": [
                    {"regex": {"job.type.name": ".*%s.*" % suite}}
                    for suite in suites_to_analyze
                ]},
                {"or": [
                    {"regex": {"job.type.name": ".*%s.*" % platform}}
                    for platform in platforms_to_analyze
                ]},
            ]
        },
        "limit": 100000,
        "select": [{"name": "test", "value": "result.test"}]
    }

    log.info("Getting FBC entries...")
    changesets = get_fixed_by_commit_entries(
        localdata=not use_active_data,
        activedata=use_active_data,
        suites_to_analyze=suites_to_analyze,
        platforms_to_analyze=platforms_to_analyze,
        from_date=from_date,
        local_datasets_list=changesets_list,
        save_fbc_entries=outputdir
    )

    # For each patch
    histogram1_datalist = []
    tests_for_changeset = {}
    changesets_counts = {}
    count_changesets_processed = 0
    all_changesets = []

    for count, tp in enumerate(changesets):
        if count_changesets_processed >= numpatches:
            continue
        if len(tp) == 4:
            changeset, suite, repo, test_fixed = tp
        else:
            continue

        orig_test_fixed = test_fixed
        test_fixed = test_fixed.split('ini:')[-1]
        if 'mochitest' not in suite and 'xpcshell' not in suite:
            test_fixed = format_testname(test_fixed)

        changeset = changeset[:12]

        log.info("")
        log.info("On changeset " + "(" + str(count) + "): " + changeset)
        log.info("Running analysis: %s" % str(runname))
        log.info("Test name: %s" % test_fixed)

        # Get patch
        currhg_analysisbranch = hg_branch(repo)
        files_url = HG_URL + currhg_analysisbranch + "json-info/" + changeset
        data = get_http_json(files_url)
        files_modified = data[changeset]['files']
        orig_files_modified = files_modified.copy()

        # Get tests that use this patch
        failed_tests_query_json['where']['and'][0] = {"eq": {"repo.changeset.id12": changeset}}
        failed_tests_query_json['where']['and'][1] = {"eq": {"repo.branch.name": repo}}

        log.info("Checking for test failures...")
        all_tests = []
        failed_tests = []
        try:
            failed_tests = query_activedata(failed_tests_query_json)
        except Exception as e:
            log.info("Error running query: " + str(failed_tests_query_json))

        all_failed_tests = []
        if 'test' in failed_tests:
            all_failed_tests = [test for test in failed_tests['test']]

        if pattern_find(test_fixed, all_failed_tests):
            log.info("Test was not completely fixed by commit: " + str(test_fixed))
            continue

        log.info("Test was truly fixed. Failed tests: " + str(all_failed_tests))

        # Perform scheduling
        all_tests_not_run = []
        returned_data = custom_class_obj.analyze_fbc_entry(
            (changeset, suite, repo, orig_test_fixed), test_fixed
        )
        if 'skip' in returned_data and returned_data['skip']:
            continue
        if not returned_data['success']:
            all_tests_not_run.append(test_fixed)

        log.info("Number of tests: " + str(len(all_tests)))
        log.info("Number of failed tests: " + str(len([test_fixed])))
        log.info("Number of files: " + str(len(files_modified)))
        log.info("Number of tests not scheduled by per-test: " + str(len(all_tests_not_run)))
        log.info("Tests not scheduled: \n" + str(all_tests_not_run))

        cset_count = 1
        if changeset not in changesets_counts:
            changesets_counts[changeset] = cset_count
        else:
            changesets_counts[changeset] += 1
            cset_count = changesets_counts[changeset]

        changeset_name = changeset + "_" + str(cset_count)
        tests_for_changeset[changeset_name] = {
            'patch-link': HG_URL + currhg_analysisbranch + "rev/" + changeset,
            'numfiles': len(files_modified),
            'numtests': len(all_tests),
            'numtestsfailed': 1,
            'numtestsnotrun': len(all_tests_not_run),
            'files_modified': files_modified,
            'suite': suite,
            'runname': runname,
            'orig-test-related': orig_test_fixed,
            'test-related': test_fixed,
            'testsnotrun': all_tests_not_run,
        }
        for entry in returned_data:
            tests_for_changeset[entry] = returned_data[entry]

        all_changesets.append(changeset)
        histogram1_datalist.append((1, 1-len(all_tests_not_run), changeset))
        count_changesets_processed += 1

    numchangesets = len(all_changesets)
    total_correct = sum([
        1 if not tests_for_changeset[cset + "_" + str(cset_count)]['testsnotrun'] else 0
        for cset in all_changesets
    ])
    log.info("Running success rate = {:3.2f}%".format(float((100 * (total_correct/numchangesets)))))
    log.info("")

    ## Save results (number, and all tests scheduled)
    if outputdir:
        log.info("\nSaving results to output directory: " + outputdir)
        timestr = str(int(time.time()))
        save_json(tests_for_changeset, outputdir, timestr + '_per_changeset_breakdown.json')

    f = plt.figure()
    numchangesets = len(all_changesets)
    total_correct = sum([
        1 if not tests_for_changeset[cset + "_1"]['testsnotrun'] else 0
        for cset in all_changesets
    ])
    total_incorrect = sum([
        1 if tests_for_changeset[cset + "_1"]['testsnotrun'] else 0
        for cset in all_changesets
    ])

    b2 = plt.pie(
        [
            100 * (total_correct/numchangesets),
            100 * (total_no_coverage_data/numchangesets)
        ],
        colors=['green', 'red'],
        labels=[
            'Successfully scheduled',
            'Not successfully scheduled'
        ],
        autopct='%1.1f%%'
    )
    plt.legend()

    log.info("Completed analysis for run: %s" % str(runname))
    log.info("Total number of changesets in pie chart: " + str(numchangesets))
    log.info("Close figures to end analysis.")
    log.info("Changesets analyzed (use these in other analysis types if possible): \n" + str(all_changesets))

    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_root_values_on_existing_file(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n blabla = \"what\"\n something = true\n another_thing = \"yep\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - hooks:\n - id: whatever\n something: false\n another_thing: \"nope\"\n \"\"\"\n ).api_check_then_fix(\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 338,\n \" has missing values:\",\n \"\"\"\n blabla: what\n fail_fast: true\n \"\"\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \" has different values. Use this:\",\n \"\"\"\n another_thing: yep\n something: true\n \"\"\",\n ),\n )", "def check_config(config):\n pass", "def test_pre_commit_referenced_in_style(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n \"\"\"\n ).pre_commit(\"\").api_check_then_fix(Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, \" doesn't have the 'repos' root key\"))", "def test_pre_commit_has_no_configuration(tmp_path):\n ProjectMock(tmp_path).style(\"\").pre_commit(\"\").api_check_then_fix()", "def test_root_values_on_missing_file(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n bla_bla = \"oh yeah\"\n fail_fast = true\n whatever = \"1\"\n \"\"\"\n ).api_check_then_fix(\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 331,\n \" was not found. Create it with this content:\",\n \"\"\"\n bla_bla: oh yeah\n fail_fast: true\n whatever: '1'\n \"\"\",\n )\n )", "def test_missing_key_from_config_fails_with_error(self, custom_config):\n del custom_config['subject']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run({'commits': [{'message': 'xxxxx', 'sha': 'aa', 'url': ''}]})[\n 0\n ]\n\n assert result.success is False\n assert result.status is 'error'\n assert result.error_code is 'invalid_content'\n assert \"Missing key: 'stats'\" in result.details['message']", "def test_broken_config(broken_config):\n with pytest.raises(RuntimeError, match=\"Error reading config.yml\"):\n abcconfig.get_config(broken_config)", "def check_config(config):\n rq = {\"name\", \"description\", \"region\", \"user\", \"instance_type\",\n \"base_image\", \"uploads\", \"commands\"}\n diff = rq - set(config.keys())\n if diff:\n raise(BadConfigFile(\"Missing keys {} in config\".format(diff)))", "def check_config(cfg):", "def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref", "def ConfigurationFixups(self, config):\n fixed_q_value = config.GetValue('fixed-q')\n if int(config.GetValue('gold-q')) > int(fixed_q_value):\n config = config.ChangeValue('gold-q', fixed_q_value)\n if int(config.GetValue('key-q')) > int(fixed_q_value):\n config = config.ChangeValue('key-q', fixed_q_value)\n\n return config", "def test_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"whatever\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 334, \": missing 'hooks' in repo 'whatever'\")\n )", "def 
test_expected_config(expectedconfig):\n expected = expectedconfig.read_text()\n config = CONFIGSDIR / expectedconfig.name\n\n assert dumpconfig(config) == expected", "def test_config_from_file(self):\n parser = Parser()\n args = parser.parser.parse_args(['-c'])\n if args.config:\n config = Config()\n config.config_file = \"./config\"\n config.config = test_config\n config.config_from_file()\n self.assertTrue(config.config)\n os.remove(config.config_file)", "def check_config():\n\n if not config_instance:\n LOG.error(\"Failed to load the config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"CONFIG_VERSION\"):\n LOG.warning( \"The config file does not specify CONFIG_VERSION! I will \"\n \"try to continue anyway, but this field is recommended to allow \"\n \"some internal tests to work. I will assume the value '(1,0)'!\" )\n config_instance.CONFIG_VERSION = (1, 0)\n\n major, minor = config_instance.CONFIG_VERSION\n expected_major, expected_minor = EXPECTED_CONFIG_VERSION\n\n if major < expected_major:\n LOG.critical(\"The config system has undergone a major change! \"\n \"I cannot continue without an upgrade!\")\n sys.exit(9)\n\n if minor < expected_minor:\n LOG.warning(\"The config system has undergone a minor change! \"\n \"It should work, but you still should review the docs!\")\n\n if major == expected_major and minor == expected_minor:\n LOG.debug( \"Config version OK!\" )\n\n if not hasattr(config_instance, \"GENERATORS\"):\n LOG.critical(\"Variable 'GENERATORS' not found in config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"TARGETS\"):\n LOG.critical(\"Variable 'TARGETS' not found in config!\")\n sys.exit(9)", "def test_config(self):\n if self.config.get('base_url')[-1] is '/':\n raise exceptions.ScidashClientWrongConfigException('Remove last '\n 'slash '\n 'from base_url')", "def test_style_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"another\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: another\n hooks:\n - id: isort\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 335, \": style file is missing 'hooks' in repo 'another'\")\n )", "def check_for_deprecated_config(config):\n\n # key is the name of the depreacted variable that is no longer allowed in any config files\n # value is a dictionary containing information about what to do with the deprecated config\n # 'sec' is the section of the config file where the replacement resides, i.e. config, dir,\n # filename_templates\n # 'alt' is the alternative name for the deprecated config. this can be a single variable name or\n # text to describe multiple variables or how to handle it. Set to None to tell the user to\n # just remove the variable\n # 'copy' is an optional item (defaults to True). set this to False if one cannot simply replace\n # the deprecated config variable name with the value in 'alt'\n # 'req' is an optional item (defaults to True). this to False to report a warning for the\n # deprecated config and allow execution to continue. this is generally no longer used\n # because we are requiring users to update the config files. 
if used, the developer must\n # modify the code to handle both variables accordingly\n deprecated_dict = {\n 'LOOP_BY_INIT' : {'sec' : 'config', 'alt' : 'LOOP_BY', 'copy': False},\n 'LOOP_METHOD' : {'sec' : 'config', 'alt' : 'LOOP_ORDER'},\n 'PREPBUFR_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : None},\n 'PREPBUFR_FILE_REGEX' : {'sec' : 'regex_pattern', 'alt' : None},\n 'OBS_INPUT_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'OBS_POINT_STAT_INPUT_DIR', 'copy': False},\n 'FCST_INPUT_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'FCST_POINT_STAT_INPUT_DIR', 'copy': False},\n 'FCST_INPUT_FILE_REGEX' :\n {'sec' : 'regex_pattern', 'alt' : 'FCST_POINT_STAT_INPUT_TEMPLATE', 'copy': False},\n 'OBS_INPUT_FILE_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'OBS_POINT_STAT_INPUT_TEMPLATE', 'copy': False},\n 'PREPBUFR_DATA_DIR' : {'sec' : 'dir', 'alt' : 'PB2NC_INPUT_DIR'},\n 'PREPBUFR_MODEL_DIR_NAME' : {'sec' : 'dir', 'alt' : 'PB2NC_INPUT_DIR', 'copy': False},\n 'OBS_INPUT_FILE_TMPL' :\n {'sec' : 'filename_templates', 'alt' : 'OBS_POINT_STAT_INPUT_TEMPLATE'},\n 'FCST_INPUT_FILE_TMPL' :\n {'sec' : 'filename_templates', 'alt' : 'FCST_POINT_STAT_INPUT_TEMPLATE'},\n 'NC_FILE_TMPL' : {'sec' : 'filename_templates', 'alt' : 'PB2NC_OUTPUT_TEMPLATE'},\n 'FCST_INPUT_DIR' : {'sec' : 'dir', 'alt' : 'FCST_POINT_STAT_INPUT_DIR'},\n 'OBS_INPUT_DIR' : {'sec' : 'dir', 'alt' : 'OBS_POINT_STAT_INPUT_DIR'},\n 'REGRID_TO_GRID' : {'sec' : 'config', 'alt' : 'POINT_STAT_REGRID_TO_GRID'},\n 'FCST_HR_START' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FCST_HR_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FCST_HR_INTERVAL' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'START_DATE' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_DATE' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 'copy': False},\n 'INTERVAL_TIME' : {'sec' : 'config', 'alt' : 'INIT_INCREMENT or VALID_INCREMENT', 'copy': False},\n 'BEG_TIME' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_TIME' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 'copy': False},\n 'START_HOUR' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_HOUR' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 'copy': False},\n 'OBS_BUFR_VAR_LIST' : {'sec' : 'config', 'alt' : 'PB2NC_OBS_BUFR_VAR_LIST'},\n 'TIME_SUMMARY_FLAG' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_FLAG'},\n 'TIME_SUMMARY_BEG' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_BEG'},\n 'TIME_SUMMARY_END' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_END'},\n 'TIME_SUMMARY_VAR_NAMES' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_VAR_NAMES'},\n 'TIME_SUMMARY_TYPE' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_TYPE'},\n 'OVERWRITE_NC_OUTPUT' : {'sec' : 'config', 'alt' : 'PB2NC_SKIP_IF_OUTPUT_EXISTS', 'copy': False},\n 'VERTICAL_LOCATION' : {'sec' : 'config', 'alt' : 'PB2NC_VERTICAL_LOCATION'},\n 'VERIFICATION_GRID' : {'sec' : 'config', 'alt' : 'REGRID_DATA_PLANE_VERIF_GRID'},\n 'WINDOW_RANGE_BEG' : {'sec' : 'config', 'alt' : 'OBS_WINDOW_BEGIN'},\n 'WINDOW_RANGE_END' : {'sec' : 'config', 'alt' : 'OBS_WINDOW_END'},\n 'OBS_EXACT_VALID_TIME' :\n {'sec' : 'config', 'alt' : 'OBS_WINDOW_BEGIN and OBS_WINDOW_END', 'copy': False},\n 'FCST_EXACT_VALID_TIME' :\n {'sec' : 'config', 'alt' : 'FCST_WINDOW_BEGIN and FCST_WINDOW_END', 'copy': False},\n 'PCP_COMBINE_METHOD' :\n {'sec' : 'config', 'alt' : 'FCST_PCP_COMBINE_METHOD and/or OBS_PCP_COMBINE_METHOD', 'copy': False},\n 'FHR_BEG' : 
{'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_INC' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_GROUP_BEG' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]', 'copy': False},\n 'FHR_GROUP_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]', 'copy': False},\n 'FHR_GROUP_LABELS' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]_LABEL', 'copy': False},\n 'CYCLONE_OUT_DIR' : {'sec' : 'dir', 'alt' : 'CYCLONE_OUTPUT_DIR'},\n 'ENSEMBLE_STAT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'ENSEMBLE_STAT_OUTPUT_DIR'},\n 'EXTRACT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'EXTRACT_TILES_OUTPUT_DIR'},\n 'GRID_STAT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'GRID_STAT_OUTPUT_DIR'},\n 'MODE_OUT_DIR' : {'sec' : 'dir', 'alt' : 'MODE_OUTPUT_DIR'},\n 'MTD_OUT_DIR' : {'sec' : 'dir', 'alt' : 'MTD_OUTPUT_DIR'},\n 'SERIES_INIT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_LEAD_OUT_DIR' : {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_INIT_FILTERED_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_LEAD_FILTERED_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'STAT_ANALYSIS_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'STAT_ANALYSIS_OUTPUT_DIR'},\n 'TCMPR_PLOT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'TCMPR_PLOT_OUTPUT_DIR'},\n 'FCST_MIN_FORECAST' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_MIN'},\n 'FCST_MAX_FORECAST' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_MAX'},\n 'OBS_MIN_FORECAST' : {'sec' : 'config', 'alt' : 'OBS_PCP_COMBINE_MIN_LEAD'},\n 'OBS_MAX_FORECAST' : {'sec' : 'config', 'alt' : 'OBS_PCP_COMBINE_MAX_LEAD'},\n 'FCST_INIT_INTERVAL' : {'sec' : 'config', 'alt' : None},\n 'OBS_INIT_INTERVAL' : {'sec' : 'config', 'alt' : None},\n 'FCST_DATA_INTERVAL' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_DATA_INTERVAL'},\n 'OBS_DATA_INTERVAL' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_DATA_INTERVAL'},\n 'FCST_IS_DAILY_FILE' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_IS_DAILY_FILE'},\n 'OBS_IS_DAILY_FILE' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_IS_DAILY_FILE'},\n 'FCST_TIMES_PER_FILE' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_TIMES_PER_FILE'},\n 'OBS_TIMES_PER_FILE' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_TIMES_PER_FILE'},\n 'FCST_LEVEL' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_INPUT_ACCUMS', 'copy': False},\n 'OBS_LEVEL' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_INPUT_ACCUMS', 'copy': False},\n 'MODE_FCST_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'FCST_MODE_CONV_RADIUS'},\n 'MODE_FCST_CONV_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MODE_CONV_THRESH'},\n 'MODE_FCST_MERGE_FLAG' : {'sec' : 'config', 'alt' : 'FCST_MODE_MERGE_FLAG'},\n 'MODE_FCST_MERGE_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MODE_MERGE_THRESH'},\n 'MODE_OBS_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'OBS_MODE_CONV_RADIUS'},\n 'MODE_OBS_CONV_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MODE_CONV_THRESH'},\n 'MODE_OBS_MERGE_FLAG' : {'sec' : 'config', 'alt' : 'OBS_MODE_MERGE_FLAG'},\n 'MODE_OBS_MERGE_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MODE_MERGE_THRESH'},\n 'MTD_FCST_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'FCST_MTD_CONV_RADIUS'},\n 'MTD_FCST_CONV_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MTD_CONV_THRESH'},\n 'MTD_OBS_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'OBS_MTD_CONV_RADIUS'},\n 'MTD_OBS_CONV_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MTD_CONV_THRESH'},\n 'RM_EXE' : {'sec' : 'exe', 'alt' : 'RM'},\n 'CUT_EXE' : {'sec' : 'exe', 'alt' : 'CUT'},\n 'TR_EXE' : {'sec' : 'exe', 'alt' : 'TR'},\n 'NCAP2_EXE' : 
{'sec' : 'exe', 'alt' : 'NCAP2'},\n 'CONVERT_EXE' : {'sec' : 'exe', 'alt' : 'CONVERT'},\n 'NCDUMP_EXE' : {'sec' : 'exe', 'alt' : 'NCDUMP'},\n 'EGREP_EXE' : {'sec' : 'exe', 'alt' : 'EGREP'},\n 'ADECK_TRACK_DATA_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_ADECK_INPUT_DIR'},\n 'BDECK_TRACK_DATA_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_BDECK_INPUT_DIR'},\n 'MISSING_VAL_TO_REPLACE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_MISSING_VAL_TO_REPLACE'},\n 'MISSING_VAL' : {'sec' : 'config', 'alt' : 'TC_PAIRS_MISSING_VAL'},\n 'TRACK_DATA_SUBDIR_MOD' : {'sec' : 'dir', 'alt' : None},\n 'ADECK_FILE_PREFIX' : {'sec' : 'config', 'alt' : 'TC_PAIRS_ADECK_TEMPLATE', 'copy': False},\n 'BDECK_FILE_PREFIX' : {'sec' : 'config', 'alt' : 'TC_PAIRS_BDECK_TEMPLATE', 'copy': False},\n 'TOP_LEVEL_DIRS' : {'sec' : 'config', 'alt' : 'TC_PAIRS_READ_ALL_FILES'},\n 'TC_PAIRS_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_OUTPUT_DIR'},\n 'CYCLONE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_CYCLONE'},\n 'STORM_ID' : {'sec' : 'config', 'alt' : 'TC_PAIRS_STORM_ID'},\n 'BASIN' : {'sec' : 'config', 'alt' : 'TC_PAIRS_BASIN'},\n 'STORM_NAME' : {'sec' : 'config', 'alt' : 'TC_PAIRS_STORM_NAME'},\n 'DLAND_FILE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_DLAND_FILE'},\n 'TRACK_TYPE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_REFORMAT_DECK'},\n 'FORECAST_TMPL' : {'sec' : 'filename_templates', 'alt' : 'TC_PAIRS_ADECK_TEMPLATE'},\n 'REFERENCE_TMPL' : {'sec' : 'filename_templates', 'alt' : 'TC_PAIRS_BDECK_TEMPLATE'},\n 'TRACK_DATA_MOD_FORCE_OVERWRITE' :\n {'sec' : 'config', 'alt' : 'TC_PAIRS_SKIP_IF_REFORMAT_EXISTS', 'copy': False},\n 'TC_PAIRS_FORCE_OVERWRITE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_SKIP_IF_OUTPUT_EXISTS', 'copy': False},\n 'GRID_STAT_CONFIG' : {'sec' : 'config', 'alt' : 'GRID_STAT_CONFIG_FILE'},\n 'MODE_CONFIG' : {'sec' : 'config', 'alt': 'MODE_CONFIG_FILE'},\n 'FCST_PCP_COMBINE_INPUT_LEVEL': {'sec': 'config', 'alt' : 'FCST_PCP_COMBINE_INPUT_ACCUMS'},\n 'OBS_PCP_COMBINE_INPUT_LEVEL': {'sec': 'config', 'alt' : 'OBS_PCP_COMBINE_INPUT_ACCUMS'},\n 'TIME_METHOD': {'sec': 'config', 'alt': 'LOOP_BY', 'copy': False},\n 'MODEL_DATA_DIR': {'sec': 'dir', 'alt': 'EXTRACT_TILES_GRID_INPUT_DIR'},\n 'STAT_LIST': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_STAT_LIST'},\n 'NLAT': {'sec': 'config', 'alt': 'EXTRACT_TILES_NLAT'},\n 'NLON': {'sec': 'config', 'alt': 'EXTRACT_TILES_NLON'},\n 'DLAT': {'sec': 'config', 'alt': 'EXTRACT_TILES_DLAT'},\n 'DLON': {'sec': 'config', 'alt': 'EXTRACT_TILES_DLON'},\n 'LON_ADJ': {'sec': 'config', 'alt': 'EXTRACT_TILES_LON_ADJ'},\n 'LAT_ADJ': {'sec': 'config', 'alt': 'EXTRACT_TILES_LAT_ADJ'},\n 'OVERWRITE_TRACK': {'sec': 'config', 'alt': 'EXTRACT_TILES_OVERWRITE_TRACK'},\n 'BACKGROUND_MAP': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_BACKGROUND_MAP'},\n 'GFS_FCST_FILE_TMPL': {'sec': 'filename_templates', 'alt': 'FCST_EXTRACT_TILES_INPUT_TEMPLATE'},\n 'GFS_ANLY_FILE_TMPL': {'sec': 'filename_templates', 'alt': 'OBS_EXTRACT_TILES_INPUT_TEMPLATE'},\n 'SERIES_BY_LEAD_FILTERED_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_BY_INIT_FILTERED_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_BY_LEAD_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_BY_INIT_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_BY_LEAD_GROUP_FCSTS': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_GROUP_FCSTS'},\n 'SERIES_ANALYSIS_BY_LEAD_CONFIG_FILE': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_CONFIG_FILE'},\n 'SERIES_ANALYSIS_BY_INIT_CONFIG_FILE': 
{'sec': 'config', 'alt': 'SERIES_ANALYSIS_CONFIG_FILE'},\n 'ENSEMBLE_STAT_MET_OBS_ERROR_TABLE': {'sec': 'config', 'alt': 'ENSEMBLE_STAT_MET_OBS_ERR_TABLE'},\n 'VAR_LIST': {'sec': 'config', 'alt': 'BOTH_VAR<n>_NAME BOTH_VAR<n>_LEVELS or SERIES_ANALYSIS_VAR_LIST', 'copy': False},\n 'SERIES_ANALYSIS_VAR_LIST': {'sec': 'config', 'alt': 'BOTH_VAR<n>_NAME BOTH_VAR<n>_LEVELS', 'copy': False},\n 'EXTRACT_TILES_VAR_LIST': {'sec': 'config', 'alt': ''},\n 'STAT_ANALYSIS_LOOKIN_DIR': {'sec': 'dir', 'alt': 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR'},\n 'VALID_HOUR_METHOD': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_BEG': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_END': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_INCREMENT': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_METHOD': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_BEG': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_END': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_INCREMENT': {'sec': 'config', 'alt': None},\n 'STAT_ANALYSIS_CONFIG': {'sec': 'config', 'alt': 'STAT_ANALYSIS_CONFIG_FILE'},\n 'JOB_NAME': {'sec': 'config', 'alt': 'STAT_ANALYSIS_JOB_NAME'},\n 'JOB_ARGS': {'sec': 'config', 'alt': 'STAT_ANALYSIS_JOB_ARGS'},\n 'FCST_LEAD': {'sec': 'config', 'alt': 'FCST_LEAD_LIST'},\n 'FCST_VAR_NAME': {'sec': 'config', 'alt': 'FCST_VAR_LIST'},\n 'FCST_VAR_LEVEL': {'sec': 'config', 'alt': 'FCST_VAR_LEVEL_LIST'},\n 'OBS_VAR_NAME': {'sec': 'config', 'alt': 'OBS_VAR_LIST'},\n 'OBS_VAR_LEVEL': {'sec': 'config', 'alt': 'OBS_VAR_LEVEL_LIST'},\n 'REGION': {'sec': 'config', 'alt': 'VX_MASK_LIST'},\n 'INTERP': {'sec': 'config', 'alt': 'INTERP_LIST'},\n 'INTERP_PTS': {'sec': 'config', 'alt': 'INTERP_PTS_LIST'},\n 'CONV_THRESH': {'sec': 'config', 'alt': 'CONV_THRESH_LIST'},\n 'FCST_THRESH': {'sec': 'config', 'alt': 'FCST_THRESH_LIST'},\n 'LINE_TYPE': {'sec': 'config', 'alt': 'LINE_TYPE_LIST'},\n 'STAT_ANALYSIS_DUMP_ROW_TMPL': {'sec': 'filename_templates', 'alt': 'STAT_ANALYSIS_DUMP_ROW_TEMPLATE'},\n 'STAT_ANALYSIS_OUT_STAT_TMPL': {'sec': 'filename_templates', 'alt': 'STAT_ANALYSIS_OUT_STAT_TEMPLATE'},\n 'PLOTTING_SCRIPTS_DIR': {'sec': 'dir', 'alt': 'MAKE_PLOTS_SCRIPTS_DIR'},\n 'STAT_FILES_INPUT_DIR': {'sec': 'dir', 'alt': 'MAKE_PLOTS_INPUT_DIR'},\n 'PLOTTING_OUTPUT_DIR': {'sec': 'dir', 'alt': 'MAKE_PLOTS_OUTPUT_DIR'},\n 'VERIF_CASE': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_CASE'},\n 'VERIF_TYPE': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_TYPE'},\n 'PLOT_TIME': {'sec': 'config', 'alt': 'DATE_TIME'},\n 'MODEL<n>_NAME': {'sec': 'config', 'alt': 'MODEL<n>'},\n 'MODEL<n>_OBS_NAME': {'sec': 'config', 'alt': 'MODEL<n>_OBTYPE'},\n 'MODEL<n>_STAT_DIR': {'sec': 'dir', 'alt': 'MODEL<n>_STAT_ANALYSIS_LOOKIN_DIR'},\n 'MODEL<n>_NAME_ON_PLOT': {'sec': 'config', 'alt': 'MODEL<n>_REFERENCE_NAME'},\n 'REGION_LIST': {'sec': 'config', 'alt': 'VX_MASK_LIST'},\n 'PLOT_STATS_LIST': {'sec': 'config', 'alt': 'MAKE_PLOT_STATS_LIST'},\n 'CI_METHOD': {'sec': 'config', 'alt': 'MAKE_PLOTS_CI_METHOD'},\n 'VERIF_GRID': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_GRID'},\n 'EVENT_EQUALIZATION': {'sec': 'config', 'alt': 'MAKE_PLOTS_EVENT_EQUALIZATION'},\n 'MTD_CONFIG': {'sec': 'config', 'alt': 'MTD_CONFIG_FILE'},\n 'CLIMO_GRID_STAT_INPUT_DIR': {'sec': 'dir', 'alt': 'GRID_STAT_CLIMO_MEAN_INPUT_DIR'},\n 'CLIMO_GRID_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 'GRID_STAT_CLIMO_MEAN_INPUT_TEMPLATE'},\n 'CLIMO_POINT_STAT_INPUT_DIR': {'sec': 'dir', 'alt': 'POINT_STAT_CLIMO_MEAN_INPUT_DIR'},\n 'CLIMO_POINT_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 
'POINT_STAT_CLIMO_MEAN_INPUT_TEMPLATE'},\n 'GEMPAKTOCF_CLASSPATH': {'sec': 'exe', 'alt': 'GEMPAKTOCF_JAR', 'copy': False},\n 'CUSTOM_INGEST_<n>_OUTPUT_DIR': {'sec': 'dir', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_DIR'},\n 'CUSTOM_INGEST_<n>_OUTPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_TEMPLATE'},\n 'CUSTOM_INGEST_<n>_OUTPUT_GRID': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_GRID'},\n 'CUSTOM_INGEST_<n>_SCRIPT': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_SCRIPT'},\n 'CUSTOM_INGEST_<n>_TYPE': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_TYPE'},\n 'TC_STAT_RUN_VIA': {'sec': 'config', 'alt': 'TC_STAT_CONFIG_FILE',\n 'copy': False},\n 'TC_STAT_CMD_LINE_JOB': {'sec': 'config', 'alt': 'TC_STAT_JOB_ARGS'},\n 'TC_STAT_JOBS_LIST': {'sec': 'config', 'alt': 'TC_STAT_JOB_ARGS'},\n 'EXTRACT_TILES_OVERWRITE_TRACK': {'sec': 'config',\n 'alt': 'EXTRACT_TILES_SKIP_IF_OUTPUT_EXISTS',\n 'copy': False},\n 'EXTRACT_TILES_PAIRS_INPUT_DIR': {'sec': 'dir',\n 'alt': 'EXTRACT_TILES_STAT_INPUT_DIR',\n 'copy': False},\n 'EXTRACT_TILES_FILTERED_OUTPUT_TEMPLATE': {'sec': 'filename_template',\n 'alt': 'EXTRACT_TILES_STAT_INPUT_TEMPLATE',},\n 'EXTRACT_TILES_GRID_INPUT_DIR': {'sec': 'dir',\n 'alt': 'FCST_EXTRACT_TILES_INPUT_DIR'\n 'and '\n 'OBS_EXTRACT_TILES_INPUT_DIR',\n 'copy': False},\n 'SERIES_ANALYSIS_FILTER_OPTS': {'sec': 'config',\n 'alt': 'TC_STAT_JOB_ARGS',\n 'copy': False},\n 'SERIES_ANALYSIS_INPUT_DIR': {'sec': 'dir',\n 'alt': 'FCST_SERIES_ANALYSIS_INPUT_DIR '\n 'and '\n 'OBS_SERIES_ANALYSIS_INPUT_DIR'},\n 'FCST_SERIES_ANALYSIS_TILE_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'FCST_SERIES_ANALYSIS_INPUT_TEMPLATE '},\n 'OBS_SERIES_ANALYSIS_TILE_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'OBS_SERIES_ANALYSIS_INPUT_TEMPLATE '},\n 'EXTRACT_TILES_STAT_INPUT_DIR': {'sec': 'dir',\n 'alt': 'EXTRACT_TILES_TC_STAT_INPUT_DIR',},\n 'EXTRACT_TILES_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'EXTRACT_TILES_TC_STAT_INPUT_TEMPLATE',},\n 'SERIES_ANALYSIS_STAT_INPUT_DIR': {'sec': 'dir',\n 'alt': 'SERIES_ANALYSIS_TC_STAT_INPUT_DIR', },\n 'SERIES_ANALYSIS_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'SERIES_ANALYSIS_TC_STAT_INPUT_TEMPLATE', },\n }\n\n # template '' : {'sec' : '', 'alt' : '', 'copy': True},\n\n logger = config.logger\n\n # create list of errors and warnings to report for deprecated configs\n e_list = []\n w_list = []\n all_sed_cmds = []\n\n for old, depr_info in deprecated_dict.items():\n if isinstance(depr_info, dict):\n\n # check if <n> is found in the old item, use regex to find variables if found\n if '<n>' in old:\n old_regex = old.replace('<n>', r'(\\d+)')\n indices = find_indices_in_config_section(old_regex,\n config,\n index_index=1).keys()\n for index in indices:\n old_with_index = old.replace('<n>', index)\n if depr_info['alt']:\n alt_with_index = depr_info['alt'].replace('<n>', index)\n else:\n alt_with_index = ''\n\n handle_deprecated(old_with_index, alt_with_index, depr_info,\n config, all_sed_cmds, w_list, e_list)\n else:\n handle_deprecated(old, depr_info['alt'], depr_info,\n config, all_sed_cmds, w_list, e_list)\n\n\n # check all templates and error if any deprecated tags are used\n # value of dict is replacement tag, set to None if no replacement exists\n # deprecated tags: region (replace with basin)\n deprecated_tags = {'region' : 'basin'}\n template_vars = config.keys('config')\n template_vars = [tvar for tvar in template_vars if tvar.endswith('_TEMPLATE')]\n for temp_var in template_vars:\n 
template = config.getraw('filename_templates', temp_var)\n tags = get_tags(template)\n\n for depr_tag, replace_tag in deprecated_tags.items():\n if depr_tag in tags:\n e_msg = 'Deprecated tag {{{}}} found in {}.'.format(depr_tag,\n temp_var)\n if replace_tag is not None:\n e_msg += ' Replace with {{{}}}'.format(replace_tag)\n\n e_list.append(e_msg)\n\n # if any warning exist, report them\n if w_list:\n for warning_msg in w_list:\n logger.warning(warning_msg)\n\n # if any errors exist, report them and exit\n if e_list:\n logger.error('DEPRECATED CONFIG ITEMS WERE FOUND. ' +\\\n 'PLEASE REMOVE/REPLACE THEM FROM CONFIG FILES')\n for error_msg in e_list:\n logger.error(error_msg)\n return False, all_sed_cmds\n\n return True, []", "def test_old_config_fails() -> None:\n with pytest.raises(SystemExit):\n fauxmo.main(config_path_str=\"tests/old-config-sample.json\")", "def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')", "def test_no_config_keyword(self):\n args = self.get_args()\n config = {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n temp = sys.stdout\n fake_out = FakeStdio()\n sys.stdout = fake_out\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n sys.stdout = temp\n self.assertTrue(fake_out.verify_output(['%% Invalid configuration file', '\\n']))", "def test_missing_repos(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n \"\"\"\n ).pre_commit(\n \"\"\"\n grepos:\n - hooks:\n - id: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, \" doesn't have the 'repos' root key\")\n )", "def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. 
'\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config", "def check_config(config):\n\n # Check config\n assert config.dataset in [\"conll04\", \"ace05\"]\n assert config.train_mode in [\"train\", \"train+dev\"]\n\n for emb in config.embedder:\n assert emb in [\"word\", \"char\", \"bert-base\", \"bert-large\"], emb\n\n if \"char\" in config.embedder:\n assert config.char_pool in [\"last\", \"avg\", \"max\"]\n\n if config.encoder is not None:\n assert config.encoder == \"bilstm\"\n\n for task in config.tasks:\n assert task in [\"ner\", \"re\"]\n\n assert config.ner_decoder in [\"iobes\", \"span\"]\n\n if \"cuda\" in config.device:\n assert torch.cuda.is_available(), \"CUDA not available\"", "def test_load_config(self):\n config = copyclipper.LoadConfig()\n self.assertTrue(len(config) > 0)", "def test_running_with_badly_formatted_config():\n cli_result = subprocess.run(\n ['kaiba', 'tests/files/bad_config.json', 'tests/files/input.json'],\n capture_output=True,\n )\n assert b\"'target' is a required property\" in cli_result.stderr", "def main(\n root: Path = typer.Argument(Path.cwd(), help=\"Root path of repo\"),\n skip: bool = typer.Option(False, \"--skip\", \"-s\", help=\"Skip errors\"),\n):\n msg.info(f\"Updating configs in {root}\")\n for path in root.glob(\"**/configs/*.cfg\"):\n rel_path = path.relative_to(root)\n if rel_path.parts[0].startswith(\".\"):\n continue\n print(rel_path)\n try:\n before, after = fill_config(path, path, silent=True)\n except (Exception, SystemExit) as e:\n if skip:\n msg.fail(\"Failed\", e)\n continue\n else:\n raise\n if before != after:\n msg.good(\"Filled\")\n else:\n msg.info(\"Already up to date\")", "def test_compliance_configuration(self, evidence):\n evidence_config = json.loads(evidence.content)\n if evidence_config != self.config.raw_config:\n evidence = json.dumps(evidence_config, indent=2).split('\\n')\n config = json.dumps(self.config.raw_config, indent=2).split('\\n')\n self.add_failures(\n 'Differences found',\n {\n 'Fetcher Configuration': evidence,\n 'Check Configuration': config\n }\n )", "def test_no_body_smart_require_min_body_lines_option_ignored(self, custom_config):\n del custom_config['body']['smart_require']['min_changes']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2000},\n 'message': 'xxxxx',\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def test_config_wrong_config(self):\n test_data_1 = (\"[gnupg_missing]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email 
protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n test_data_2 = (\"[gnupg]\\n\"\n \"recipients_missing = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data_1)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n file(\"test_config.conf\", \"wb\").write(test_data_2)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n os.remove(\"test_config.conf\")", "def test_read_config():\n # for config in config_fname, config_solaris_fname:\n for config in config_fnames:\n cfg = _read_config(config)\n assert all(\n \"unknown\" not in block.lower() and block != \"\"\n for block in cfg[\"user_blocks\"]\n )", "def add_fixed_parameters_from_config_file(self, config_file):\n pass", "def config(config, fork_name=\"\", origin_name=\"\", default_branch=\"\"):\n state = read(config.configfile)\n if fork_name:\n update(config.configfile, {\"FORK_NAME\": fork_name})\n success_out(f\"fork-name set to: {fork_name}\")\n else:\n info_out(f\"fork-name: {state['FORK_NAME']}\")\n\n if origin_name:\n update(config.configfile, {\"ORIGIN_NAME\": origin_name})\n success_out(f\"origin-name set to: {origin_name}\")\n else:\n info_out(f\"origin-name: {state.get('ORIGIN_NAME', '*not set*')}\")\n\n if default_branch:\n update(config.configfile, {\"DEFAULT_BRANCH\": default_branch})\n success_out(f\"default-branch set to: {default_branch}\")\n else:\n info_out(f\"default-branch: {state.get('DEFAULT_BRANCH', '*not set*')}\")", "def check_config(cls, config: 'bittensor.Config' ):\n assert 'wallet' in config\n assert isinstance(config.wallet.name, str)\n assert isinstance(config.wallet.hotkey, str)\n assert isinstance(config.wallet.path, str)", "def test_load_config_safe(self):\n self.__test_load_config_safe(\".scuba.yml\")", "def test_blank_config_doesnt_crash(tmpdir):\n config = tmpdir.join(\"config.yml\")\n config.write('')\n util.read_config(tmpdir)", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif expected is None:\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n 
self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")", "def _fillConfig(config, bare=False):\n if not bare:\n configFile = CONFIG_STRUCTURE.format(\n sup2Sub=config.sup2Sub,\n handBrake=config.handBrake,\n java=config.java,\n mkvExtract=config.mkvExtract,\n mkvMerge=config.mkvMerge,\n bFrames=config.bFrames,\n audioFallback=config.audioFallback,\n language=config.language,\n sorting=config.sorting,\n sortingReverse=config.sortingReverse,\n x264Speed=config.x264Speed,\n bq1080=config.quality['bq']['1080'],\n bq720=config.quality['bq']['720'],\n bq480=config.quality['bq']['480'],\n hq1080=config.quality['hq']['1080'],\n hq720=config.quality['hq']['720'],\n hq480=config.quality['hq']['480'],\n uq1080=config.quality['uq']['1080'],\n uq720=config.quality['uq']['720'],\n uq480=config.quality['uq']['480'],\n )\n else:\n configFile = CONFIG_STRUCTURE_BARE.format(\n sup2Sub=config.sup2Sub,\n handBrake=config.handBrake,\n java=config.java,\n mkvExtract=config.mkvExtract,\n mkvMerge=config.mkvMerge\n )\n\n return configFile", "def testReadConfig(loggingMixin, configType, configTypeString):\n # This could be different than configType if we want to use a string.\n # We use a different object because we still want to use the standard config type later in the test.\n configTypeForReadingConfig = configType\n if configTypeString:\n configTypeForReadingConfig = configType.name\n (parameters, filesRead) = config.readConfig(configTypeForReadingConfig)\n\n filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"configTestFiles\", \"{}ConfigRef.yaml\".format(configType.name))\n\n # We need to treat whether the file exists with a bit of care.\n # NOTE: Since the parametization causes this to run mulitple times, some will pass and\n # and some will fail, even when creating the configuration files. This is fine.\n if os.path.exists(filename):\n # Access the expected values\n with open(filename, \"r\") as f:\n expected = yaml.load(f.read(), Loader = yaml.SafeLoader)\n else:\n # For making the reference\n with open(filename, \"w\") as f:\n yaml.dump(parameters, f)\n logger.warning(\"Creating configuration reference for {} module\".format(configType.name))\n # We don't want to go further - we're just creating the reference.\n assert False\n\n # Don't compare the full \"_users\" values because they will always be different due to differences in hashing\n paramUsers = parameters.pop(\"_users\", None)\n expectedUsers = expected.pop(\"_users\", None)\n # However, the beginning should match (same idea as in `testBcrypt`)\n lengthToCheck = 7\n # It won't always exist, so we need to check for it first.\n if paramUsers:\n for k, v in iteritems(paramUsers):\n assert v[:lengthToCheck] == expectedUsers[k][:lengthToCheck]\n\n # Apparently the order of these lists can vary between different systems. 
We don't care about the order\n # - just the values themselves - so we compare them as sets, which don't depend on order.\n paramTemplates = parameters.pop(\"availableRunPageTemplates\", None)\n expectedTemplates = expected.pop(\"availableRunPageTemplates\", None)\n # It won't always exist, so we need to check for it first.\n if paramTemplates:\n assert set(paramTemplates) == set(expectedTemplates)\n\n # Everything else should be identical.\n assert parameters == expected", "def test_load_release_file(config):\n assert config.has_section(\"main_project\"), \"No `main_project` section in release.ini file\"\n assert config.has_section(\"sonar\"), \"No `sonar` section in release.ini file\"\n assert config.has_section(\"docs\"), \"No `docs` section in release.ini file\"\n assert config.has_section(\"setup\"), \"No `setup` section in release.ini file\"\n assert config.has_section(\"setup_cfg\"), \"No `setup` section in release.ini file\"\n assert config.has_section(\"ansible\"), \"No `ansible` section in release.ini file\"", "def test_post_pull_request_review_bad_config(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.return_value = \"config-data\"\n\n validate_config.side_effect = ConfigError(\n 'Config Validation Error',\n ({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)\n )\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with(\"repo-full-name\", None)\n validate_config.assert_called_once_with(\"config-data\")\n self.assertEqual(\n response,\n (\n {\n 'status': 'Config Validation Error',\n 'message': 'Bad config data'\n },\n 500\n )\n )", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def 
config_bonus_validator(config: Dict[str, Any]) -> None:\n\n if len(config['infiles']) != 2:\n abort(\"Error: Two file names must be provided, what was found: %s\" % config['infiles'])\n elif not exists(config['infiles'][0]):\n abort(\"Error: The first file does not exist: %s\" % config['infiles'][0])\n elif not exists(config['infiles'][1]):\n abort(\"Error: The second file does not exist: %s\" % config['infiles'][1])\n\n if config['compare_cols'] and config['ignore_cols']:\n abort(\"Error: Provide only one of compare_cols or ignore_cols, not both\")\n\n if len(list(set(config['ignore_cols']) & set(config['key_cols']))) > 0:\n config['ignore_cols'] = [x for x in config['ignore_cols'] if x not in config['key_cols']]\n print(\"Warning: some key-cols removed from ignore-cols\")\n print(\"Revised config['ignore_cols']: %s\" % config.get('ignore_cols', None))\n elif len(list(set(config['compare_cols']) & set(config['key_cols']))) > 0:\n config['compare_cols'] = [x for x in config['compare_cols'] if x not in config['key_cols']]\n print(\"Warning: some key-cols removed from compare-cols\")\n print(\"Revised config['compare_cols']: %s\" % config.get('compare_cols', None))\n\n for kv_pair in config['variables']:\n if ':' not in kv_pair:\n abort('Invalid variable: must be name:value. Was: %s' % kv_pair)\n\n if 'assignments' in config:\n for assign in config['assignments']:\n if isinstance(assign['src_field'], list):\n abort('Assignment src_field must be a string (refers to col_name) '\n 'or an integer - it is a list')\n if isinstance(assign['dest_field'], list):\n abort('Assignment dest_field must be a string (refers to col_name)'\n 'or an integer - it is a list')", "def test_no_body_max_line_length_option_ignored(self, custom_config):\n del custom_config['body']['max_line_length']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2},\n 'message': 'xxxxx\\n\\n{}'.format('A' * 1000),\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def urban_configurations_test():\n gaz = VladGazetteer(TextIOWrapper(resource_stream('pycaptioner', 'test/data/gazetteer_urban.txt')))\n reader = DictReader(TextIOWrapper(resource_stream('pycaptioner', 'test/data/points.csv')))\n for line in reader:\n if line['category'] == 'urban':\n point = geometry.Point(float(line['lon']), float(line['lat']))\n configurations = generate_configurations(point, gaz, 'urban')\n configurations['subject'] = {'dc_title': line['subject']}\n caption = urban_caption(configurations)\n tools.assert_is_not_none(caption)\n caption = generate_caption(caption)\n tools.assert_is_not_none(caption)\n print(caption)\n tools.assert_is_not_none(None)", "def test_example(config):\n conventional_commits = ConventionalCommitsCz(config)\n example = conventional_commits.example()\n assert isinstance(example, str)", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def check_common_defaults(cfg):\n assert cfg.cluster.name.startswith('elasticblast') # Needed to run ElasticBLAST on NCBI AWS account see SYS-360205\n if cfg.cloud_provider.cloud == CSP.GCP:\n assert cfg.cluster.machine_type == constants.ELB_DFLT_GCP_MACHINE_TYPE\n else:\n assert cfg.cluster.machine_type == constants.ELB_DFLT_AWS_MACHINE_TYPE\n\n assert cfg.cluster.use_preemptible == 
constants.ELB_DFLT_USE_PREEMPTIBLE\n assert cfg.blast.options == f'-outfmt {int(constants.ELB_DFLT_OUTFMT)}'\n assert cfg.blast.db_source.name == cfg.cloud_provider.cloud.name\n assert cfg.blast.db_mem_margin == constants.ELB_BLASTDB_MEMORY_MARGIN", "def test_load_config_climate_params(self, config, config_dict, infile_dict, monkeypatch):\n test_params = {\n 'timeframe': 1,\n 'Prov': 'BC',\n 'format': 'xml',\n }\n monkeypatch.setitem(config_dict['climate'], 'params', test_params)\n config._read_yaml_file = Mock(return_value=config_dict)\n config._read_SOG_infile = Mock(return_value=infile_dict)\n config.load_config('config_file')\n assert config.climate.params == test_params", "def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def read_config_dict(config_data_dict):\n global template_test_file\n global test_interface_template_file\n global test_variable_template_file\n global report_expression_template_file\n global variable_name_in_template\n global variable_original_name_in_template\n global variable_default_value_in_template\n global test_path\n\n global api_url\n global api_1_0_url\n global bitbucket_repository_url\n global default_domain\n\n global sql_server\n global db_name\n\n global exec_server_address\n global exec_server_username\n global exec_server_password\n global exec_server_working_directory\n global robot_tests_directory\n global archive_output_directory\n global local_working_directory\n\n global cloudshell_server_address\n global cloudshell_server_port\n global cloudshell_server_username\n global cloudshell_server_password\n global cloudshell_server_domain\n global cloudshell_shared_robots_folder\n\n if 'template_test_file' in config_data_dict:\n template_test_file = config_data_dict['template_test_file']\n if 'test_interface_template' in config_data_dict:\n test_interface_template_file = config_data_dict['test_interface_template']\n if 'test_variable_template' in config_data_dict:\n test_variable_template_file = config_data_dict['test_variable_template']\n if 'report_expression_template' in config_data_dict:\n report_expression_template_file = config_data_dict['report_expression_template']\n if 'variable_name_in_template' in config_data_dict:\n variable_name_in_template = config_data_dict['variable_name_in_template']\n variable_original_name_in_template = variable_name_in_template + '_Original'\n if 'variable_default_value_in_template' in config_data_dict:\n variable_default_value_in_template = config_data_dict['variable_default_value_in_template']\n if 'test_path' in config_data_dict:\n test_path = config_data_dict['test_path']\n if not test_path.endswith('\\\\'):\n test_path += '\\\\'\n\n if 'api_url' in config_data_dict:\n api_url = 
config_data_dict['api_url']\n if 'api_1_0_url' in config_data_dict:\n api_1_0_url = config_data_dict['api_1_0_url']\n if 'bitbucket_repository_url' in config_data_dict:\n bitbucket_repository_url = config_data_dict['bitbucket_repository_url']\n if 'default_domain' in config_data_dict:\n default_domain = config_data_dict['default_domain']\n\n if 'sql_server' in config_data_dict:\n sql_server = config_data_dict['sql_server']\n if 'db_name' in config_data_dict:\n db_name = config_data_dict['db_name']\n\n if 'exec_server_address' in config_data_dict:\n exec_server_address = config_data_dict['exec_server_address']\n if 'exec_server_username' in config_data_dict:\n exec_server_username = config_data_dict['exec_server_username']\n if 'exec_server_password' in config_data_dict:\n exec_server_password = config_data_dict['exec_server_password']\n if 'exec_server_working_directory' in config_data_dict:\n exec_server_working_directory = config_data_dict['exec_server_working_directory']\n if 'robot_tests_directory' in config_data_dict:\n robot_tests_directory = config_data_dict['robot_tests_directory']\n if 'archive_output_directory' in config_data_dict:\n archive_output_directory = config_data_dict['archive_output_directory']\n if 'local_working_directory' in config_data_dict:\n local_working_directory = config_data_dict['local_working_directory']\n\n if 'cloudshell_server_address' in config_data_dict:\n cloudshell_server_address = config_data_dict['cloudshell_server_address']\n if 'cloudshell_server_port' in config_data_dict:\n cloudshell_server_port = config_data_dict['cloudshell_server_port']\n if 'cloudshell_server_username' in config_data_dict:\n cloudshell_server_username = config_data_dict['cloudshell_server_username']\n if 'cloudshell_server_password' in config_data_dict:\n cloudshell_server_password = config_data_dict['cloudshell_server_password']\n if 'cloudshell_server_domain' in config_data_dict:\n cloudshell_server_domain = config_data_dict['cloudshell_server_domain']\n if 'cloudshell_shared_robots_folder' in config_data_dict:\n cloudshell_shared_robots_folder = config_data_dict['cloudshell_shared_robots_folder']", "def setUp(self):\n\n commits = read_file('data/test_commits_data.json')\n self.file_gitignore = commits[0]['data']['files'][0]['file']\n self.file_tests = commits[1]['data']['files'][0]['file']\n self.file_bin = commits[2]['data']['files'][0]['file']\n self.file_perceval = commits[7]['data']['files'][0]['file']\n self.file_authors = commits[0]['data']['files'][1]['file']", "def test_no_yaml_key(tmp_path):\n ProjectMock(tmp_path).style(\n '''\n [[\".pre-commit-config.yaml\".repos]]\n missing_yaml_key = \"\"\"\n - repo: https://github.com/PyCQA/isort\n rev: 5.8.0\n hooks:\n - id: isort\n \"\"\"\n '''\n ).api_check_then_fix(\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 331,\n \" was not found. 
Create it with this content:\",\n \"\"\"\n repos: []\n \"\"\",\n )\n )", "def setUpConfig(self):\n pass", "def config_sanity_check(config: dict) -> dict:\n\n # back compatibility support\n config = parse_v011(config)\n\n # check model\n if config[\"train\"][\"method\"] == \"conditional\":\n if config[\"dataset\"][\"train\"][\"labeled\"] is False: # unlabeled\n raise ValueError(\n \"For conditional model, data have to be labeled, got unlabeled data.\"\n )\n\n return config", "def test_local_gitconfig_ignored_by_gitrepofixture(tmp_path):\n (tmp_path / \"HEAD\").write_text(\"ref: refs/heads/main\")\n\n with patch.dict(os.environ, {\"HOME\": str(tmp_path)}):\n # Note: once we decide to drop support for git < 2.28, the HEAD file\n # creation above can be removed, and setup can simplify to\n # check_call(\"git config --global init.defaultBranch main\".split())\n check_call(\"git config --global init.templateDir\".split() + [str(tmp_path)])\n root = tmp_path / \"repo\"\n root.mkdir()\n git_repo = GitRepoFixture.create_repository(root)\n assert git_repo.get_branch() == \"master\"", "def test_invalid_config(self):\n shutil.copy(self.beat_path + \"/tests/files/invalid.yml\",\n os.path.join(self.working_dir, \"invalid.yml\"))\n\n exit_code = self.run_beat(config=\"invalid.yml\")\n\n assert exit_code == 1\n assert self.log_contains(\"error loading config file\") is True", "def pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"serial: Tests that will not execute with more than 1 MPI process\")\n config.addinivalue_line(\"markers\",\n \"gpu: Tests that should only run on the gpu.\")\n config.addinivalue_line(\n \"markers\",\n \"cupy_optional: tests that should pass with and without CuPy.\")\n config.addinivalue_line(\"markers\", \"cpu: Tests that only run on the CPU.\")\n config.addinivalue_line(\"markers\", \"gpu: Tests that only run on the GPU.\")", "def test_and_swap(self, config):\n LOGGER.info('Attempting to apply new configuration')\n backup = self.backup_config()\n # We have backed up ALL config files (not just the ones we might\n # replace). If any error occurs from here out, we will need to restore\n # our config, so we will use exception handling.\n try:\n self.install_config(config)\n\n # We have now merged in our new configuration files, lets test this\n # config.\n if self.test_command(quiet=False):\n LOGGER.debug('Configuration good, reloading')\n self.reload_command()\n self.remove_config(backup)\n\n else:\n LOGGER.info('Configuration bad, restoring')\n self.restore_config(backup)\n\n except Exception:\n LOGGER.exception('Failure, restoring config', exc_info=True)\n self.restore_config(backup)", "def test_stable_config(tmp_path, config, defaultenv):\n\n # Set environment variables that some of the configs expect. 
Using a\n # complex ROLE_CLAIM_KEY to make sure quoting works.\n env = {\n **defaultenv,\n \"ROLE_CLAIM_KEY\": '.\"https://www.example.com/roles\"[0].value',\n \"POSTGREST_TEST_SOCKET\": \"/tmp/postgrest.sock\",\n \"POSTGREST_TEST_PORT\": \"80\",\n \"JWT_SECRET_FILE\": \"a_file\",\n }\n\n # Some configs expect input from stdin, at least on base64.\n stdin = b\"Y29ubmVjdGlvbl9zdHJpbmc=\"\n\n dumped = dumpconfig(config, env=env, stdin=stdin)\n\n tmpconfigpath = tmp_path / \"config\"\n tmpconfigpath.write_text(dumped)\n redumped = dumpconfig(tmpconfigpath, env=env)\n\n assert dumped == redumped", "def setUp(self):\n\n commits = read_file('data/test_commits_data.json')\n self.file_gitignore = commits[0]['data']['files'][0]['file']\n self.file_tests = commits[1]['data']['files'][0]['file']\n self.file_bin = commits[2]['data']['files'][0]['file']\n self.file_py = commits[4]['data']['files'][0]['file']\n self.file_authors = commits[0]['data']['files'][1]['file']", "def check_config( config: 'bittensor.Config' ):\n assert config.neuron.batch_size_train > 0, \"batch_size_train must be a positive value\"\n assert config.neuron.learning_rate > 0, \"learning_rate must be a positive value.\"\n bittensor.logging.check_config( config )\n bittensor.wallet.check_config( config )\n bittensor.subtensor.check_config( config )\n bittensor.metagraph.check_config( config )\n bittensor.dataloader.check_config( config )\n bittensor.dendrite.check_config( config )\n bittensor.axon.check_config( config )\n GPT2Nucleus.check_config( config )\n SGMOERouter.check_config( config )\n full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + \"-\" + config.wallet.hotkey, config.neuron.name ))\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path)", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def test_missing_different_values(tmp_path):\n ProjectMock(tmp_path).named_style(\n \"root\",\n '''\n [[\".pre-commit-config.yaml\".repos]]\n yaml = \"\"\"\n - repo: https://github.com/user/repo\n rev: 1.2.3\n hooks:\n - id: my-hook\n args: [--expected, arguments]\n \"\"\"\n ''',\n ).named_style(\n \"mypy\",\n '''\n # https://mypy.readthedocs.io/en/latest/config_file.html\n [\"setup.cfg\".mypy]\n ignore_missing_imports = true\n\n # Do not follow imports (except for ones found in typeshed)\n follow_imports = \"skip\"\n\n # Treat Optional per PEP 484\n strict_optional = true\n\n # Ensure all execution paths are returning\n warn_no_return = true\n\n # Lint-style cleanliness for typing\n warn_redundant_casts = true\n warn_unused_ignores = true\n\n [[\".pre-commit-config.yaml\".repos]]\n yaml = \"\"\"\n - repo: https://github.com/pre-commit/mirrors-mypy\n rev: v0.812\n hooks:\n - id: mypy\n \"\"\"\n ''',\n ).named_style(\n \"pre-commit/python\",\n '''\n [[\".pre-commit-config.yaml\".repos]]\n yaml = \"\"\"\n - repo: https://github.com/pre-commit/pygrep-hooks\n rev: v1.8.0\n hooks:\n - id: python-check-blanket-noqa\n - id: python-check-mock-methods\n - id: python-no-eval\n - id: python-no-log-warn\n - id: rst-backticks\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v4.0.1\n hooks:\n - id: debug-statements\n - repo: https://github.com/asottile/pyupgrade\n hooks:\n - id: pyupgrade\n \"\"\"\n ''',\n ).named_style(\n 
\"pre-commit/bash\",\n '''\n [[\".pre-commit-config.yaml\".repos]]\n yaml = \"\"\"\n - repo: https://github.com/openstack/bashate\n rev: 2.0.0\n hooks:\n - id: bashate\n \"\"\"\n ''',\n ).pyproject_toml(\n \"\"\"\n [tool.nitpick]\n style = [\"root\", \"mypy\", \"pre-commit/python\", \"pre-commit/bash\"]\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: https://github.com/pre-commit/pygrep-hooks\n rev: v1.1.0\n hooks:\n - id: python-check-blanket-noqa\n - id: missing-hook-in-this-position\n - id: python-no-eval\n - id: python-no-log-warn\n - id: rst-backticks\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v4.0.1\n hooks:\n - id: debug-statements\n - repo: https://github.com/asottile/pyupgrade\n rev: v2.16.0\n hooks:\n - id: pyupgrade\n - repo: https://github.com/openstack/bashate\n rev: 0.5.0\n hooks:\n - id: extra-hook-before-should-be-ignored\n - id: bashate\n args: [extra, arguments, should, --not, --throw, errors]\n - id: extra-hook-after-should-be-ignored\n - repo: https://github.com/user/repo\n rev: 1.2.3\n hooks:\n - id: my-hook\n args: [--different, args, --should, throw, errors]\n \"\"\"\n ).api_check_then_fix(\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 332,\n \": hook 'mypy' not found. Use this:\",\n f\"\"\"\n {NBSP * 2}- repo: https://github.com/pre-commit/mirrors-mypy\n rev: v0.812\n hooks:\n - id: mypy\n \"\"\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 332,\n \": hook 'python-check-mock-methods' not found. Use this:\",\n f\"\"\"\n {NBSP * 2}- repo: https://github.com/pre-commit/pygrep-hooks\n rev: v1.8.0\n hooks:\n - id: python-check-mock-methods\n \"\"\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'bashate' (rev: 0.5.0) has different values. Use this:\",\n \"rev: 2.0.0\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'python-check-blanket-noqa' (rev: v1.1.0) has different values. Use this:\",\n \"rev: v1.8.0\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'python-no-eval' (rev: v1.1.0) has different values. Use this:\",\n \"rev: v1.8.0\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'python-no-log-warn' (rev: v1.1.0) has different values. Use this:\",\n \"rev: v1.8.0\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'my-hook' (rev: 1.2.3) has different values. Use this:\",\n \"\"\"\n args:\n - --expected\n - arguments\n \"\"\",\n ),\n Fuss(\n False,\n PRE_COMMIT_CONFIG_YAML,\n 339,\n \": hook 'rst-backticks' (rev: v1.1.0) has different values. 
Use this:\",\n \"rev: v1.8.0\",\n ),\n partial_names=[PRE_COMMIT_CONFIG_YAML],\n )", "def _check_config(self):", "def test_minimal_configuration(self):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'minimal-cfg-file.ini'))\n self.cfg = configure(args)\n cfg = ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)\n\n self.assertTrue(cfg.blast.db_source)\n self.assertEqual(cfg.blast.db_source, DBSource.GCP)\n\n self.assertTrue(cfg.blast.batch_len)\n self.assertEqual(cfg.blast.batch_len, 10000)\n\n self.assertTrue(cfg.blast.mem_request)\n self.assertEqual(cfg.blast.mem_request, '0.5G')\n\n self.assertTrue(cfg.blast.mem_limit)\n expected_mem_limit = f'{get_machine_properties(cfg.cluster.machine_type).memory - SYSTEM_MEMORY_RESERVE}G'\n self.assertEqual(cfg.blast.mem_limit, expected_mem_limit)\n\n self.assertTrue(cfg.timeouts.init_pv > 0)\n self.assertTrue(cfg.timeouts.blast_k8s > 0)\n\n ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)", "def read_options(config, cover = False):\n\n root = config.root\n models_root = root + \"models/\"\n\n jsonfiles = \"\"\n for file in os.listdir(models_root):\n if file.split(\".\")[-1] == 'json' and file[:6] == \"config\":\n jsonfiles = file\n\n # print(jsonfiles)\n with open(models_root + jsonfiles, \"r\") as f:\n pre_config = json.loads(f.read())\n\n named = jsonfiles.split(\".\")[0]\n assert pre_config[\"model\"] == named.split(\"_\")[1], \"Wrong Models\"\n assert pre_config[\"dataset\"] == named.split(\"_\")[2], \"Wrong Dataset\"\n\n dict_config = dict(config._get_kwargs())\n for key, val in pre_config.items():\n if not cover and key in dict_config.keys():\n if dict_config[key] != val:\n print(\"Possible Conflict Keys: {}: {} {}\".format(key, dict_config[key], val))\n continue\n setattr(config, key, val)\n\n return config", "def test_valid_hook_with_config_file(self):\n with mock.patch(\n 'detect_secrets_server.core.usage.common.output.ALL_HOOKS',\n [\n HookDescriptor(\n display_name='config_needed',\n module_name='will_be_mocked',\n class_name='ConfigFileRequiredHook',\n config_setting=HookDescriptor.CONFIG_REQUIRED,\n ),\n ],\n ), mock.patch(\n 'detect_secrets_server.core.usage.common.output.import_module',\n return_value=Module(\n ConfigFileRequiredHook=ConfigFileRequiredMockClass,\n ),\n ):\n args = self.parse_args(\n 'scan '\n '--output-hook config_needed '\n '--output-config examples/pysensu.config.yaml '\n 'examples '\n )\n\n with open('examples/pysensu.config.yaml') as f:\n content = f.read()\n\n assert args.output_hook.config == content", "def test_missing_repo_key(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n grepo = \"glocal\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - hooks:\n - id: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 332, \": style file is missing 'repo' key in repo #0\")\n )", "def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"format_sql: mark format_sql tests.\")", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def prepare(self, config, **kwargs):\n pass", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n 
\"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def test_configuration_map(self):\n config = load_configuration(package_dir=self.dir, gitconfig_file=self.gitconfig)\n mapping = config.configuration_map()\n self.failUnless('cirrus' in mapping)\n self.failUnless('credentials' in mapping['cirrus'])\n self.failUnless('configuration' in mapping['cirrus'])\n self.failUnless('github_credentials' in mapping['cirrus']['credentials'])\n self.assertEqual(\n mapping['cirrus']['credentials']['github_credentials'],\n {'github_user': None, 'github_token': None}\n )\n self.assertEqual(\n mapping['cirrus']['configuration']['package']['name'], 'cirrus_tests'\n )", "def test_using_cfg_config(line_sorted_checker, capsys):\n want = \"\"\"\nphmdoctest- project.md => .gendir-suite-cfg/test_project.py\nphmdoctest- doc/directive1.md => .gendir-suite-cfg/test_doc__directive1.py\nphmdoctest- doc/directive2.md => .gendir-suite-cfg/test_doc__directive2.py\nphmdoctest- doc/directive3.md => .gendir-suite-cfg/test_doc__directive3.py\nphmdoctest- doc/example1.md => .gendir-suite-cfg/test_doc__example1.py\nphmdoctest- doc/example2.md => .gendir-suite-cfg/test_doc__example2.py\nphmdoctest- doc/inline_example.md => .gendir-suite-cfg/test_doc__inline_example.py\nphmdoctest- tests/managenamespace.md => .gendir-suite-cfg/test_tests__managenamespace.py\nphmdoctest- tests/one_code_block.md => .gendir-suite-cfg/test_tests__one_code_block.py\nphmdoctest- tests/output_has_blank_lines.md => .gendir-suite-cfg/test_tests__output_has_blank_lines.py\nphmdoctest- tests/setup_only.md => .gendir-suite-cfg/test_tests__setup_only.py\nphmdoctest- tests/twentysix_session_blocks.md => .gendir-suite-cfg/test_tests__twentysix_session_blocks.py\nphmdoctest- tests/generate.cfg generated 12 pytest files\n\"\"\"\n phmdoctest.main.generate_using(config_file=Path(\"tests/generate.cfg\"))\n drop_newline = want.lstrip()\n line_sorted_checker(drop_newline, capsys.readouterr().out)", "def _run(args, base_dir, workflows_dir, config_path):\n if not os.path.exists(config_path):\n sys.stdout.write(\n f\"The config file: {config_path} does not exist.\\nProvide a path to the config file with \"\n f\"--configfile or if you do not have a config file run:\\n\"\n f\"seq2science init {args.workflow}\\n\"\n )\n os._exit(1) # noqa\n\n # parse the args\n parsed_args = {\n \"snakefile\": os.path.join(workflows_dir, args.workflow.replace(\"-\", \"_\"), \"Snakefile\"),\n \"use_conda\": True,\n \"conda_cleanup_pkgs\": \"cache\",\n \"conda_frontend\": \"mamba\",\n \"conda_prefix\": os.path.join(base_dir, \".snakemake\"),\n \"dryrun\": args.dryrun,\n \"printreason\": 
args.reason,\n \"keepgoing\": args.keep_going,\n \"unlock\": args.unlock,\n \"cleanup_metadata\": args.cleanup_metadata,\n \"force_incomplete\": args.rerun_incomplete,\n \"rerun_triggers\": [\"mtime\", \"input\", \"software-env\"] if not args.skip_rerun else [],\n }\n\n # get the additional snakemake options\n snakemake_options = args.snakemakeOptions if args.snakemakeOptions is not None else dict()\n snakemake_options.setdefault(\"config\", {}).update({\"rule_dir\": os.path.join(base_dir, \"rules\")})\n snakemake_options = snakemake_options | {\"scheduler\": \"greedy\"}\n snakemake_options[\"configfiles\"] = [config_path]\n for key, value in snakemake_options.items():\n if not isinstance(value, str):\n continue\n if value.lower() == \"true\":\n snakemake_options[key] = True\n if value.lower() == \"false\":\n snakemake_options[key] = False\n\n parsed_args.update(snakemake_options)\n\n # parse the profile\n if args.profile is not None:\n profile_file = snakemake.get_profile_file(args.profile, \"config.yaml\")\n if profile_file is None:\n subjectively_prettier_error(profile_arg, \"profile given but no config.yaml found.\")\n add_profile_args(profile_file, parsed_args)\n\n # cores\n if args.cores: # command-line interface\n parsed_args[\"cores\"] = args.cores\n elif parsed_args.get(\"cores\"): # profile\n parsed_args[\"cores\"] = int(parsed_args[\"cores\"])\n elif parsed_args[\"dryrun\"]:\n parsed_args[\"cores\"] = 999\n else:\n parsed_args[\"cores\"] = 0\n\n if parsed_args[\"cores\"] < 2 and not any(\n [parsed_args[\"unlock\"], parsed_args[\"cleanup_metadata\"], parsed_args[\"dryrun\"]]\n ):\n subjectively_prettier_error(core_arg, \"specify at least two cores.\")\n\n # when running on a cluster assume cores == nodes (just like snakemake does)\n if \"cluster\" in parsed_args and not \"nodes\" in parsed_args:\n parsed_args[\"nodes\"] = parsed_args[\"cores\"]\n\n # store how seq2science was called\n parsed_args[\"config\"][\"cli_call\"] = sys.argv\n\n parsed_args[\"config\"].update({\"cores\": parsed_args[\"cores\"]})\n resource_parser(parsed_args)\n\n # run snakemake/seq2science\n # 1. pretty welcome message\n setup_seq2science_logger(parsed_args, args.debug)\n log_welcome(logger, args.workflow)\n\n if args.debug:\n # dump the parsed args as readable json\n import json\n logger.debug(json.dumps(parsed_args, sort_keys=True, indent=2))\n\n if not args.skip_rerun or args.unlock or args.cleanup_metadata is not None:\n # 2. start a dryrun checking which files need to be created, and check if\n # any params changed, which means we have to remove those files and\n # continue from there\n logger.info(\n \"Checking if seq2science was run already, if something in the configuration was changed, and if so, if \"\n \"seq2science needs to re-run any jobs.\"\n )\n\n with seq2science.util.CaptureStdout() as targets, seq2science.util.CaptureStderr() as errors:\n exit_code = run_snakemake(\n args.workflow.replace(\"-\", \"_\"),\n **{\n **parsed_args,\n **{\n \"list_params_changes\": True,\n \"quiet\": False,\n \"log_handler\": lambda x: None, # don't show any of the logs\n \"keep_logger\": True,\n },\n }\n )\n if args.debug:\n nl = \"\\n\"\n logger.info(f\"\"\"Targets:\\n{nl.join(sorted(targets))}\\n\\n\"\"\")\n logger.info(f\"\"\"Errors:\\n{nl.join(sorted(errors))}\\n\\n\"\"\")\n\n if not exit_code:\n os._exit(1) # noqa\n\n # 3. 
check which files would need a rerun, and exclude files we do\n # not want to consider:\n # - genome files, since provider will change to local\n regex_patterns = [\n \"(\\/.+){2}.*\\.(fa(\\.fai|.sizes)?|gaps\\.bed)$\", # match genome files\n \"(\\/.+){2}.*\\.annotation\\.(bed|gtf)$\", # match gene annotations\n ]\n targets = [target for target in targets if not any(re.match(pattern, target) for pattern in regex_patterns)]\n\n # 4. if there are any targets left, force to recreate those targets plus the final results (rule seq2science)\n if len(targets):\n targets += [\"seq2science\"]\n parsed_args[\"forcerun\"] = targets\n parsed_args[\"targets\"] = targets\n parsed_args[\"forcetargets\"] = True\n parsed_args[\"keep_logger\"] = True\n logger.info(\"Done. Now starting the real run.\")\n\n logger.printreason = parsed_args[\"printreason\"]\n logger.stream_handler.setStream(sys.stdout)\n parsed_args[\"config\"][\"no_config_log\"] = True\n\n # 5. start the \"real\" run where jobs actually get started\n exit_code = run_snakemake(args.workflow.replace(\"-\", \"_\"), **parsed_args)\n\n # 6. output exit code 0 for success and 1 for failure\n os._exit(0) if exit_code else os._exit(1) # noqa", "def _validate_main_config(self):\n # check for required top-level parameters in main config\n required_params = {\"name\": str, \"version\": str, \"datasets\": list}\n\n for param, expected_type in required_params.items():\n if param not in self.config:\n msg = (\n \"[ERROR] Config error: missing required configuration parameter in {}: '{}'\"\n )\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param))\n elif not isinstance(self.config[param], expected_type):\n msg = \"[ERROR] Config error: parameter is of unexpected type {}: '{}' (expected: '{}')\"\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param, expected_type))", "def handle_config(self, config, cfile=_vys_cfile_prod, segments=None):\n\n summarize(config)\n\n if search_config(config, preffile=self.preffile, inprefs=self.inprefs,\n nameincludes=self.nameincludes,\n searchintents=self.searchintents):\n\n # starting config of an OTF row will trigger subscan logic\n if config.otf:\n logger.info(\"Good OTF config: calling handle_subscan\")\n self.handle_subscan(config, cfile=cfile)\n else:\n logger.info(\"Good Non-OTF config: setting state and starting pipeline\")\n # for standard pointed mode, just set state and start pipeline\n self.set_state(config.scanId, config=config,\n inmeta={'datasource': 'vys'})\n\n self.start_pipeline(config.scanId, cfile=cfile,\n segments=segments)\n\n else:\n logger.info(\"Config not suitable for realfast. 
Skipping.\")\n\n self.cleanup()", "def test_config_class():\n assert config is not None", "def clean_config(self, config):\n return config", "def verify_config_params(attack_config):\n _check_config(attack_config, _VALID_CONFIG_CHECKLIST)", "def test_define():\n client = TestClient()\n client.run(\"config set general.fakeos=Linux\")\n conf_file = load(client.cache.conan_conf_path)\n assert \"fakeos = Linux\" in conf_file\n\n client.run('config set general.compiler=\"Other compiler\"')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler = Other compiler' in conf_file\n\n client.run('config set general.compiler.version=123.4.5')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler.version = 123.4.5' in conf_file\n assert \"14\" not in conf_file\n\n client.run('config set general.new_setting=mysetting')\n conf_file = load(client.cache.conan_conf_path)\n assert 'new_setting = mysetting' in conf_file\n\n client.run('config set proxies.https=myurl')\n conf_file = load(client.cache.conan_conf_path)\n assert \"https = myurl\" in conf_file.splitlines()", "def config(ctx):\n return", "def test_config_repository(self):\n self._ucr({\n 'repository/online': 'no',\n 'repository/online/server': 'example.net',\n 'repository/online/port': '1234',\n 'repository/online/prefix': 'prefix',\n 'repository/online/sources': 'yes',\n 'repository/online/httpmethod': 'POST',\n })\n self.u.config_repository()\n self.assertFalse(self.u.online_repository)\n self.assertEqual(self.u.repository_server, 'example.net')\n self.assertEqual(self.u.repository_port, '1234')\n self.assertEqual(self.u.repository_prefix, 'prefix')\n self.assertTrue(self.u.sources)\n self.assertEqual(U.UCSHttpServer.http_method, 'POST')", "def test_everything():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/config.toml\",\n \"ENV\", # Temporarily enabled, needs seperate optional dotenv test\n ]\n )\n\n assert \"root_url\" in str(c._crve_configs)\n assert c.root_url == \"test url\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n )\n assert c.defaults_toml() == default_toml", "def test_bad_config_recovery(mock_empty_os_environ):\n\n def check(d):\n if d and \"wrong\" in d:\n raise KeyError(\"Invalid config\")\n return d\n\n climate = core.Climate(prefix=\"this\", settings_file_suffix=\"suffix\", parser=check)\n assert dict(climate.settings) == {}\n\n # Try to set incorrect config\n with pytest.raises(KeyError):\n climate.update({\"wrong\": 2})\n assert dict(climate.settings) == {}, \"Setting should not have been updated\"\n assert climate._updates == [], \"No external data should have been set.\"\n\n # Updating with other fields will still trigger the error\n climate.update({\"right\": 2})\n assert dict(climate.settings) == {\"right\": 2}\n assert climate._updates == [{\"right\": 2}], \"External data should 
have been set.\"", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def test_custom_config(cli, build_resources):\n books, _ = build_resources\n config = books.joinpath(\"config\")\n result = cli.invoke(commands.build, [config.as_posix(), \"-n\", \"-W\", \"--keep-going\"])\n assert result.exit_code == 0, result.output\n html = config.joinpath(\"_build\", \"html\", \"index.html\").read_text(encoding=\"utf8\")\n soup = BeautifulSoup(html, \"html.parser\")\n assert '<p class=\"title logo__title\">TEST PROJECT NAME</p>' in html\n assert '<div class=\"sphinx-tabs docutils container\">' in html\n assert '<link rel=\"stylesheet\" type=\"text/css\" 
href=\"_static/mycss.css\" />' in html\n assert '<script src=\"_static/js/myjs.js\"></script>' in html\n\n # Check that our comments engines were correctly added\n assert soup.find(\"script\", attrs={\"kind\": \"hypothesis\"})\n assert soup.find(\"script\", attrs={\"kind\": \"utterances\"})", "def test_load_config_climate_url(self, config, config_dict, infile_dict, monkeypatch):\n test_url = 'https://example.com/climateData/bulkdata_e.html'\n monkeypatch.setitem(config_dict['climate'], 'url', test_url)\n config._read_yaml_file = Mock(return_value=config_dict)\n config._read_SOG_infile = Mock(return_value=infile_dict)\n config.load_config('config_file')\n assert config.climate.url == test_url", "def test_default_config():\n clean_tables() \n config = set_configuration() \n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n \n insert_into_reading()\n row_count = get_count() \n min_id, max_id = min_max_id() \n update_last_object(min_id=min_id, max_id=max_id)\n total_purged, unsent_purged = purge(config, _READING_TABLE)\n\n log = get_log() \n\n assert total_purged == 0\n assert total_purged == log['rowsRemoved']\n assert unsent_purged == 0 \n assert unsent_purged == log['unsentRowsRemoved'] \n assert log['failedRemovals'] == 0 \n assert log['rowsRemaining'] == row_count - total_purged \n clean_tables()", "def test_validate_queries_config():\n cfg = configparser.ConfigParser()\n _set_sections(cfg)\n\n # set up test config\n cfg[CFG_CLOUD_PROVIDER] = {\n CFG_CP_AWS_REGION: 'us-east-1',\n CFG_CP_AWS_SUBNET: 'subnet-2345145',\n CFG_CP_AWS_KEY_PAIR: 'foo',\n CFG_CP_AWS_SECURITY_GROUP: 'sg-2345145'\n }\n # pacify submit config checks\n cfg[CFG_BLAST][CFG_BLAST_RESULTS] = 's3://bucket'\n cfg[CFG_BLAST][CFG_BLAST_DB] = 'nt'\n cfg[CFG_BLAST][CFG_BLAST_PROGRAM] = 'blastn'\n cfg[CFG_CLUSTER][CFG_CLUSTER_MACHINE_TYPE] = ELB_DFLT_AWS_MACHINE_TYPE\n\n # test correct queries\n # S3 bucket\n cfg[CFG_BLAST][CFG_BLAST_QUERY] = 's3://bucket-123/@#$*/queris!.fa'\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n # GS bucket\n cfg[CFG_BLAST][CFG_BLAST_QUERY] = 'gs://bucket-123/@^*?/[email protected]'\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n # local file\n cfg[CFG_BLAST][CFG_BLAST_QUERY] = 'queries'\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n # test illigal characters in bucket name\n cfg[CFG_BLAST][CFG_BLAST_QUERY] = 's3://bucket!-123/@#$*/queris!.fa'\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n assert 'Incorrect queries' in err.value.message\n\n cfg[CFG_BLAST][CFG_BLAST_QUERY] = 'gs://bucket@-123/@#$*/queris!.fa'\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n assert 'Incorrect queries' in err.value.message", "def test_custom_configuration_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"vendor.fetchai.skills.error.is_abstract\",\n cwd=self._get_cwd(),\n )\n assert result.stdout == \"True\\n\"", "def test_config_bad_filename(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = None\n args.config = 'jkdhfdskjfhdsfkjhdsfdskjhf.jdkhfkfjh'\n expected_text = '%% Unable to open configuration file jkdhfdskjfhdsfkjhdsfdskjhf.jdkhfkfjh\\n'\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), expected_text)", "def load_settings_from_file(self, cfg_file):\n \n #\n #\n # TODO\n # Missing 
settings should not cause exceptions\n #\n #\n #\n\n if not os.path.exists(cfg_file): \n raise Exception('Provided config file [%s] does not exist or cannot be read.' % cfg_file)\n\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.read(cfg_file)\n \n \n self.reference_root = config.get('Paths','reference-root')\n \n self.scratch_root = os.getcwd()\n try:\n self.scratch_root = config.get('Paths','scratch-root')\n except ConfigParser.NoOptionError:\n self.logger.info('Scratch-root setting is missing. Using current directory: %s' % self.scratch_root)\n\n\n if (self.run_folder != None):\n self.run_id = os.path.basename(self.run_folder)\n else:\n raise Exception('Set runfolder with PipelineConfig.set_runfolder() before loading settings')\n \n \n #\n # TODO\n # needs to be updated on update of settings\n #\n self.runs_scratch_dir = os.path.join(self.scratch_root, self.run_id) if self.run_folder != None else self.scratch_root\n self.logger.info('Run\\'s scratch directory: %s' % self.runs_scratch_dir)\n \n # optional results and fastq archive dirs \n self.results_archive = None\n try:\n self.results_archive = config.get('Paths','results-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No results-archive provided. Results will not be archived outside of the run\\'s scratch directory.')\n \n self.fastq_archive = None\n try:\n self.fastq_archive = config.get('Paths','fastq-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No fastq-archive provided. Fastq files will not be archived outside of the run\\'s scratch directory.')\n \n \n # optional /tmp dir\n self.tmp_dir = '/tmp'\n try:\n self.tmp_dir = config.get('Paths','tmp-dir')\n except ConfigParser.NoOptionError:\n self.logger.info('No tmp-dir provided. /tmp will be used.')\n \n \n \n \n # reference files\n self.reference = os.path.join(self.reference_root, config.get('Resources','reference-genome'))\n self.capture = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed'))\n self.capture_qualimap = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed-for-qualimap'))\n self.capture_plus = os.path.join(self.reference_root, config.get('Resources', 'capture-plus-regions-bed'))\n self.gene_coordinates = os.path.join(self.reference_root, config.get('Resources', 'gene-coordinates'))\n \n self.adapters = os.path.join(self.reference_root, config.get('Resources', 'adapters-fasta'))\n \n # tools\n self.bcl2fastq = config.get('Tools','bcl2fastq')\n self.trimmomatic = config.get('Tools','trimmomatic') \n self.bwa = config.get('Tools','bwa')\n self.samtools = config.get('Tools','samtools')\n self.picard = config.get('Tools','picard')\n self.gatk = config.get('Tools','gatk')\n self.freebayes = config.get('Tools','freebayes')\n self.bcftools = config.get('Tools','bcftools')\n self.qualimap = config.get('Tools','qualimap')\n \tself.fastqc\t = config.get('Tools','fastqc')\n\n\n # annovar settings\n self.convert_to_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','convert_to_annovar'))\n self.annovar_annotate = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_annotate'))\n self.table_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','table_annovar'))\n self.annovar_human_db = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_human_db'))\n self.annovar_1000genomes_eur = config.get('Annovar','annovar_1000genomes_eur')\n 
self.annovar_1000genomes_eur_maf_cutoff = config.get('Annovar','annovar_1000genomes_eur_maf_cutoff')\n self.annovar_inhouse_dbs = config.get('Annovar','annovar_inhouse_dbs')\n self.omim_gene_phenotype_map_file = config.get('Annovar','omim_gene_phenotype_map_file')", "def pytest_configure(config):\n set_default_log_formatter(config, \"%(message)s\")", "def test_info(config):\n conventional_commits = ConventionalCommitsCz(config)\n info = conventional_commits.info()\n assert isinstance(info, str)", "def validate_datalad_config(store, dataset):\n dataset_path = store.get_dataset_path(dataset)\n try:\n git_show(dataset_path, 'HEAD', '.datalad/config')\n except KeyError:\n create_datalad_config(dataset_path)\n commit_files(store, dataset, ['.datalad/config'])" ]
[ "0.6037803", "0.60280555", "0.59878725", "0.5981646", "0.5943651", "0.56515247", "0.5617633", "0.5590721", "0.5510498", "0.54906505", "0.54795974", "0.54218674", "0.5404527", "0.5396753", "0.53920645", "0.5391988", "0.53679043", "0.5367287", "0.53581583", "0.53472453", "0.53248346", "0.5311092", "0.52745867", "0.52692044", "0.5262556", "0.525614", "0.5255509", "0.5255297", "0.5253333", "0.5247692", "0.5230234", "0.52158123", "0.52151614", "0.521066", "0.5202708", "0.5195734", "0.5192824", "0.5189312", "0.518664", "0.5169479", "0.51673394", "0.51625794", "0.514581", "0.514581", "0.5138864", "0.5134521", "0.51304865", "0.513011", "0.51108235", "0.51041377", "0.5094551", "0.5093974", "0.50911164", "0.5090131", "0.50804746", "0.5072921", "0.50695276", "0.5065493", "0.50606376", "0.5059668", "0.5054177", "0.5048035", "0.5041545", "0.5040776", "0.50362223", "0.50354373", "0.503436", "0.5033831", "0.50303614", "0.5027528", "0.5025166", "0.502356", "0.5019436", "0.50188196", "0.5017973", "0.50100076", "0.50093853", "0.499926", "0.4991898", "0.4980962", "0.49681023", "0.49646506", "0.49564698", "0.49541077", "0.49461308", "0.4939846", "0.49386603", "0.4938037", "0.49364915", "0.49302065", "0.4927417", "0.49218202", "0.49181226", "0.49167693", "0.49093416", "0.4908801", "0.49086973", "0.49081403", "0.49056318", "0.49047092" ]
0.5162899
41
Return the contents of the "output" div on the page. The fixtures are configured to update this div when the user interacts with the page.
def output(self):\n    text_list = self.q(css='#output').text\n    if len(text_list) < 1:\n        return None\n    return text_list[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def output(self):\n\t\tif (self.isLoaded()):\n\t\t\treturn self.loader.output()", "def update_output_div(input_value):\n file = str(input_value).split(\"C:\\\\fakepath\\\\\")[-1]\n trial = pd.read_csv(file)\n trial[\"spans\"] = trial.spans.apply(literal_eval)\n _html = [html_to_dash(display_toxics(trial.spans[index], trial.text[index])) \n for index in range(0, trial.shape[0])]\n return html.P(_html)", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def get_html(self):\r\n if self.debug == 'True':\r\n # Reset the user vote, for debugging only!\r\n self.user_voted = False\r\n if self.hints == {}:\r\n # Force self.hints to be written into the database. (When an xmodule is initialized,\r\n # fields are not added to the db until explicitly changed at least once.)\r\n self.hints = {}\r\n\r\n try:\r\n child = self.get_display_items()[0]\r\n out = child.render('student_view').content\r\n # The event listener uses the ajax url to find the child.\r\n child_id = child.id\r\n except IndexError:\r\n out = u\"Error in loading crowdsourced hinter - can't find child problem.\"\r\n child_id = ''\r\n\r\n # Wrap the module in a <section>. This lets us pass data attributes to the javascript.\r\n out += u'<section class=\"crowdsource-wrapper\" data-url=\"{ajax_url}\" data-child-id=\"{child_id}\"> </section>'.format(\r\n ajax_url=self.runtime.ajax_url,\r\n child_id=child_id\r\n )\r\n\r\n return out", "def result(self):\n return self.tmpl_out(\"result.html\",\n height=image(self.work_dir\n + 'output.png').size[1])", "def return_output(self):\n return self.output", "def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)", "def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def test_html_output(self):\n pass", "def get_output(self):\n\n if self.current_pos < len(self.html_doc):\n self.data_buffer += self.html_doc[self.current_pos:]\n self.current_pos = len(self.html_doc)\n\n return self.data_buffer", "def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def 
_populate_output(self):\n pass", "def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))", "def get_html(self):\r\n params = {\r\n 'element_id': self.location.html_id(),\r\n 'element_class': self.location.category,\r\n 'ajax_url': self.system.ajax_url,\r\n 'configuration_json': self.dump_poll(),\r\n }\r\n self.content = self.system.render_template('poll.html', params)\r\n return self.content", "def get_text(self):\n return self.output.getvalue()", "def to_html(self):\n return clientCode.get_page_for_export(self._commands)", "def output(self):\n # self.introductions()\n return self.listen()", "def output(self):\r\n return self._output", "def output(self):\r\n return self.result", "def showcontents(self):\n # See ToolTip for an example\n raise NotImplementedError", "def fixture_output_block():\n return Mock()", "def output(self):\r\n self.logic ( )\r\n return self.output", "def get_output(self):\n return self.output", "def get_output(self):\n return self.output", "def output(self):\n return self.__output", "def content(self, **args):\n return self.pageConfig['content'] % self.pageConfig", "def test_output_html(tmp_path):\n\n # This will be the file to\n temporary_file = os.path.join(tmp_path, 'test-cluster.html')\n\n # Run clustering on small dummy data (see test_clustering.py)\n cluster = TextClustering(embedding_random_state=42,\n reducer_random_state=43,\n clustering_random_state=44)\n\n X = ['Wellcome Trust',\n 'The Wellcome Trust',\n 'Sir Henry Wellcome',\n 'Francis Crick',\n 'Crick Institute',\n 'Francis Harry Crick']\n\n cluster.fit(X)\n\n # Run the visualisation function with output_file=temporary_file\n visualize_clusters(clustering=cluster, output_file_path=temporary_file, radius=0.01,\n alpha=0.5, output_in_notebook=False)\n\n # Assert that the html was generated correctly\n assert os.path.exists(temporary_file)", "def tests():\n\n\treturn render_template(\"testing.html\")", "def viewtank_output() -> str:\r\n details_output = []\r\n #Catches tank user has entered on UI\r\n user_tank = request.args.get(\"tank_ask\")\r\n volume_string = (\"The volume of the tank is: \" +\r\n str(brewer_tanks[str(user_tank)][\"Volume\"]))\r\n details_output.append(volume_string)\r\n capability_string = \"The capabilities of this string are: \"\r\n for capability in brewer_tanks[str(user_tank)][\"Capabilities\"]:\r\n capability_string += capability + \" \"\r\n details_output.append(capability_string)\r\n content_string = (\"In the tank the batch currently being stored is: \" +\r\n str(brewer_tanks[str(user_tank)][\"Batch_Content\"]))\r\n details_output.append(content_string)\r\n activity_string = (\"In the tank the batch is currently being: \" +\r\n str(brewer_tanks[str(user_tank)][\"Activity_Status\"]))\r\n details_output.append(activity_string)\r\n return render_template(\"viewtank_output.html\",\r\n output_details=details_output)", "def output(self):\n return self._output", 
"def output(self):\n return self._output", "def output(self):\n return self._output", "def showTestOutput(self, testId):\n if testId:\n label_text = testId\n result_text, result_errors = self.result.getOutput(testId)\n display_text = '%s\\n%s\\n%s' % (result_errors, '-' * 80, result_text)\n else:\n label_text = ''\n display_text = ''\n self.output_text.configure(label_text=label_text)\n self.output_text.settext(display_text)\n return", "def content(self):\n return self.template.render(weblogsnippet=self.weblogsnippet, pathto=pathto)", "def output_data(self):\n pass", "def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html", "def get_html(self):\r\n goal_level = '{0}-{1}'.format(\r\n self.required_level,\r\n self.required_sublevel)\r\n\r\n showbasic = (self.show_basic_score.lower() == \"true\")\r\n showleader = (self.show_leaderboard.lower() == \"true\")\r\n\r\n context = {\r\n 'due': self.due,\r\n 'success': self.is_complete(),\r\n 'goal_level': goal_level,\r\n 'completed': self.completed_puzzles(),\r\n 'top_scores': self.puzzle_leaders(),\r\n 'show_basic': showbasic,\r\n 'show_leader': showleader,\r\n 'folditbasic': self.get_basicpuzzles_html(),\r\n 'folditchallenge': self.get_challenge_html()\r\n }\r\n\r\n return self.system.render_template('foldit.html', context)", "def printContent(self):\n if self.content != \"Failed to find HTML template.\":\n print self.content", "def output(self):\n return self._parser.result", "def show(self):\n import IPython\n if self._output is None:\n self.render()\n IPython.display.display(self._output, display_id=str(id(self)))", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def get_html(self, system):\r\n _ = self.system.service(self, \"i18n\").ugettext\r\n # set context variables and render template\r\n eta_string = None\r\n if self.child_state != self.INITIAL:\r\n post_assessment = self.latest_post_assessment(system)\r\n score = self.latest_score()\r\n correct = 'correct' if self.is_submission_correct(score) else 'incorrect'\r\n if self.child_state == self.ASSESSING:\r\n # Translators: this string appears once an openended response\r\n # is submitted but before it has been graded\r\n eta_string = _(\"Your response has been submitted. 
Please check back later for your grade.\")\r\n else:\r\n post_assessment = \"\"\r\n correct = \"\"\r\n previous_answer = self.get_display_answer()\r\n\r\n # Use the module name as a unique id to pass to the template.\r\n try:\r\n module_id = self.system.location.name\r\n except AttributeError:\r\n # In cases where we don't have a system or a location, use a fallback.\r\n module_id = \"open_ended\"\r\n\r\n context = {\r\n 'prompt': self.child_prompt,\r\n 'previous_answer': previous_answer,\r\n 'state': self.child_state,\r\n 'allow_reset': self._allow_reset(),\r\n 'rows': 30,\r\n 'cols': 80,\r\n 'module_id': module_id,\r\n 'msg': post_assessment,\r\n 'child_type': 'openended',\r\n 'correct': correct,\r\n 'accept_file_upload': self.accept_file_upload,\r\n 'eta_message': eta_string,\r\n }\r\n html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)\r\n return html", "def data_page():\n return render_template(\"data.html\")", "def output_div(self, output_method):\n instance = self.instance\n G = myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n order = 1\n export_list = []\n if output_method == 'list':\n for item in result:\n export_list.append(G.nodes[item].output(order))\n order += 1\n return export_list\n elif output_method == 'print':\n for item in result:\n pprint (G.nodes[item].output(order))\n order += 1\n return\n elif output_method == 'single_json' or output_method == 'multiple_jsons':\n path2 = os.getcwd() + '/json/'\n if not os.path.exists(path2):\n os.mkdir(path2)\n if output_method == 'single_json':\n f = open(path2 + self.table_name + '.json','w')\n for item in result:\n f.write(G.nodes[item].output(order) + '\\n')\n order += 1\n f.close() # Notice that f.close() is out of the loop to create only one file\n else: #if output_method == 'multiple_jsons'\n for item in result:\n f = open(path2 + self.table_name + str(order)+'.json','w')\n f.write(G.nodes[item].output(order))\n order += 1\n f.close() # Notice that f.close() is in the loop to create multiple files\n return\n elif output_method == 'single_html' or output_method == 'multiple_htmls':\n path2 = os.getcwd() + '/html/'\n if not os.path.exists(path2):\n os.mkdir(path2)\n page = Page()\n if output_method == 'single_html':\n self.page = Page()\n for item in result:\n view = G.nodes[item]\n self.html_output(order, view, 'single')\n order += 1\n self.page.render('./html/' + self.table_name + '_all' + '.html')\n else: # if output_method == 'multiple_htmls'\n path3 = os.getcwd() + '/html/' + self.table_name\n if not os.path.exists(path3):\n os.mkdir(path3)\n for item in result:\n view = G.nodes[item]\n self.html_output(order, view, 'multiple')\n order += 1\n return", "def show(self, output: OutputFormat = OutputFormat.TABLE):\n if isinstance(output, str):\n output = OutputFormat(output)\n\n if output == OutputFormat.JSON:\n print(json.dumps(self.json_dict, indent=4))\n elif output == OutputFormat.HTML:\n df = TablePrinter.get_table_dataframe(self.page_class, self.content)\n return TablePrinter.print_html(df)\n\n else:\n table = TablePrinter.get_table(self.page_class, self.content)\n print(table.get_string(title=colored(self.metadata.elements_type.upper(), 'yellow', attrs=['bold'])))\n print(colored(f'page {self.metadata.page_number}/{self.metadata.total_pages}', 'yellow'))\n return self", "def render(self):\n self.run()\n return 
[{'dest' : self.dest,\n 'text' : self.tmpl.render(**self.data)}]", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def get_info(self) -> str:\n return textwrap.dedent(\n \"\"\"\n <h1>Test page</h1>\n \"\"\"\n )", "def _repr_html_(self) -> str:\n # Speical case inside Google Colab\n if \"google.colab\" in sys.modules:\n load_notebook(hide_banner=True)\n script, div, _ = notebook_content(self.to_render)\n return f\"{div}<script>{script}</script>\"\n\n # Windows forbids us open the file twice as the result bokeh cannot\n # write to the opened temporary file.\n with NamedTemporaryFile(suffix=\".html\", delete=False) as tmpf:\n pass\n\n save(\n self.to_render,\n filename=tmpf.name,\n resources=CDN,\n template=INLINE_TEMPLATE,\n title=\"DataPrep.EDA Report\",\n )\n with open(tmpf.name, \"r\") as f:\n output_html = f.read()\n\n # Delete the temporary file\n Path(tmpf.name).unlink()\n\n # Fix the bokeh: bokeh wrongly call the \"waiting for bokeh to load\" function\n # inside \"Bokeh.safely\", which causes Bokeh not found because\n # Bokeh is even not loaded!\n patched_html = output_html.replace(\n \"Bokeh.safely\",\n \"var __dataprep_bokeh_fix = (f) => document.Bokeh === undefined ? setTimeout(f, 1000) : f(); __dataprep_bokeh_fix\", # pylint: disable=line-too-long\n )\n # embed into report template created by us here\n return patched_html", "def display():\n\n #still needs some cleanup on imagry and what the site is about. \n\n return render_template(\"index.html\")", "def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)", "def getOutput(self):\r\n return self._output", "def content_creator():\n with temporary_url_for_logger(app) as logger:\n with logger:\n content = page.render_html(\n solution=solution,\n static_url=static_url,\n lesson_url=lesson_url,\n subpage_url=subpage_url,\n vars=variables\n )\n absolute_urls = [url_for(logged[0], **logged[1]) for logged in logger.logged_calls]\n\n relative_urls = [get_relative_url(request.path, x) for x in absolute_urls]\n\n return {\"content\": content, \"urls\": relative_urls}", "def main():\r\n return render_template(\"UI.html\")", "def testing():\n return render_template(\"testing.html\")", "def get_output(self):\n raise NotImplementedError", "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. 
mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "def data_page():\n\n return render_template('Data_Page.html')", "def displayWorkout():\n\n return render_template(\"workout.html\")", "def get_page_html(self, xblock):\r\n url = xblock_studio_url(xblock)\r\n self.assertIsNotNone(url)\r\n resp = self.client.get_html(url)\r\n self.assertEqual(resp.status_code, 200)\r\n return resp.content", "def print_contents(self):\n logging.info(self.contents)", "def view_html_page():\n\n return render_template(\"moby.html\")", "def get_html_base(self):\r\n self.update_task_states()\r\n return self.current_task.get_html(self.system)", "def get_output(self):\r\n return self._api.get_output()", "def get_doc_ui():\n return send_file(join(__DOCS_ROOT, 'ui.html'))", "def display(self):\n # this is so that each turtle gets their own canvas. Without this they all try to draw to the first created canvas\n self._randHash = random.getrandbits(128)\n \n # The actual forum seems to be able to display only one of the html texts, so merge them and send them all in one go\n htmlString = \"\";\n ## Canvas creation\n htmlString += ('<script type=\"text/javascript\">%s</script>'%ReadFile('paper.js')) + \"\\n\"\n htmlString += ('<canvas id=\"canv%s\" width=%spx height=%spx></canvas>'%(self._randHash, self._canvWidth, self._canvHeight)) + \"\\n\"\n \n \n # prepare data for injection\n self._arrayString = \"[\"\n for act in self._actions:\n self._arrayString += '[%s, %s, %s, %s, %s, %s, \"%s\", %s, \"%s\", \"%s\"], ' \\\n % (act[0], act[1], act[2], act[3], act[4], act[5], act[6], act[7], act[8], act[9])\n self._arrayString += \"]\"\n \n # inject data\n htmlString += ('<script type=\"text/javascript\">var actionData = %s; var levelData = %s;</script>'% (self._arrayString, self._levelDataString)) + \"\\n\"\n #print(self._levelDataString)\n \n ## Drawing the turtle\n htmlString += ('<script type=\"text/paperscript\" canvas=\"canv%s\">%s</script>'% (self._randHash, ReadFile('AtahansTurtle.js')))\n htmlString = htmlString.replace(\"actionData\", \"actionData\" + str(self._randHash));\n htmlString = htmlString.replace(\"levelData\", \"levelData\" + str(self._randHash));\n #print(htmlString);\n display(HTML(htmlString))", "def get_html(self):\r\n pass", "def get_output(self):\n return self._output", "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def read_stdout(self, dt):\n\n self.temp_stdout += self.temp_output\n self.ids[\"txt_code_output\"].text = 
self.temp_output", "def index(self):\n return self.html", "def rawHTMLrendered(self):", "def write_output(self):", "def printOutput(self):\n pass", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)", "def get_inner_html(self):\n\n pass", "def get_output(self, **kwargs):\n return self.out", "def output_data(self):\n with open(self.output_path, 'r') as file:\n return file.read()", "def output(self):\n return super(RequireJSPage, self).output", "def generate_html(self):\n content = self.content\n excerpt = self.excerpt\n\n content_html = publish_parts(content,\n writer_name='html',\n settings_overrides=DOCUTILS_OVERRIDES)['fragment']\n excerpt_html = publish_parts(excerpt,\n writer_name='html',\n settings_overrides=DOCUTILS_OVERRIDES)['fragment']\n\n return (content_html, excerpt_html)", "def get(self):\n WriteTemplate(self.response, 'tips.html', {})", "def preview():\r\n html = create_html_report()\r\n return html", "def results(story_temp):\n user_story = STORIES[story_temp].generate(request.args)\n return render_template(\"story.html\", template_story=user_story)", "def index(self, **args):\n if not self.isConfigured:\n self.configure()\n s = self.override()\n if not s:\n self.pageConfig['timeStamp'] = time.strftime('%a %b %d %X %Z %Y')\n contents = self.content(**args) # Make sure contents is run first (so it\n # can change any pageConfig entries if desired\n s = startPage % self.pageConfig\n s = s + htmlDiv('header', self.header(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyHeader'])\n s = s + htmlDiv('navigation', self.navigation(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyNavigation'])\n s = s + htmlDiv('content', contents,\n keepEmptyDiv=self.pageConfig['keepEmptyContent'])\n s = s + htmlDiv('footer', self.footer(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyFooter'])\n s = s + endPage\n return s", "def div_html_list(self):\n return self.q(css='div.test').html", "def stdout(self):\n if self.dm.fileExists(self.proc):\n try:\n t = self.dm.pullFile(self.proc)\n except DMError:\n # we currently don't retry properly in the pullFile\n # function in dmSUT, so an error here is not necessarily\n # the end of the world\n return ''\n newLogContent = t[self.stdoutlen:]\n self.stdoutlen = len(t)\n # Match the test filepath from the last TEST-START line found in the new\n # log content. 
These lines are in the form:\n # 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\\n\n testStartFilenames = re.findall(r\"TEST-START \\| ([^\\s]*)\", newLogContent)\n if testStartFilenames:\n self.lastTestSeen = testStartFilenames[-1]\n return newLogContent.strip('\\n').strip()\n else:\n return ''", "def homepage():\r\n words = story.prompts\r\n # i didn't realize you could access class variables like this\r\n\r\n return render_template(\"homepage.html\", words = words)", "def get_html(self):\r\n context = self.get_context()\r\n html = self.system.render_template(\r\n '{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context\r\n )\r\n return html", "def render(self):\n master = Template(self.master_file.read_text())\n content = Template(self.content_template.read_text())\n\n # Render content\n d = {\n \"citekey\": self.citekey,\n \"author\": self.author,\n \"ts\": self.ts_iso,\n \"ts_day\": self.ts_day,\n \"title\": self.fieldValues[\"title\"],\n \"creator\": self.fieldValues[\"author\"],\n \"date\": self.fieldValues[\"issued\"],\n \"doi\": self.fieldValues[\"DOI\"],\n \"type\": self.fieldValues[\"type\"],\n }\n rendered = master.render(d) + \"\\n\\n\" + content.render()\n\n return rendered" ]
[ "0.65045804", "0.63367486", "0.61806214", "0.6148067", "0.6148067", "0.5885983", "0.5870854", "0.5743912", "0.571378", "0.5711744", "0.56615424", "0.5633449", "0.56062645", "0.5534683", "0.5534136", "0.54999006", "0.5468458", "0.5467302", "0.54517037", "0.5444819", "0.544434", "0.5427583", "0.54173595", "0.5385075", "0.538168", "0.53790945", "0.5378761", "0.5371734", "0.5370678", "0.5370678", "0.53672016", "0.5354881", "0.5345103", "0.53343135", "0.5330012", "0.53029245", "0.53029245", "0.53029245", "0.5292104", "0.5289767", "0.5285016", "0.5283627", "0.5269169", "0.5265872", "0.5250237", "0.52414036", "0.5237", "0.5231211", "0.5219256", "0.52072215", "0.5202005", "0.519313", "0.5187114", "0.5187114", "0.5187114", "0.5187114", "0.5187114", "0.5187114", "0.5181107", "0.5176808", "0.5172601", "0.51681256", "0.5165076", "0.5155532", "0.51382864", "0.5137376", "0.51298183", "0.51243997", "0.5114924", "0.51068956", "0.5104955", "0.51027554", "0.5099899", "0.50980836", "0.5097262", "0.50897634", "0.5087347", "0.50859106", "0.50836533", "0.507878", "0.5074629", "0.50724155", "0.5070176", "0.5068093", "0.5067511", "0.50546974", "0.504937", "0.5048217", "0.50438076", "0.5033382", "0.5028955", "0.502747", "0.5027082", "0.502584", "0.5024039", "0.5023155", "0.50206286", "0.50199264", "0.5018604", "0.50128794" ]
0.6102804
5
Click the button on the page, which should cause the JavaScript to update the output div.
def click_button(self): self.q(css='div#fixture input').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def click_button(self):\n self.widgets.get('button').click()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def click(self):\r\n pass", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()", "def click_download_button(self):\n self._basket.click_download_button()", "def click(self) -> None:\n logging.info(f\"click element. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.click();\"\"\"\n self._execute_javascript(js)", "def click_display(self) -> None:\n logging.info(f\"Click on the displayed element. 
{self.desc}\")\n js = 'var elm = document.querySelector(\"' + self.css + '\");' \\\n ' if(elm != null){elm.style.border=\"2px solid red\";elm.click();}'\n self._execute_javascript(js)", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def on_click(self) -> None:\n pass", "def checkout_btn(self):\n self._checkout_btn.click()", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def OnButtonSubmitterPageButton(self, event):\r\n\t\twebbrowser.open(self._configtmp[\"imageurl\"])", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def click(self, element):\n element.click()", "def Button(request):\n params = {\n 'mimetype': 'text/javascript',\n 'fn': request.GET.get('fn', '_bRunTest'),\n 'btn_text': request.GET.get('btn_text', 'Run the test'),\n 'cb_text': request.GET.get('cb_text',\n 'and send my results to Browserscope (anonymously)'),\n }\n return util.Render(request, 'user_test_button.js', params)", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def on_click(self) -> None:\n self.cycle()", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def on_click(self) -> None:\n os.startfile(self.url) # noqa: S606", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def _link_clicked(self, href):\n\n self.main_frame.load(href)", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))", "def view(self):\n\t\tself.done(1)", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def mainWebActions(self, **kwargs):\n # If the dictionary item value is the required opens the webpage\n if kwargs['button']=='docs':\n # Only 1 click at every 5 seconds\n self.docs_Button.setDown(True)\n QTimer.singleShot(5000, 
lambda: self.docs_Button.setDown(False))\n webbrowser.open('https://italorenan.gitbook.io/roc/')", "def click(self, wait_load_page = True):\n\t\tif self.__element.tag == 'a':\n\t\t\tself.__browser.load_page(self.get_property('href'))", "def button1_press(self):\n\n ext = nuke_link(str(self.lineEdit.text()))\n url = 'https://learn.foundry.com/nuke/developers/70/pythonreference/{}'.format(ext)\n webbrowser.open(url)", "def click(self, element_tuple):\n current_state = self.change_monitor()\n self.log_info(f\"Browser.click: Clicking {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).click()\n self.change_monitor(previous_data=current_state)\n return", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def clickDashboard(self):\n self.waitForElement(locator=self._dashboardBtn, locatorType=\"xpath\")\n self.elementClick(locator=self._dashboardBtn, locatorType=\"xpath\")", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global entered_code\n\n if button_text == \"Enter Code\":\n self.answer = self.input_box.text\n entered_code = self.answer\n self.convert_string_to_int(self.answer)\n self.ui_manager.purge_ui_elements()\n self.minigame.window.show_view(FakeCodeGame.MyView(self.minigame.main_view))\n print(f\"EnterCode button. {self.answer}\")\n elif button_text == \"Exit Terminal\":\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n\n \"\"\"\n The following functions check the submitted answer\n \"\"\"", "def execbox(response, url=\"/exec/\"):\n response.out.write(\"\"\"\n <form action=\"\" method=\"GET\">\n <b>enter command:</b><input type=\"commit\" name=\"input\" value=\"\">\n // <input type=\"button\" value=\"go\" onClick=\"makePOSTRequest(this.form)\"\n </form>\n \"\"\")", "def js_click(self, css):\r\n t1 = time.time()\r\n js_str = \"$('{0}').click()\".format(css)\r\n try:\r\n self.driver.execute_script(js_str)\r\n self.my_print(\"{0} Use javascript click element: {1}, Spend {2} seconds\".format(success,js_str,time.time()-t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable to use javascript click element: {1}, Spend {2} seconds\".format(fail,\r\n js_str, time.time() - t1))\r\n raise", "def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def click_volver(self):\n self.button.click(liquidaciones_historicas_catalog.BOTON_VOLVER)", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def OnButtonClick(self):\n self.choice()", "def update():\n print(\"current page is \", wikiPageStackTrace[-1].getTitle())\n if wikiPageStackTrace[-1].getUrl() != goalPage.getUrl(): # no victory\n eel.addRoundNumber()\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(\n wikiPageStackTrace[-1].getFirstSentence())\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.hideLoader()\n elif wikiPageStackTrace[-1].getUrl() == goalPage.getUrl(): # victory\n eel.hideLoader()\n eel.addRoundNumber()\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.showVictory()\n # we need to do this 
because overwise the JS is not fat egoth to respond so we get an infinit loading\n time.sleep(0.5)\n eel.hideLoader()", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def click_buy_page_inline_action_button(self, vendor):\n self.click_inline_action_button(self.vendors_div_id, vendor, self.grid_column_number)", "def click(self, selector):\n el = self.locate_element(selector)\n el.click()", "def submit(self):\n self.driver.find_element(*BaseLocators.SUBMIT_BUTTON).click()", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def save(self):\n self.click(\".action-save\")\n self.page.wait_for_ajax()", "def refresh(self):\n #self.find('counter-label').text = 'Counter: %i' % self.counter\n\n #@on('increment-button', 'click')\n #def on_button(self):\n \"\"\"\n This method is called every time a child element\n with ID 'increment-button' fires a 'click' event\n \"\"\"\n #self.counter += 1\n #self.refresh()", "def on_click ( self, object ):\n pass", "def OnButton(self, event):\n\n\n event_id = event.GetId()\n event_obj = event.GetEventObject()\n print(\"Button 1 Clicked:\")\n print(\"ID=%d\" % event_id)\n print(\"object=%s\" % event_obj.GetLabel())", "def do_click(self, xpath):\n e = self._find_element_by_xpath(xpath)\n e.click()", "def _ClickGaiaButton(self, button_text, alt_text):\n get_button_js = '''\n (function() {\n var buttons = document.querySelectorAll('[role=\"button\"]');\n if (buttons == null)\n return false;\n for (var i=0; i < buttons.length; ++i) {\n if ((buttons[i].textContent.indexOf('%s') != -1) ||\n (buttons[i].textContent.indexOf('%s') != -1)) {\n buttons[i].click();\n return true;\n }\n }\n return false;\n })();\n ''' % (button_text, alt_text)\n self._GaiaWebviewContext().WaitForJavaScriptCondition(\n get_button_js, timeout=20)", "def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def do(self, jQuery):", "def click_login_button(self):", "def click_documents_grid_inline_action_button(self, reference_number):\n self.click_inline_action_button(self.documents_grid_div_id, reference_number, self.documents_grid_inline_action_column_number)\n self.wait_for_ajax_spinner_load()", "def click_on_submit(context):\n submit_for_approval = context.browser.find_elements_by_css_selector(\n \"input[type='button'][value='Submit for Approval']\")\n for item in submit_for_approval:\n item.click()\n time.sleep(10)", "def double_clicked_to_view(self):\n\n # TODO need this method? 
better in init to go to view_file\n self.view_file()", "def handle_remote_button(self, request):\n self._verify_auth_parameters(request)\n content = yield from request.content.read()\n parsed = dmap.parse(content, tag_definitions.lookup_tag)\n self.last_button_pressed = dmap.first(parsed, 'cmbe')\n return web.Response(status=200)", "def on_pushButton_clicked(self):\n print(\"hello\")", "def click_login_button(self):\n submit_button = self.locate_element_by_css_selector(LOGIN_BUTTON_SELECTPR)\n submit_button.click()", "def press_entry(self, button):\r\n buttonText = button.text\r\n selectedPlace = Place()\r\n for place in self.place_list.list_places:\r\n placeDisplayText = self.generateDisplayText(place.name, place.country, place.priority, place.is_required)\r\n if buttonText == placeDisplayText:\r\n selectedPlace = place\r\n break\r\n\r\n selectedPlace.mark_visited() # Mark the place visited\r\n self.root.ids.entriesBox.clear_widgets() # Apply to GUI\r\n self.create_widget()\r\n\r\n self.news = \"You have visited {}\".format(selectedPlace.name) # Display change in news\r", "def commandbox(response, url=\"/dispatch/\"):\n response.out.write(\"\"\"\n <form action=\"%s\" method=\"post\">\n <div><b>enter command:</b> <input type=\"commit\" name=\"content\"></div>\n </form>\n \"\"\" % url)", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def show(self):\n self.driver.send(self.canvas)", "def execute_js(self, script):\n self.driver.execute_script(script)", "def OnButtonRateHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_RATE)", "def click_outbound_statement_search_button(self):\n self.click_element(self.page_header_locator)\n self.click_element(self.outbound_statement_search_button_locator, True)", "def click(self, xpath):\n self.driver.find_element_by_xpath(xpath=xpath).click()", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def click_upload_statement_button(self):\n self.click_element(self.upload_statement_button_locator)", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def get_submit(self):\r\n return self.driver.find_element(*SinginPage.submit).click()", "def on_click():\n action = self.screens[self.curr_screen].on_click()\n\n if screen_actions.CHANGE_SCREEN == action[\"screen action\"]:\n self.curr_screen = action[\"value\"]\n self.screens[self.curr_screen].update_screen()\n self.lcd_display.show()\n elif screen_actions.UPDATE_REDIS == action[\"screen action\"]:\n self.redis_client.set(action[\"redis key\"], action[\"value\"])\n self.redis_dict[action[\"redis key\"]] = action[\"value\"]\n print(\n \"Key: {}, Value: {}\".format(\n action[\"redis key\"],\n self.redis_client.get(action[\"redis key\"]).decode(\"UTF-8\"),\n )\n )", "def click_the_edit_button_that_appears(driver):\n driver.find_element_by_xpath(xpaths.users.eric_Edit_Button).click()", "def 
click_the_save_button_which_should_be_returned_to_the_storage_page(driver):\n assert wait_on_element(driver, 5, '//button[contains(.,\"Save Access Control List\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Save Access Control List\")]').click()\n time.sleep(1)\n assert wait_on_element_disappear(driver, 30, '//h6[contains(.,\"Please wait\")]')", "def on_run_clicked(self, button):\n active_tab = self.get_active_tab()\n active_tab.save() # enables auto-save before running\n active_tab.execute()", "def click_win_dispute_button(self):\n self.click_element(self.win_dispute_button_locator)", "def submit_response(self):\r\n self.q(css='input.submit-button').first.click()\r\n\r\n # modal dialog confirmation\r\n self.q(css='button.ok-button').first.click()\r\n\r\n # Ensure that the submission completes\r\n self._wait_for_submitted(self.assessment_type)", "def click_modal_button(self, title):\n locator = lex_locators[\"modal\"][\"button\"].format(title)\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n self._jsclick(locator)", "def download():\n \n browser.find_element_by_xpath('//*[@id=\"ctl00_contentPlaceHolder_divAllVariablesPerYear2012\"]/div[2]/div[2]/div[1]/a').click()", "def __call__(self):\n self.show()", "def answer_problem(self):\r\n self.q(css='input.check').first.click()\r\n self.wait_for_ajax()", "def exec_(self):\n super().exec_()\n return self.clicked_button" ]
[ "0.7255249", "0.7255249", "0.71749187", "0.6997265", "0.6930125", "0.6644868", "0.65429854", "0.6372312", "0.63093776", "0.6224634", "0.6215617", "0.61377794", "0.61235803", "0.6058383", "0.6054273", "0.60490453", "0.6046816", "0.6021236", "0.60204387", "0.6019323", "0.60022116", "0.5999024", "0.59981054", "0.5982797", "0.5931911", "0.58496445", "0.58137524", "0.57777244", "0.5769397", "0.5753556", "0.573585", "0.5726484", "0.5714448", "0.57019395", "0.5689026", "0.56647325", "0.56577027", "0.5626981", "0.5622815", "0.5621199", "0.5618146", "0.56067705", "0.5606475", "0.56008106", "0.5599131", "0.5579039", "0.5569696", "0.5561526", "0.555668", "0.5545033", "0.5535226", "0.55185163", "0.55151594", "0.5486493", "0.54803556", "0.5477284", "0.5460297", "0.54456335", "0.5438773", "0.54372305", "0.5432025", "0.54129404", "0.5412338", "0.5411326", "0.5402682", "0.54001886", "0.53959584", "0.5384323", "0.5373904", "0.53475124", "0.53414893", "0.5340837", "0.53293645", "0.53218555", "0.53190213", "0.53083485", "0.52972", "0.52820843", "0.5277922", "0.52741265", "0.5268884", "0.52620775", "0.52587974", "0.5258133", "0.5253209", "0.52481455", "0.5232223", "0.52314395", "0.5229959", "0.5228842", "0.52227384", "0.5220057", "0.52188325", "0.5218474", "0.5217773", "0.5210973", "0.52105767", "0.52011865", "0.5192546", "0.5190678" ]
0.6700987
5
Input `text` into the text field on the page.
def enter_text(self, text): self.q(css='#fixture input').fill(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generic_input_text(self, element_id, text):\n self._generic_input(element_id, text)", "def input(self, text):\n self.element.clear()\n self.element.send_keys(text)", "def input_text(self, element: Union[WebElement, Tuple[By, str]], text: str):\n element = self.find_element(element)\n element.send_keys(text)", "def ui_input_text() -> str:\n\ttext = input('enter your text ')\n\treturn text", "def set_text(self, input_text):\n self.clear()\n logging.getLogger(__name__).info(\n \"This text set to input field: {}\\nby = {}\\nvalue = {}\".format(input_text, self.by, self.value))\n self.driver.find_element(self.by, self.value).send_keys(input_text)", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def set_text(self, text):\n\n self.text = text", "def text(self, txt):\n\n self.web_element.clear()\n self.web_element.send_keys(txt)\n return None", "def type_text(self, element, text):\n try:\n if element.is_displayed():\n element.clear()\n element.send_keys(text)\n print(text + \" is added to textbox\")\n else:\n print(element + \" is not displaying\")\n except Exception as e:\n print(str(e))", "def input_text(self, text):\n self.android_device_driver.adb.exec_adb_cmd(\"shell input text \" +\n text).wait()", "def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\")\n\n self._text = text", "def text(self, text, enter=True):\n self.ime.text(text)\n\n if enter:\n self.adb.shell_command(\"input keyevent ENTER\")", "def inp(text):\r\n input(text)", "def set_text(self, text):\n self.set_text_f(\"%s\", text)", "def write_text(self, text):\n Application.write_text(self, text, self.TXT_FIELD)", "def SetText(self, text):\r\n\r\n self._text = text", "def text_value(self, text_value):\n\n self._text_value = text_value", "def add_text(self, text):\n self.text = self.text + text", "def get_text(text_input):\r\n return text_input", "def update_text(self, text):\n self.response['text'] = text", "def _type_text(text):\n FlaUIKeyboard.Type(str(text))", "def TextWidget(*args, **kw):\n kw['value'] = str(kw['value'])\n kw.pop('options', None)\n return TextInput(*args,**kw)", "def text(text, enter=True, **kwargs):\n G.DEVICE.text(text, enter=enter, **kwargs)\n delay_after_operation()", "def set_text(self, value):\n self.clear()\n self.send_keys(value)\n return self", "def write_text(self, text):\n self.ui.plainTextEdit.appendPlainText(text)\n logging.info(text)", "def text_field(self, value):\n self.set_property(\"TextField\", value)", "def input_text(self,loc,value,img_name):\r\n self.wait_ele_visible(loc,img_name)\r\n ele = self.get_element(loc,img_nameue)\r\n try:\r\n ele.send_keys(value)\r\n except:\r\n self.getScreenShot(img_name)\r\n logging.exception(\"********input text fail********\")\r\n raise", "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')", "def aisappium_input_text(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing text '%s' into text field '%s'\" % (text, locator))\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def 
text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\") # noqa: E501\n\n self._text = text", "def _set_text(self, text):\n self.clear()\n r = self.add_run()\n r.text = _to_unicode(text)", "def text(self, text=None):\n if text is None:\n return self._text\n else:\n self._text = text", "def SetText(self, text):\r\n\r\n self._text = text\r\n return self", "def setText(self, text):\n self._state[0] = str(text)\n self._param.update()", "def text(self, text):\n if self.local_vars_configuration.client_side_validation and text is None: # noqa: E501\n raise ValueError(\"Invalid value for `text`, must not be `None`\") # noqa: E501\n\n self._text = text", "def display_text(self, text):\n self.write_to_serial(':DISP:TEXT \\'' + text + '\\'')", "def _set_text(self, text):\n self.clear()\n self.paragraphs[0].text = _to_unicode(text)", "def set_text(self, T):\n self.text = T", "def _enter_text(elem, text, append=False, prepend=False, clear=True):\n pre = app = u''\n\n if prepend:\n pre = elem.value()\n elif append:\n app = elem.value()\n if clear:\n elem.clear()\n elem.send_keys((pre + text + app))", "def write(self, text):\n self.text = text", "def insertText(self, text: str) -> Awaitable[Dict]:\n return self.client.send(\"Input.insertText\", {\"text\": text})", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def ev_textinput(self, event: TextInput) -> None:", "def on_text_box(self, event):\n text_box_value = self.text_box.GetValue()\n text = \"\".join([_(u\"New text box value: \"), text_box_value])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()", "def print_entry(text):\n print \"Text entered: \\n '%s'\" % text", "def text(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"text\")", "def setText(self, text=\"\"):\n self._text = text\n self._text_item.setHtml(self._compile_text())", "def set_text(self):\n pass", "def update(self, text: str) -> None:\n raise NotImplementedError", "def add_text(self, text: str) -> None:\n self.texts.append(text.strip().rstrip(\"\\n\"))", "def input_text_in_field(text_field_web_elem, text, by_symbols):\n text_field_web_elem.clear()\n if by_symbols:\n for letter in text:\n text_field_web_elem.send_keys(letter)\n else:\n text_field_web_elem.send_keys(text)", "def settext(self, text):\n self.__text = text\n self.__nonzero = True", "def write(text):\n try:\n focused_element = driver.switch_to.active_element\n focused_element.send_keys(text)\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def set_text(widget, text):\n if isinstance(widget, QLineEdit):\n widget.setText(text)\n widget.setCursorPosition(0)\n\n if isinstance(widget, QPlainTextEdit):\n widget.setPlainText(text)\n\n if isinstance(widget, QTextBrowser):\n widget.setText(text)\n\n if isinstance(widget, QComboBox):\n index = widget.findText(text, Qt.MatchFixedString)\n if index >= 0:\n widget.setCurrentIndex(index)\n else:\n widget.setEditText(text)", "def submit_text(self, submit_text):\n\n self._submit_text = submit_text", "def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\") # noqa: E501\n if text is not None and len(text) > 1024:\n raise ValueError(\"Invalid value for `text`, length must be less than or equal to `1024`\") # noqa: E501\n if text is not None and len(text) < 0:\n raise ValueError(\"Invalid value for `text`, length must be greater than or equal to 
`0`\") # noqa: E501\n\n self._text = text", "def set_text(self, texto):\n self.entry.set_text(texto)", "def insert_text(self, text):\n self.str += text", "def text(self):\n return str(self.input.text())", "def setText(self, element_tuple, text):\n self.log_info(f\"Browser.setText: Setting text of {element_tuple} to {text}\")\n\n self.disable_logging()\n self.clearText(element_tuple)\n self.revert_logging()\n\n self.CORE.find_element(*self.format_element(element_tuple)).send_keys(text)\n return", "def SetText(self, text):\n self.Clear()\n self.__context.builder.DocumentInsert(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n text)\n self._blip_data.content = text", "def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)", "def update_text(self: object, widget: Text, new_text: str) -> None:\n widget.delete(\"1.0\", END) #Clear the text window so we can write.\n widget.insert(END,new_text)", "def text_input():\n return input(\">>>\")", "def set_text(self, txt):\n self.value = txt\n # Signal to the application that we need to resize this widget\n self.chsize()", "def write_text(self, text):\n self.write(self.render_text(text))", "def WriteText(self, text):\n print(text)", "def type_text(self, type_text):\n\n self._type_text = type_text", "def _setEditorText(self, text):\n if self.__lineEditKind:\n self._editor.setText(text)\n else:\n self._editor.setEditText(text)\n if text and self._editor.findText(text) == -1:\n self._editor.insertItem(0, text)", "def printText(self, text):\n self._append_plain_text(text)", "def data(self, text):\n if self._keep_text:\n self._text.append(text)", "def setHTML(self, text):\n\n self.ui.textBrowser.append(text)", "def set_text(self, new_text):\n\n self.output['text'] = new_text", "def text(self, text, x, y, height, width):\n cook = cookie()\n t = Text(cook, self)\n t.text = text\n self.call('text', cook, text, x, y, height, width)\n return t", "def input_(text: str):\n inputText = input(text + bcolors.OKBLUE)\n print(bcolors.ENDC, flush=True, end=\"\")\n return inputText", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def contents(self, text):\n self.app.contents = text", "def post_text(self, text: str) -> bool:\n return False", "def ev_textinput(self, event: tcod.event.TextInput) -> T | None:", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def test_text_field():", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def fill_textfield(self, post_url: str, text_field: Dict[str, Any], text: str,\n context: Dict[str, Any], uuid: str, label: str = None) -> Dict[str, Any]:\n new_value = {\"#t\": \"Text\", \"#v\": text}\n payload = save_builder() \\\n .component(text_field) \\\n .context(context) \\\n 
.uuid(uuid) \\\n .value(new_value) \\\n .build()\n\n locust_label = label or f'Fill \\'{text_field[\"label\"]}\\' TextField'\n\n resp = self.post_page(\n self.host + post_url, payload=payload, label=locust_label\n )\n return resp.json()", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def normalOutputWritten(self, text):\n self.ConTextField.moveCursor(QTextCursor.End)\n self.ConTextField.insertPlainText( text )\n QApplication.processEvents()", "def addContent(text):", "def __init__(self, text):\n\n self.text = text", "def SetText(self, text):\n super(AbapCodeEditor, self).SetText(text)\n self.SetEditable(False)" ]
[ "0.7562028", "0.74759895", "0.74647695", "0.7401078", "0.73765755", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7331148", "0.72840446", "0.72499853", "0.72281253", "0.7209747", "0.7139128", "0.7137083", "0.7108322", "0.70740104", "0.7031114", "0.69858974", "0.6975402", "0.6947462", "0.6932175", "0.69173527", "0.6913265", "0.69105864", "0.6909489", "0.68472403", "0.6847124", "0.6823111", "0.6819232", "0.68015337", "0.677302", "0.67709327", "0.67526543", "0.6726339", "0.6693879", "0.66638535", "0.66480535", "0.6647953", "0.6618376", "0.66009957", "0.6598245", "0.6586706", "0.6585335", "0.6567739", "0.6556262", "0.6554603", "0.65459454", "0.6514615", "0.6508504", "0.6494749", "0.6489963", "0.6479524", "0.64483505", "0.64469254", "0.6442873", "0.6430716", "0.6402082", "0.6393547", "0.6378543", "0.63738143", "0.6371089", "0.63664216", "0.6354241", "0.6352242", "0.6351585", "0.6349785", "0.63241106", "0.6321302", "0.6296175", "0.62958926", "0.62942517", "0.62936187", "0.6276361", "0.6258594", "0.6248201", "0.6243672", "0.62396634", "0.62061065", "0.61907774", "0.61907667", "0.61891574", "0.61891574", "0.61891574", "0.61891574", "0.61891574", "0.61872977", "0.61338884", "0.6123491", "0.61221856", "0.6118656", "0.6118656", "0.61125034", "0.6111829", "0.6101408", "0.60791814", "0.6069357" ]
0.78288025
0
Select the car with ``car_value`` in the dropdown list.
def select_car(self, car_value): self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
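For context: the record above pairs the query with a one-line page-object method in the bok-choy style (self.q(css=...)). The sketch below is illustrative only and shows how such a method typically sits inside a bok-choy PageObject and gets called from a test; the CarPage class name, the fixture URL, and the "volvo" option value are assumptions, not part of the dataset entry.

# Illustrative sketch only -- CarPage, the fixture URL, and "volvo" are assumed.
from bok_choy.page_object import PageObject


class CarPage(PageObject):
    url = "http://localhost:8003/car_fixture.html"  # assumed fixture page

    def is_browser_on_page(self):
        # bok-choy calls this to confirm the page is loaded before use
        return self.q(css='select[name="cars"]').present

    def select_car(self, car_value):
        # the method from the record above, unchanged
        self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()


# Typical call site in a test:
# page = CarPage(browser).visit()
# page.select_car("volvo")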
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_car_selected(self, car):\n return self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car)).selected", "def the_option_with_value(value: str) -> \"SelectByValue\":\n return SelectByValue(value)", "def select_option(self, selector, value):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import Select\n\n select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))\n select.select_by_value(value)", "def select_by_value(self, selector, value):\n el = self.locate_element(selector)\n Select(el).select_by_value(value)", "def car(self, value):\n self.pair.car = value", "def dropdown_choice(value):\r\n return 'You have selected \"{}\"'.format(value)", "def set_select(self, val):\n self.select = val\n return self", "def select(self, value) -> str:", "def link_to_choice(dropdown_value):\n return dropdown_value", "def selected_value(self, selected_value):\n for option in self._options_iter:\n if option.value == selected_value:\n self.selected_option = option\n break\n else:\n raise ValueError(\n \"no option with value '{}' found\".format(selected_value)\n )", "def selectOptionByValue(self, element_tuple, select_value):\n self.log_info(f\"Browser.selectOptionByValue: Setting {element_tuple} to {select_value}\")\n Select(self.CORE.find_element(*self.format_element(element_tuple))).select_by_value(select_value)\n return", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def test_select_box():\n with SeleniumDriver(\"firefox\", headless=True) as obj:\n obj.get(TEST_URL)\n\n select_value = \"1\"\n obj.fill({\"select_dropdown\": select_value})\n element = obj.element(\"select_dropdown\", \"name\")\n for ele in element.find_elements_by_tag_name(\"option\"):\n if ele.text == \"One\":\n assert ele.is_selected() is True", "def perform_as(self, the_actor: Actor) -> None:\n if self.target is None:\n raise UnableToAct(\n \"Target was not provided for SelectByValue. 
Provide a target using the \"\n \".from_() or .from_the() methods.\"\n )\n\n element = self.target.found_by(the_actor)\n select = SeleniumSelect(element)\n try:\n select.select_by_value(self.value)\n except WebDriverException as e:\n msg = (\n \"Encountered an issue while attempting to select the option with value \"\n f\"{self.value} from {self.target}: {e.__class__.__name__}\"\n )\n raise DeliveryError(msg).with_traceback(e.__traceback__)", "def set_value_in_resolution_grid_dropdown(self, column_name, column_value):\n self.single_selection_from_kendo_in_grid(self.resolution_grid_div_id, column_name, column_value)\n self.click_element(self.page_header_locator)", "def prepare_value(self, value):\n if value is None and self.required:\n choices =list(self.choices)\n if len(choices) == 1:\n value = choices[0][0]\n return super(TemplateChoiceField, self).prepare_value(value)", "def form_SelectChoice(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n return form", "def vehicle_type(self):\n return 'car'", "def vehicle_type(self):\n return 'car'", "def vehicle_type(self):\n return 'car'", "def sankey_dropdown(df=data):\n options = []\n for b in df.PUBorough.unique():\n options.append({'label': b, 'value': b})\n return dcc.Dropdown(\n id='borough',\n placeholder='Select a pick up borough',\n options=options,\n value='Manhattan',\n multi=False\n )", "def _select_value_from_a_profile_combo_box(combo_box_element, combo_box_list_option):\n ui_lib.wait_for_element_and_click(combo_box_element)\n ui_lib.wait_for_element_visible(combo_box_list_option)\n\n ui_lib.wait_for_element_and_click(combo_box_list_option)", "def form_SelectWithOtherChoice(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n return form", "def set(self, value):\n\n if value is None:\n return\n\n self.combobox.set(value)", "def set_dropdown_b_value(value):\n value_b = None\n if value=='A': value_b = 'C'\n if value == 'B': value_b = 'E'\n return value_b", "def select_option(self, option):\n log.info(\"Selecting option '\" + option + \"' on element: \" + self.id)\n select = Select(self.driver.find_element(self.by, self.id))\n select.select_by_visible_text(option)", "def set_transactions_grid_dropdown_value(self, column_name, column_value):\n self.single_selection_from_kendo_in_grid(self.transactions_grid_div_id, column_name, column_value, self.transactions_data_grid_name)\n self.click_element(self.page_header_locator)", "def for_type_select_link_vlan_for_name_enter_vlan1043(driver):\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Type\"]').click()\n wait_on_element(driver, 0.5, 5, '//mat-option[@ix-auto=\"option__Type_VLAN\"]')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Type_VLAN\"]').click()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys('vlan1043')", "def __init__(self, user, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.fields['car'].queryset = Car.objects.filter(\n manufacturer__admin=user)", "def select_by_value(self, *items):\n if len(items) > 1 and not self.is_multiple:\n raise ValueError(f\"The Select {self!r} does 
not allow multiple selections\")\n\n for value in items:\n matched = False\n for opt in self.browser.elements(f\".//option[@value={quote(value)}]\", parent=self):\n if not opt.is_selected():\n opt.click()\n\n if not self.is_multiple:\n return\n matched = True\n\n if not matched:\n raise ValueError(f\"Cannot locate option with value: {value!r}\")", "def do_select(self, line):\n xpath, option = split_args(line)\n e = self._find_element_by_xpath(xpath)\n select = Select(e)\n select.select_by_value(option)", "def choose_select(select_label, select_item=None):\n try:\n if select_item is not None:\n label = driver.find_element_by_xpath(\"//*[contains(text(), '%s')]\" % select_label)\n label_parent = label.find_element_by_xpath(\"..\")\n select = label_parent.find_element_by_tag_name('select')\n select.click()\n click_on(select_item, scope=select)\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def Dropdown(page, data):\n if page == \"book\":\n ph = \"Select a book from the collection\"\n if page == \"word\":\n ph = \"Select a word from the vocabulary\"\n\n return html.Div(id=\"ddbox\", children=[\n dcc.Dropdown(\n id='dropdown',\n options=[{\"label\": x, \"value\": x} for x in data],\n placeholder=ph,\n )\n ])", "def set_dropdown_b_options(value):\n value_c = None\n if value=='C': value_c = '1'\n if value == 'D': value_c = '3'\n if value=='E': value_c = '5'\n if value == 'F': value_c = '7'\n return value_c", "def get(self):\n return Car.browse()", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def select_variant(self, value) -> VariantSpec:\n return VariantSpec(self._encode, None, self._supports_back_ref)", "def select_collection_type(self, value):\n self.locator_finder_by_select(self.select_collection_type_id, value)\n time.sleep(1)", "def changeSelection(self, value):\n self.layer.selected_label = value\n self.selectionSpinBox.clearFocus()\n self.setFocus()", "def selected(self, item):\n self.elementoSeleccionado = item", "def get_car_id(self) -> str:\n return self.car_id", "def setValue(self,val):\n val = str(val)\n if val in self._choices_:\n self.input.setCurrentIndex(self._choices_.index(val))", "def deselect_option(self, selector, value):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import Select\n\n select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))\n select.deselect_by_value(value)", "def for_failover_vhid_select_30(driver):\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Failover VHID\"]').click()\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Failover VHID_30\"]').click()", "def evo_selected(self):\n pub.sendMessage(\"EVO_SELECTED\", id=self.id, location=self.location)", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def dropdown_select(self, event):\n\n school_id = int(event.item)\n same_school = self.data[self.data.school_id == school_id].index\n self.source.selected.indices = 
list(same_school)", "def form_SelectChoiceDate(request):\n schema = schemaish.Structure()\n schema.add('myDateSelect', schemaish.Date())\n options = [(datetime.date(1970,1,1),'a'),(datetime.date(1980,1,1),'b'),(datetime.date(1990,1,1),'c')]\n\n form = formish.Form(schema, 'form')\n form['myDateSelect'].widget = formish.SelectChoice(options)\n return form", "def __init__(__self__, *,\n name: str,\n value: Optional[str] = None,\n value_from: Optional['outputs.CSIPowerMaxSpecDriverSideCarsEnvsValueFrom'] = None):\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if value_from is not None:\n pulumi.set(__self__, \"value_from\", value_from)", "def car(self):\n return self.pair.car", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form", "def setSelectWidget(browser, name, labels):\n control = browser.getControl(name='%s.from' % name).mech_control\n form = control._form\n for label in labels:\n value = str(control.get(label=label))\n form.new_control('text', 'form.buyable_types', {'value': value})", "def test_select_field():", "def select_by_value(self, option):\n\n select = self._get_selenium_select()\n\n if select and isinstance(option, string_types):\n\n try:\n\n select.select_by_value(option)\n return True\n\n except NoSuchElementException:\n pass\n\n return False", "def select(self, xpath, value=None, text=None, index=None):\n field = webdriver.support.ui.Select(self.driver.find_element_by_xpath(xpath))\n if value:\n field.select_by_value(value)\n elif text:\n field.select_by_visible_text(text)\n elif index:\n field.select_by_index(index)", "def from_the(self, target: Target) -> \"SelectByValue\":\n self.target = target\n return self", "def check_combobox_selection(self, value):\n if self.sender() == self.cmbDepReqAction:\n if value != 0: self.cmbDepInstState.setCurrentIndex(0)\n elif self.sender() == self.cmbDepInstState:\n if value != 0: self.cmbDepReqAction.setCurrentIndex(0)\n elif self.sender() == self.cmbPropType:\n if value == 1:\n self.inpPropVal.setText(\"\")\n self.inpPropDef.setText(\"\")\n self.datamapper_properties.addMapping(self.cmbPropDef, 6)\n self.datamapper_properties.removeMapping(self.inpPropDef)\n self.cmbPropMulti.setCurrentIndex(0)\n self.cmbPropEdit.setCurrentIndex(0)\n self.inpPropVal.setEnabled(False)\n self.inpPropDef.setEnabled(False)\n self.cmbPropMulti.setEnabled(False)\n self.cmbPropEdit.setEnabled(False)\n self.cmbPropDef.setEnabled(True)\n self.cmbPropDef.setCurrentIndex(0)\n else:\n self.datamapper_properties.addMapping(self.inpPropDef, 6)\n self.datamapper_properties.removeMapping(self.cmbPropDef)\n self.inpPropVal.setEnabled(True)\n self.inpPropDef.setEnabled(True)\n self.cmbPropMulti.setEnabled(True)\n self.cmbPropEdit.setEnabled(True)\n self.cmbPropDef.setEnabled(False)", "def select_from_drop_down(\n self, title_or_titles, id=None, has_search_field=True):\n selector = self._get_drop_down_selector(id)\n self._selenium.find_element_by_css_selector(selector).click()\n values = (title_or_titles\n if isinstance(title_or_titles, (tuple, list))\n else [title_or_titles])\n for x in values:\n if has_search_field:\n self._selenium.find_element_by_css_selector(\n \".select2-search__field\").send_keys(str(x))\n 
self._selenium.find_element_by_css_selector(\n \".select2-search__field\").send_keys(\"\\n\")\n else:\n self._selenium.find_element_by_xpath(\n '//ul[@class=\"select2-results__options\"]'\n '/li[contains(text(), \"{}\")]'.format(x)).click()", "def set(self, value):\n self._nsObject.selectItemAtIndex_(value)", "def __init__(__self__, *,\n name: str,\n value: Optional[str] = None,\n value_from: Optional['outputs.CSIVXFlexOSSpecDriverSideCarsEnvsValueFrom'] = None):\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if value_from is not None:\n pulumi.set(__self__, \"value_from\", value_from)", "def card_sel(\n self, num=1, **kwargs\n ): # pylint: disable=too-many-locals, too-many-branches\n selectfrom = self.card_selSource(**kwargs)\n force = kwargs[\"force\"] if \"force\" in kwargs else False\n showdesc = kwargs[\"showdesc\"] if \"showdesc\" in kwargs else True\n verbs = kwargs.get(\"verbs\", (\"Select\", \"Unselect\"))\n\n if \"prompt\" in kwargs:\n self.output(kwargs[\"prompt\"])\n\n if \"anynum\" in kwargs and kwargs[\"anynum\"]:\n anynum = True\n num = 0\n else:\n anynum = False\n\n selected = []\n types = kwargs[\"types\"] if \"types\" in kwargs else {}\n types = self._type_selector(types)\n while True:\n options = []\n if (\n anynum\n or (force and num == len(selected))\n or (not force and num >= len(selected))\n ):\n o = Option(selector=\"0\", verb=\"Finish Selecting\", card=None)\n options.append(o)\n index = 1\n for c in sorted(selectfrom):\n if \"exclude\" in kwargs and c.name in kwargs[\"exclude\"]:\n continue\n if not self.select_by_type(c, types):\n continue\n sel = \"%d\" % index\n index += 1\n if c not in selected:\n verb = verbs[0]\n else:\n verb = verbs[1]\n o = Option(selector=sel, verb=verb, card=c, name=c.name)\n if showdesc:\n o[\"desc\"] = c.description(self)\n if kwargs.get(\"printcost\"):\n o[\"details\"] = str(self.card_cost(c))\n if kwargs.get(\"printtypes\"):\n o[\"details\"] = c.get_cardtype_repr()\n options.append(o)\n ui = self.user_input(options, \"Select which card?\")\n if not ui[\"card\"]:\n break\n if ui[\"card\"] in selected:\n selected.remove(ui[\"card\"])\n else:\n selected.append(ui[\"card\"])\n if num == 1 and len(selected) == 1:\n break\n return selected", "def set_vendor(self, vendor_list):\n self.multiple_items_selection_from_kendo_dropdown(self.vendor_dropdown_locator, vendor_list)\n self.wait_for_ajax_spinner_load()", "def select(self, locator: Locator, value: str) -> WindowsElement:\n element = self.ctx.get_element(locator)\n if hasattr(element.item, \"Select\"):\n # NOTE(cmin764): This is not supposed to work on `*Pattern` or `TextRange`\n # objects. 
(works with `Control`s and its derived flavors only, like a\n # combobox)\n element.item.Select(\n value, simulateMove=self.ctx.simulate_move, waitTime=self.ctx.wait_time\n )\n else:\n raise ActionNotPossible(\n f\"Element {locator!r} does not support selection (try with\"\n \" `Set Value` instead)\"\n )\n return element", "def car(mnfr, model, **car_info):\n car_info[\"manufacturer\"] = mnfr\n car_info[\"model name\"] = model\n return car_info", "def select(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"select\")", "def select(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"select\")", "def set_car(pair, val):\n pair.car = val\n return pair", "async def get_object_id(self, value):\n\n if isinstance(value, str):\n return value\n elif isinstance(value, Condition):\n return await self.get_pk_value(value)\n else:\n raise SelectError(f\"Selection must be of type {Condition} or {str}\")", "def onChangeCity(self, item):\n list = self.lstCities.getMultiSelectedItems()\n self.frame.mode.createChangeCityFrame(list, self.mySystemDict['id'], self.mySystemDict['name'])", "def value_from(self) -> Optional['outputs.CSIVXFlexOSSpecDriverSideCarsEnvsValueFrom']:\n return pulumi.get(self, \"value_from\")", "def mistake_select_value():\n win5 = Toplevel(root)\n win5.title(\"Ошибка\")\n win5.geometry('270x40')\n lbl12 = Label(win5, text=\"Сначала выберите значение из списка!\", width=45, height=2)\n lbl12.place(x=-30, y=0)", "async def test_select_set_option_light_camera(\n hass: HomeAssistant,\n light: Light,\n):\n _, entity_id = ids_from_device_description(Platform.SELECT, light, LIGHT_SELECTS[1])\n\n light.__fields__[\"set_paired_camera\"] = Mock()\n light.set_paired_camera = AsyncMock()\n\n camera = list(light.api.bootstrap.cameras.values())[0]\n\n await hass.services.async_call(\n \"select\",\n \"select_option\",\n {ATTR_ENTITY_ID: entity_id, ATTR_OPTION: camera.name},\n blocking=True,\n )\n\n light.set_paired_camera.assert_called_once_with(camera)\n\n await hass.services.async_call(\n \"select\",\n \"select_option\",\n {ATTR_ENTITY_ID: entity_id, ATTR_OPTION: \"Not Paired\"},\n blocking=True,\n )\n\n light.set_paired_camera.assert_called_with(None)", "def select_option(browser: WebDriver, id: str):\n security_el: WebElement = browser.find_element_by_xpath(\"//select[@id='ctl00_BodyPlaceHolder_EndOfDayPricesView1_ddlAllSecurityType_field']\")\n # work through the select options\n security_opts: List[WebElement] = security_el.find_elements_by_tag_name('option')\n for o in security_opts:\n if id in o.text:\n o.click()\n # click download button\n download_btn: WebElement = browser.find_element_by_xpath(\"//input[@name='ctl00$BodyPlaceHolder$EndOfDayPricesView1$btnAllDownload$implementation$field']\")\n download_btn.click()\n return", "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def select(self):\n pass", "def select(self):\n pass", "def dropdown_single(id_, placeholder, text=None):\n components = []\n if text:\n components.append(html.Div(text, className=\"select-dropdown-text\"))\n components.append(\n dcc.Dropdown(id=id_, placeholder=placeholder, style={\"width\": \"200px\"})\n )\n return html.Div(className=\"select-dropdown\", children=components)", "def selectOptionByLabel(self, element_tuple, select_label):\n 
self.log_info(f\"Browser.selectOptionByLabel: Setting {element_tuple} to {select_label}\")\n Select(self.CORE.find_element(*self.format_element(element_tuple))).select_by_visible_text(select_label)\n return", "def select_site(b):\n\n try:\n drop = Select(b.find_element_by_id('States'))\n drop.select_by_value(\"CA\")\n time.sleep(2)\n drop = Select(b.find_element_by_id('UnitNums'))\n drop.select_by_value(\"506\")\n time.sleep(2)\n except NoSuchElementException as e:\n pass", "def value_from(self) -> Optional['outputs.CSIPowerMaxSpecDriverSideCarsEnvsValueFrom']:\n return pulumi.get(self, \"value_from\")", "def form_SelectChoiceNoneOption(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options,none_option=(None, '--select--'))\n return form", "def __init__(__self__, *,\n name: str,\n value: Optional[str] = None,\n value_from: Optional['outputs.CSIPowerStoreSpecDriverSideCarsEnvsValueFrom'] = None):\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if value_from is not None:\n pulumi.set(__self__, \"value_from\", value_from)", "def select(self):\r\n pass", "async def _select_vehicle(self, vin):\n params = {\"vin\": vin, \"_\": int(time.time())}\n js_resp = await self.get(API_SELECT_VEHICLE, params=params)\n _LOGGER.debug(pprint.pformat(js_resp))\n if js_resp.get(\"success\"):\n self._current_vin = vin\n _LOGGER.debug(\"Current vehicle: vin=%s\", js_resp[\"data\"][\"vin\"])\n return js_resp[\"data\"]\n if not js_resp.get(\"success\") and js_resp.get(\"errorCode\") == API_ERROR_VEHICLE_SETUP:\n # Occasionally happens every few hours. Resetting the session seems to deal with it.\n _LOGGER.warning(\"VEHICLESETUPERROR received. 
Resetting session.\")\n self.reset_session()\n return False\n _LOGGER.debug(\"Failed to switch vehicle errorCode=%s\", js_resp.get(\"errorCode\"))\n # Something else is probably wrong with the backend server context - try resetting\n self.reset_session()\n raise SubaruException(\"Failed to switch vehicle %s - resetting session.\" % js_resp.get(\"errorCode\"))", "async def test_selects(hass: HomeAssistant, vehicle_type: str):\n\n entity_registry = mock_registry(hass)\n device_registry = mock_device_registry(hass)\n\n with patch(\"homeassistant.components.renault.PLATFORMS\", [SELECT_DOMAIN]):\n await setup_renault_integration_vehicle(hass, vehicle_type)\n await hass.async_block_till_done()\n\n mock_vehicle = MOCK_VEHICLES[vehicle_type]\n check_device_registry(device_registry, mock_vehicle[\"expected_device\"])\n\n expected_entities = mock_vehicle[SELECT_DOMAIN]\n assert len(entity_registry.entities) == len(expected_entities)\n for expected_entity in expected_entities:\n entity_id = expected_entity[\"entity_id\"]\n registry_entry = entity_registry.entities.get(entity_id)\n assert registry_entry is not None\n assert registry_entry.unique_id == expected_entity[\"unique_id\"]\n state = hass.states.get(entity_id)\n assert state.state == expected_entity[\"result\"]\n for attr in FIXED_ATTRIBUTES + DYNAMIC_ATTRIBUTES:\n assert state.attributes.get(attr) == expected_entity.get(attr)", "def make_car(manufacturer, model, **options):\n car_dict = {\n 'manufacturer': manufacturer.title(),\n 'model': model.title(),\n }\n for option, value in options.items():\n car_dict[option] = value\n\n return car_dict", "def value_from(self) -> Optional['outputs.CSIUnitySpecDriverSideCarsEnvsValueFrom']:\n return pulumi.get(self, \"value_from\")", "def select(self, _: int = 0) -> None:\n if not self.all_items:\n self._exit()\n return\n self.selected_option = self.current_option\n\n assert self.selected_item is not None\n self.selected_item.set_up()\n self.selected_item.action()\n self.selected_item.clean_up()\n\n self.returned_value = self.selected_item.get_return()\n self.should_exit = self.selected_item.should_exit\n\n if not self.should_exit:\n self.draw()", "def get(self):\n value = self.combobox.get()\n return value", "def make_car(manufacturer, model, **options):\r\n car_dict = {\r\n 'manufacturer': manufacturer.title(),\r\n 'model': model.title(),\r\n }\r\n for option, value in options.items():\r\n car_dict[option] = value\r\n\r\n return car_dict", "def __init__(__self__, *,\n name: str,\n value: Optional[str] = None,\n value_from: Optional['outputs.CSIUnitySpecDriverSideCarsEnvsValueFrom'] = None):\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if value_from is not None:\n pulumi.set(__self__, \"value_from\", value_from)", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 'F':\n options_c = [{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def debconfselect(pkg, param, value):\n cmd(\"echo %s %s select %s | debconf-set-selections\" % (pkg, param, value))", "def add_car(self, car):\n\n TheCar.objects.update_or_create(make=car.make, model=car.model.upper())", "def set_selection(self, index, value):\n if not 
self._has_cbox[index]:\n return\n i = self._widgets[index][\"values\"].index( str(value) )\n self._widgets[index].current(i)", "def value_from(self) -> Optional['outputs.CSIPowerStoreSpecDriverSideCarsEnvsValueFrom']:\n return pulumi.get(self, \"value_from\")", "def select(self):\n return", "def select(action, object_='', options=[], selection=None):\n html = u'<select '\n html += u'name=\"'+action+object_+'\" '\n if action and object_:\n html += u'onchange=\"submitLink(\\''+action+'\\', \\''+object_+'\\');\"'\n html += u'>\\n'\n for option, value in options:\n html += u' <option value=\"'+unicode(value)+'\" '\n if value == selection:\n html += u'selected=\"selected\" '\n html += u'>'\n html += option\n html += u'</option>\\n'\n html += u'</select>\\n'\n return html", "async def test_select_set_option_camera_doorbell_custom(\n hass: HomeAssistant,\n camera: Camera,\n):\n _, entity_id = ids_from_device_description(\n Platform.SELECT, camera, CAMERA_SELECTS[2]\n )\n\n camera.__fields__[\"set_lcd_text\"] = Mock()\n camera.set_lcd_text = AsyncMock()\n\n await hass.services.async_call(\n \"select\",\n \"select_option\",\n {ATTR_ENTITY_ID: entity_id, ATTR_OPTION: \"Test\"},\n blocking=True,\n )\n\n camera.set_lcd_text.assert_called_once_with(\n DoorbellMessageType.CUSTOM_MESSAGE, text=\"Test\"\n )", "def select_based_on_var(self, var):\n if var:\n self.select()\n else:\n self.deselect()" ]
[ "0.6586301", "0.6283673", "0.624815", "0.5832125", "0.5829928", "0.56058097", "0.55932057", "0.5486654", "0.5476527", "0.54241836", "0.54193985", "0.5406573", "0.5401394", "0.52843726", "0.5263898", "0.52331984", "0.51806915", "0.5153212", "0.5153212", "0.5153212", "0.5129445", "0.5080715", "0.50471395", "0.50445884", "0.5030479", "0.50072855", "0.49755263", "0.49543473", "0.4934902", "0.49242532", "0.49142936", "0.49090964", "0.48619226", "0.48560047", "0.48234174", "0.4811991", "0.4774531", "0.47504544", "0.47449368", "0.47433233", "0.47384328", "0.4728154", "0.47229856", "0.47213122", "0.47124934", "0.47049814", "0.4698445", "0.4687754", "0.465142", "0.464614", "0.46430594", "0.46323055", "0.4621215", "0.461679", "0.46136072", "0.46040747", "0.45983925", "0.45929125", "0.4586461", "0.45852333", "0.45825642", "0.45810422", "0.45791575", "0.45765716", "0.457269", "0.457269", "0.4564693", "0.45575643", "0.45426387", "0.45401227", "0.45374933", "0.4534549", "0.45238578", "0.4522836", "0.45224774", "0.45224774", "0.4512619", "0.4506828", "0.45013374", "0.45003563", "0.45003012", "0.4493984", "0.44928375", "0.44888625", "0.44851297", "0.44837716", "0.44835928", "0.44832626", "0.44803286", "0.44737366", "0.44569385", "0.44562283", "0.4452889", "0.445008", "0.4448938", "0.44356367", "0.4432544", "0.44324946", "0.44287854", "0.44240534" ]
0.8951591
0
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
def is_car_selected(self, car): return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
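For context: this method reads the selected state of the matching option element through the query's .selected attribute. A minimal illustrative check that pairs it with the select_car helper from the previous record is sketched below; the page argument and the option values ("volvo", "saab") are assumptions, not part of the dataset entry.

# Illustrative sketch only -- "page", "volvo", and "saab" are assumed names/values.
def check_car_selection(page):
    page.select_car("volvo")
    assert page.is_car_selected("volvo")
    assert not page.is_car_selected("saab")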
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_selected(self) -> bool:\n return self.proto.is_selected", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def is_selected(self) -> bool:\r\n return self.selected", "def is_selected(self):\n return self._selected", "def is_selected(self):\n return self._selected", "def is_selected(self):\n return self.container['is_selected']", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def carExists(self, carmake):\n data = db.session.query(Car.id).filter_by(make = carmake).first()\n if data is None:\n return False\n else:\n return True", "def is_selected(self, selector):\n el = self.locate_element(selector)\n return el.is_selected()", "def requires_selection(self) -> bool:\n return True", "def is_selected(self):\n return self._element_call(lambda: self.el.is_selected)", "def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")", "def select(condition: Union[Callable, int], meta: Counter) -> bool:\n if condition is None:\n return True\n elif isinstance(condition, int):\n return sum(meta.values()) == condition\n elif callable(condition):\n if not isinstance(condition(meta), bool):\n raise TypeError('selection condition expected to return a boolean')\n return condition(meta)\n return False", "def add_car(self, car):\n car_coords = car.car_coordinates()\n for coord in car_coords:\n if coord not in self.cell_list(): # not in 7*7 board\n return False\n elif self.cell_content(coord) is not None:\n return False\n for old_car in self.__cars:\n if old_car.get_name() == car.get_name():\n return False\n self.__cars.append(car)\n return True", "def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False", "def is_initially_selected(self, value):\n return value in self._get_selected_values_set()", "def isCarAvailable(self, car, start, end):\n rentals = self.filterRentals(None, car)\n for rent in rentals:\n if start > rent.end or end < rent.start:\n continue\n return False\n return True", "def selected(self):\n\n return self.element().is_selected() if self.exists() else False", "def is_red_car(self):\n return self.identifier == 18", "def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem", "def is_on(self):\n return 
self._device.car_state.get(self._key)", "def select_car(self, car_value):\n self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car_value)).first.click()", "def selected(self):\n\n return self.infodock.is_instruction_selected(self.addr)", "def has_car(self, i, lane_index):\n return self._spots[i].has_car(lane_index)", "def is_selection(cfg):\n if LIBRARIES in list(cfg.keys()):\n return True\n else:\n return False", "def IsObjectSelected(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.IsSelected(False)", "def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index", "def joy_select(event: EventType, widget: WidgetType) -> bool:\n return event.button == JOY_BUTTON_SELECT", "def is_selected_option(self, xpos, ypos, i):\n\n if (\n self.x < xpos < self.x + self.width\n and self.y + self.height * (i + 1)\n < ypos\n < self.y + self.height + (i + 1) * self.height\n ):\n return True\n else:\n return False", "def selectable(cls):\n return True", "def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]", "def is_selected(self):\r\n if hasattr(self, 'name') and self.name == 'home':\r\n return False\r\n if self.opt:\r\n return request.params.get(self.opt, '') in self.aliases\r\n else:\r\n stripped_path = request.path.rstrip('/').lower()\r\n ustripped_path = _force_unicode(stripped_path)\r\n if stripped_path == self.bare_path:\r\n return True\r\n if stripped_path in self.aliases:\r\n return True", "def isWidgetSelected(self, QWidget): # real signature unknown; restored from __doc__\n return False", "def is_checked(self):\n\treturn self._Widget__w['isChecked'] == 'true'", "def isTrackSelected(*args, **kwargs):\n pass", "def check_robot_selection(number_of_robots=None):\n # Check if any robots are selected\n robots = get_robot_roots()\n if not robots:\n return False\n # Check against number of robots\n if number_of_robots:\n if len(robots) > number_of_robots:\n return False\n return True", "def is_choice(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs()) is not None", "def verify_selected_vendor(self, vendor_name):\n is_present = None\n vendor_locator = (By.XPATH, self.selected_vendor_locator_string + \"[text()='%s']\" % vendor_name)\n try:\n self.wait().until(EC.presence_of_element_located(vendor_locator))\n is_present = True\n except:\n is_present = False\n finally:\n return is_present", "def bookingExists(self, user_id, car_id):\n data = db.session.query(Booking).filter_by(user_id = user_id, car_id = car_id).first()\n if data is None:\n return False\n else:\n return True", "def try_to_select_gear_for_upgrade(self):\n if self.emulator.is_ui_element_on_screen(self.ui['CUSTOM_GEAR_CHANGE_OPTION']):\n self.emulator.click_button(self.ui['CUSTOM_GEAR_1'].button)\n return not self.emulator.is_ui_element_on_screen(self.ui['CUSTOM_GEAR_CHANGE_OPTION'])", "def IsSelected(self):\r\n\r\n return self._hasHilight != 0", "def option_chooser():\n option_ = True\n choice = input(\"Do you want to find out information about \" + movie_name + \"? 
(y/n) \")\n if choice == \"n\":\n option_ = False\n return option_", "def autoselect(self):\n # type: () -> bool\n return self._autoselect", "def isselected(values, feature, parent):\r\n layername=values[0]\r\n fid = feature.id()\r\n layers = QgsMapLayerRegistry.instance().mapLayers()\r\n try:\r\n layer = layers[layername]\r\n except KeyError:\r\n try:\r\n layer = [l for l in layers.iteritems() if l[1].name() == layername][0][1]\r\n except IndexError:\r\n parent.setEvalErrorString( u'No layer with id or name {} found'.format( layername ) )\r\n return False\r\n\r\n return fid in layer.selectedFeaturesIds()", "def is_active(self, channel):\n return bool(int(self.bus.ask('sel:%s?' % channel)))", "def is_ecChoose(self):\n return len(self.ecChoose_list) > 0", "def is_condition(cfg):\n if SELECTIONS in list(cfg.keys()):\n return True\n else:\n return False", "def checkbox_should_be_selected(self, locator):\n self._info(\"Verifying checkbox '%s' is selected.\" % locator)\n if not self._selenium.is_checked(self._parse_locator(locator)):\n raise AssertionError(\"Checkbox '%s' should have been selected \"\n \"but was not\" % locator)", "def is_select(status):\n if not status:\n return False\n return status.split(None, 1)[0].lower() == 'select'", "def value(self):\n return self.element.is_selected()", "def is_valid_option(cls, id_):\n return id_ in cls.CHOICES", "def all_cards_selected(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.selected_card == None,\n ancestor=game_key).fetch()\n logging.debug(\"participants who have not selected a card: %s\", participants)\n if participants:\n return False\n else:\n return True", "def isSelected(*args):", "def isSelected(*args):", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def is_allow_select_boot_device(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsAllowSelectBootDevice', self.handle))", "def get_bool(self, sect, opt):\r\n return self.get_safe(sect, opt) == \"True\"", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def selected(self, key):\n default = self.default\n if not default:\n return False\n for item in default:\n if compare(key, item) == 0:\n return True\n return False", "def selected(self, key):\n default = self.default\n if not default:\n return False\n for item in default:\n if compare(key, item) == 0:\n return True\n return False", "def is_select_one(self) -> bool:\n select_one_starts = (\n 'select_one ',\n 'select_one_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_one_starts)", "def is_selected_main(self, xpos, ypos):\n\n if self.x < xpos < self.x + self.width and self.y < ypos < self.y + self.height:\n return True\n else:\n return False", "def check_choice(choice):\n return choice == 'y' or choice == 'n'", "def hasSelectedText(self):\n return self.textCursor().hasSelection()", "def CanCopy(self):\n return self.HasSelection()", "def is_selected(self, is_selected):\n\n self.container['is_selected'] = is_selected", "def is_vehicle_parked(self, registration_number):\n return registration_number in self.__vehicle_slot_mapping__", "def __bool__(self):\n return any(self.smask)", "def set_selection(self, selection):\n for num in self.cryptomattes:\n if self.cryptomattes[num][\"name\"] == selection:\n self.selection = num\n return True\n self.selection = None\n return False", "def validateSelection(self):\n if self.cameraMoving == 0:\n return 1\n else:\n return 0", "def is_active(self):\n 
return self == self.item.active_take", "def IsObjectSelectable(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.IsSelectable(True,False,False,False)", "def camera_set(self) -> bool:\n if self.camera is None: # pragma: no cover\n return False\n return self.camera.is_set", "def vehicle_type(self):\n return 'car'", "def vehicle_type(self):\n return 'car'", "def vehicle_type(self):\n return 'car'", "def is_select_type(self) -> bool:\n row_type = self.get_type()\n return row_type.startswith('select')", "def move_car(self, name, movekey):\n for a_car in self.__cars:\n if name == a_car.get_name() \\\n and movekey in a_car.possible_moves():\n empty_cell = a_car.movement_requirements(movekey)\n lst_of_idx = self.cell_list()\n last_cell = lst_of_idx[-1]\n if empty_cell[0] == last_cell:\n a_car.move(movekey)\n return True\n if self.cell_content(empty_cell[0]) is None \\\n and empty_cell[0] in self.cell_list():\n a_car.move(movekey)\n return True\n return False", "def isSelected(self):\n raise AbstractError", "def is_select(self) -> bool:\n return self.statement.is_select", "def value(self):\n return self.input.checkState() == QtCore.Qt.Checked", "def is_checked(self, locator_type, locator):\n try:\n radio_button = self.wait_until_element_find(locator_type, locator)\n return radio_button.is_selected()\n except TimeoutException:\n return False", "def is_taking_part_in_active_career_event(cls, sim_info: SimInfo) -> bool:\n if sim_info is None:\n return False\n return any(career.is_at_active_event for career in sim_info.careers.values())", "def autoselect(self, autoselect):\n # type: (bool) -> None\n\n if autoselect is not None:\n if not isinstance(autoselect, bool):\n raise TypeError(\"Invalid type for `autoselect`, type has to be `bool`\")\n\n self._autoselect = autoselect", "def bool_option (arg: Any) -> bool:\n return True", "def test_boolean_and_selection(self):\n\n # The selection loop:\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro\"))\n\n # Test:\n self.assertEqual(len(sel), 1)\n for res in sel:\n self.assert_(res.name == \"Pro\" and res.num == 4)", "def is_food(self) -> bool:\n return self in (self.off, self.off_pro)", "def is_changed(self) -> bool:\n return self.selected_vms != self._initial_vms", "def isAnyFlagSelected(self):\n for key in self.canSelectFlags.keys():\n if self.canSelectFlags[key] == 1:\n return 1\n return 0", "def IsItemChecked(self, item):\r\n\r\n return item.IsChecked()", "def is_insert_selection():\n\n selection = Gui.Selection.getSelectionEx()\n\n if len(selection) != 2:\n return False\n\n object_one = selection[0].SubObjects[0]\n object_two = selection[1].SubObjects[0]\n\n return is_connected(object_one, object_two)", "def _has_selected_sentence_from_search_results(action: Union[Dict, Message]):\n k_task = 'task_data'\n k_selected = 'selected_text_candidates'\n if (k_task in action) and (k_selected in action[k_task]):\n # Boolean value that user has not selected any option\n return not action[k_task][k_selected][0][0]\n return False", "def process(self, car):\n car.check_coordination(self)", "def verify_selected_price_list(self, price_list_item):\n is_present = None\n price_list_locator = (By.XPATH, self.selected_price_list_locator_string + \"[text()='%s']\" % price_list_item)\n try:\n self.wait().until(EC.presence_of_element_located(price_list_locator))\n is_present = True\n except:\n is_present = False\n finally:\n return is_present", "def checkRegionControl(self, iPlayer, regionID, bVassal = False):\n\t\t\n\t\tbFound = 
False\n\t\tplotList = self.getRegionPlotList([regionID])\n\t\tfor tPlot in plotList:\n\t\t\t\tpCurrent = gc.getMap().plot(tPlot[0], tPlot[1])\n\t\t\t\tif pCurrent.isCity():\n\t\t\t\t\tiOwner = pCurrent.getPlotCity().getOwner()\n\t\t\t\t\tif iOwner != iPlayer:\n\t\t\t\t\t\tif bVassal:\n\t\t\t\t\t\t\tif gc.getTeam(gc.getPlayer(iOwner).getTeam()).isVassal(iPlayer):\n\t\t\t\t\t\t\t\tbFound = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treturn False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\tbFound = True\n\t\tif bFound:\n\t\t\treturn True\n\t\telse:\n\t\t\tfor tPlot in plotList:\n\t\t\t\tpCurrent = gc.getMap().plot(tPlot[0], tPlot[1])\n\t\t\t\tiOwner = pCurrent.getOwner()\n\t\t\t\tif iOwner != iPlayer:\n\t\t\t\t\tbFound = False\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tbFound = True\n\t\t\tif bFound:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def is_select_multiple(self) -> bool:\n select_multiple_starts = (\n 'select_multiple ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_multiple_starts)", "def is_country_selection_criteria_field_present_in_view_price_list_details_ref_rates_page(self):\n return self.is_specific_selection_criteria_filter_present(self.view_price_list_ref_rates_page_div_id, self.country_label_name)", "def is_option_selected(option, field):\n if field.attr.default and option[0] == field.attr.default: # and option[0] != self.empty:\n return ' selected=\"selected\"'\n else:\n return ''", "def can_pickup(self):\n return False", "def CheckIfRouteShouldBeSelected(cls, PathInfo, RouteConditions):\r\n\t\tIfTest = cls.IfTestRouteSearch\r\n\r\n\t\t# VisitStations\r\n\t\tif RouteConditions.has_key(cls.VisitStations):\r\n\t\t\tcond = cls.VisitStations\r\n\t\t\tparameters = RouteConditions[cond]\r\n\t\t\tStationList = parameters[0]\r\n\t\t\tIncludeOption = parameters[1]\r\n\t\t\tif IncludeOption in (INCLUDE_ALL,):\r\n\t\t\t\tif not CheckIfRouteIncludesAllStationsInList(PathInfo, StationList):\r\n\t\t\t\t\tIncrementDicValue(cls.TerminationReasonsDic, 'VisitStations_INCLUDE_ALL')\r\n\t\t\t\t\tif IfTest: print \"--------- VisitStations_INCLUDE_ALL violated ---------\"\r\n\t\t\t\t\treturn False\r\n\r\n\t\t# passed all conditions\r\n\t\tcls.RouteCountAfterRouteSelection += 1\r\n\t\treturn True" ]
[ "0.6459931", "0.64561963", "0.64533645", "0.5970567", "0.5970567", "0.5941123", "0.59283936", "0.5885889", "0.58118576", "0.5765825", "0.57478505", "0.5745376", "0.55844533", "0.5575426", "0.5548363", "0.5547362", "0.5532758", "0.5526049", "0.55059886", "0.5495182", "0.5488999", "0.54878986", "0.5470811", "0.542837", "0.5425493", "0.54199123", "0.5416708", "0.539024", "0.5382875", "0.5349741", "0.5342664", "0.5327459", "0.52844757", "0.52242166", "0.5207063", "0.5186644", "0.5166335", "0.5154475", "0.51485854", "0.5145172", "0.51184106", "0.51102465", "0.51098096", "0.51038504", "0.50910884", "0.5070072", "0.50694394", "0.5068005", "0.50554746", "0.5049658", "0.5035118", "0.49918813", "0.4983754", "0.4983754", "0.49831793", "0.49798125", "0.4973293", "0.49566832", "0.49542037", "0.49542037", "0.49434173", "0.49309677", "0.48798832", "0.4879631", "0.4858666", "0.48583078", "0.48323888", "0.48314044", "0.4811233", "0.48028043", "0.4799846", "0.47818145", "0.4780448", "0.47697258", "0.47697258", "0.47697258", "0.47589892", "0.47567305", "0.4744622", "0.47232732", "0.47227815", "0.47157633", "0.47111508", "0.47083637", "0.4684304", "0.4675371", "0.46747604", "0.4670613", "0.46664694", "0.46644458", "0.4657347", "0.46510604", "0.46481055", "0.46333575", "0.46319398", "0.46312198", "0.46135053", "0.46116176", "0.4607669", "0.46062973" ]
0.86627126
0
Toggle the box for the pill with `pill_name` (red or blue).
def toggle_pill(self, pill_name): self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
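For context: toggle_pill clicks the input whose id equals pill_name under the #fixture element. A hypothetical companion helper that reads the pill's checked state with the same self.q(css=...) pattern is sketched below, for illustration only; it is not part of the dataset entry.

# Illustrative sketch only -- a hypothetical companion helper, not from the record.
def is_pill_checked(self, pill_name):
    return self.q(css=u"#fixture input#{}".format(pill_name)).selected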
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle_color_picker(self, wid, color_name='square_fill_ink'):\n print(\"TOGGLE COLOR PICKER\", getattr(wid, color_name), self.color_picker)\n is_open = self.color_dropdown and self.color_dropdown.attach_to\n if is_open:\n self.color_dropdown.dismiss()\n if self.color_dropdown:\n self.color_picker.unbind(color=wid.setter(color_name))\n self.color_picker = None\n self.color_dropdown = None\n if not is_open:\n self.color_dropdown = Factory.ColorPickerDD()\n self.change_flow(id_of_flow('suggest'))\n self.color_dropdown.open(wid)\n self.color_picker = self.color_dropdown.ids.col_pic\n self.color_picker.color = getattr(wid, color_name)\n self.color_picker.bind(color=wid.setter(color_name))", "def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def toggle_box(self):\n if self.overlay:\n self.overlay.show_box()", "def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)", "def led_toggle(self):\n if self.state == GPIO.LOW:\n self.state = GPIO.HIGH\n else:\n self.state = GPIO.LOW\n return self.update_status()", "def pin_toggle(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n port_state = gpio.HIGH\n if gpio.input(port_num) == gpio.HIGH:\n port_state = gpio.LOW\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, port_state)", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))", "def toggle(self, color='all'):\n if color in ['all', 'r']:\n self.__send('r', 'toggle')\n\n if color in ['all', 'g']:\n self.__send('g', 'toggle')\n\n if color in ['all', 'b']:\n self.__send('b', 'toggle')", "def toggle_color(self, index):\n if self.get_state(index):\n self.canvas.itemconfigure(self.cells[index], state=HIDDEN)\n else:\n self.canvas.itemconfigure(self.cells[index], state=NORMAL)", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "def change_to_tasks(self):\n self.ids[\"shp_btn\"].color = 1, 1, 1, 0.5", "def togglePulseUI():\n if isPulseUIShowing():\n hidePulseUI()\n else:\n showPulseUI()", "def change_colour(self) -> None:\n if self.ui.radioButtonWhite.isChecked():\n self.pen_colour = QColor(Qt.white)\n elif self.ui.radioButtonColour.isChecked():\n self.pen_colour = self.chosen_colour\n else: # Impossible but better to control\n message.show_error(config.PROG_ERR3)\n return None", "def toggle_highlighted_spikes(self, checked):\n self.show_all_spikes = checked\n self.set_interval()", "def toggle(self) -> None:\n ...", "def toggle(\n id: int = 
typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.toggle())\n console.print(f\"[{ip}] Light {id} Toggle:\\n{json.dumps(resp, indent=2)}\")", "def dimmer_switch(turtle, color):\n turtle.fillcolor(color + \"4\")", "def toggle(self, layout, item, feats):\n if self.active.isChecked():\n self.fill_active(layout)\n\n self.default_button = QPushButton('set to defaults', feats)\n layout.addWidget(self.default_button)\n self.default_button.clicked.connect(self.rec_default)\n\n item.setForeground(QColor('black'));\n else:\n self.clear_params(layout, item)", "def toggle(self) -> None:", "def toggle(self) -> None:", "def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})", "def btnState(self, box):\n if box.text() == \"Log to File\":\n if box.isChecked():\n self.stdoutBox.setChecked(False)\n # should not edit filename\n self.logfileEdit.setReadOnly(False)\n self.debugStatements = True\n self.switchDebugOutput()\n\n if box.text() == \"Log to Stdout\":\n if box.isChecked():\n self.fileBox.setChecked(False)\n self.logfileEdit.setReadOnly(True)\n self.debugStatements = False\n self.switchDebugOutput()", "def change_to_shopping(self):\n self.ids[\"tsk_btn\"].color = 1, 1, 1, 0.5", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def toggle(self):\n try:\n self.pin.toggle()\n except NotImplementedError:\n if self.ison():\n self.off()\n else:\n self.on()", "def change_stepper_status(self, status):\n\n if status:\n GPIO.output(26, GPIO.HIGH)\n else:\n GPIO.output(26, GPIO.LOW)", "def toggle_flag_slot(self, pick):\n self._check_game_over()\n self._validate_pick(pick)\n\n self.board.toggle_flag_slot(pick)", "def light_on(self, pin='D13'):\n self.light_set(pin, '1')", "def _on_change(self, *_):\n colour = self.on_colour if self.value else self.off_colour\n self.configure(bg=colour)\n if self.label:\n self.label.configure(bg=colour)", "def switch_state():\n\tDmg.OpenWindow()", "def pin_on(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.HIGH)", "def ToggleLock(self, event):\n pass", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def unlock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_unlock.click()\n\n if check:\n self.close_notification('success')", "def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()", "def toggle(self):", "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "def toggle_pin(self, pin=TIOCM_DTR, time=1000):\n\n\t\tlogging.debug(\"Set pin high\")\n\t\tioctl(self.fd, TIOCMBIS, struct.pack('I', pin))\n\n\t\tsleep(float(time) / 1000.)\n\n\t\tlogging.debug(\"Set pin low\")\n\t\tioctl(self.fd, TIOCMBIC, struct.pack('I', pin))", "def toggle_locked():\n panel_id = request.args.get('id')\n json = False\n if not panel_id:\n json = True\n 
panel_id = request.json['id']\n project_id = get_project_id_by_panel_id(s, panel_id)\n if current_user.id == get_locked_user(s, panel_id) and json:\n unlock_panel_query(s, panel_id)\n return jsonify(\"complete\")\n elif check_user_has_permission(s, current_user.id, project_id):\n unlock_panel_query(s, panel_id)\n return manage_locked(message=\"Panel Unlocked\")\n else:\n return manage_locked(message=\"Hmmmm you don't have permission to do that\")", "def switch_off(self,name):\n self.circles[name].switch_off()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=0 WHERE target=%s\"\"\", (name,))", "def on_pushButton_toggled(self, checked):\n self.isPause = checked", "def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)", "def gpio_input(door: Door):\n input_state = GPIO.input(GPIO_PIN)\n if input_state:\n door.is_closed()\n else:\n door.is_opened()", "def lock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_lock.click()\n\n if check:\n self.close_notification('success')", "def toggleEdgeMode(self, PWMpin):\n mask = 1 << PWMpin\n self._injectFault(\"PWM1PCR\", self.PCR, mask)", "def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)", "def toggle(self):\n self.open = not self.open", "def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))", "def led(color: int, /) -> None:", "def toggle(self) -> None:\n if self.value is None:\n raise ValueError('Cannot toggle dark mode when it is set to auto.')\n self.value = not self.value", "async def blink(my_board, pin):\n\n # set the pin mode\n await my_board.set_pin_mode_digital_output(pin)\n\n # toggle the pin 4 times and exit\n for x in range(4):\n print('ON')\n await my_board.digital_write(pin, 1)\n await asyncio.sleep(1)\n print('OFF')\n await my_board.digital_write(pin, 0)\n await asyncio.sleep(1)", "def toggle_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.toggle()", "def pin_pullup(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLUP)", "def switch_color(color):\n return \"b\" if color == \"w\" else \"w\"", "def skill_down_enable(self, skill_string):\r\n self.__skills_ui_elem_ALL[skill_string][\"down\"]. \\\r\n configure(state=NORMAL, bg=\"red\")", "def do_red(self,command):\n if \"on\" in command:\n print 'Red ON'\n GPIO.output(7,GPIO.HIGH)\n elif \"off\" in command:\n print 'Red OFF'\n GPIO.output(7,GPIO.LOW)\n elif \"flash\" in command:\n print 'Flashing green'\n FlashPin(pin=7,count=5,delay=0.1)\n else:\n print \"ERROR! 
MF!\"", "def change_color(self, color):\r\n if color == \"black\":\r\n self.color = \"white\"\r\n self.canvas.itemconfig(self.ball, fill='white')\r\n else:\r\n self.color = \"black\"\r\n self.canvas.itemconfig(self.ball, fill='black')", "def updateCheck(self):\n if (self.checkStatus1.get() == True):\n self.master.configure(background='#f5f5f0')\n self.checkStatus2.set(False)\n self.checkStatus3.set(False)\n\n elif (self.checkStatus2.get() == True):\n self.master.configure(background='#ff99ff')\n self.checkStatus3.set(False)\n self.checkStatus1.set(False)\n elif (self.checkStatus3.get() == True):\n self.master.configure(background='#00ff00')\n self.checkStatus1.set(False)\n self.checkStatus2.set(False)", "def choose_color(self, b, name):\n section, option = name\n cur = b.background_color[:3]\n self.subview_open = True\n name = dialogs.list_dialog(\"Choose a color\", COLORS, multiple=False)\n self.subview_open = False\n if name is None:\n return\n _stash.config.set(section, option, repr(name))\n self.table.reload_data()\n self.save()", "def set_light_on(self):\r\n self._light = \"ON\"", "def brighter_switch(turtle, color):\n turtle.fillcolor(color + \"1\")", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def toggle_pick_upable(self,new_bool):\n self.pick_upable = new_bool", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def rgb_spin_changed(self, event):\n spin_red = self.spinbutton_r.get_value_as_int()\n spin_green = self.spinbutton_g.get_value_as_int()\n spin_blue = self.spinbutton_b.get_value_as_int()\n\n self.change_color((spin_red, spin_green, spin_blue))", "def _act_task_checked(self, iden, b):\n if b:\n self.data.turn_on(iden)\n else:\n self.data.turn_off()", "def toggle_shade(self,shade):\n\n # First toggle the user specified shade\n if self.shades[shade][0]:\n self.shades[shade][0] = 0\n else:\n self.shades[shade][0] = 1\n\n # Now draw the image with the active shades\n self.image.blit(self.pic,(0,0))\n for key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1],(0,0))", "def do_green(self,command):\n if \"on\" in command:\n print 'Green ON'\n GPIO.output(22,GPIO.HIGH)\n elif \"off\" in command:\n print 'Green OFF'\n GPIO.output(22,GPIO.LOW)\n elif \"flash\" in command:\n print 'Flashing green'\n FlashPin(pin=22,count=5,delay=0.1)\n else:\n print \"ERROR! 
MF!\"", "def toggle_method(*args):\n sel = cmds.radioButtonGrp(widgets[\"methodRBG\"], q=True, sl=True)\n \n if sel == 1:\n cmds.floatFieldGrp(widgets[\"recoFFG\"], e=True, en=True)\n cmds.intFieldGrp(widgets[\"totalIFBG\"], e=True, en=False)\n\n elif sel == 2:\n cmds.floatFieldGrp(widgets[\"recoFFG\"], e=True, en=False)\n cmds.intFieldGrp(widgets[\"totalIFBG\"], e=True, en=True)", "def toggled(self, *args, **kwargs): # real signature unknown\n pass", "def toggle_item_starred(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_starred = self.selected_item.starred\n message = 'Starred flag is now ON'\n if was_starred:\n message = 'Starred flag is now OFF'\n self.trigger_item_starred(not was_starred)\n self.controller.display_message(message)", "def stateChanged(self, obj, box):\n logger.debug(\"checkbox state changed\")\n if(box.isChecked()==False):\n logger.debug(\"deselect: %s\" % obj)\n cmds.select(obj, d=True) #deselect object\n else:\n logger.debug(\"%s is checked\" % obj)", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def set_light_on(self):\n self._light = \"ON\"", "def changePopupColour(templateVar):\n\tglobal currentPeaWindow\n\tif templateVar in podTemplate.templateColours and currentPeaWindow:\n\t\tcorrectColour=podTemplate.templateColours[templateVar]\n\t\tcurrentPeaWindow.sectionData[\"Template\"].label.config(fg=correctColour)\n\t\t#currentPeaWindow.status.colour(correctColour)", "def set_led(self, on=True):\n if on:\n GPIO.output(self.LED, GPIO.HIGH)\n else:\n GPIO.output(self.LED, GPIO.LOW)", "def pickColour(self):\n colour = QColorDialog.getColor()\n if colour.isValid():\n self.user[\"Colour\"] = colour.name()\n self.ui.l_colour.setText(self.user[\"Colour\"])", "def led(red: int, green: int, blue: int, /) -> None:", "def setLED(self):\n newValue = 0\n while newValue != '':\n newValue = input('Enter 0 or 1 to turn LED on or off or enter to exit.\\n')\n \n if newValue == '0':\n self.board.write(b'0')\n time.sleep(1)\n elif newValue == '1':\n self.board.write(b'1')\n time.sleep(1)\n else:\n time.sleep(1)", "def led_on(self, led_type):\n if not GPIO.input(led_type.value):\n GPIO.output(led_type.value, GPIO.HIGH)\n logging.info('LED: {} - Status: {}'.format(led_type, GPIO.HIGH))", "def styled_status(enabled, bold=True):\n return click.style('Enabled' if enabled else 'Disabled', 'green' if enabled else 'red', bold=bold)", "def show_setting(message,\r\n toggle):\r\n\r\n if toggle:\r\n display.noteprint((message,alerts.ON))\r\n else:\r\n display.noteprint((message,alerts.OFF))", "def setDisabledColor(*args):", "def setDisabledColor(*args):", "def pin_state(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n value = gpio.input(port_num)\n return value", "def _iotool_enable_yellow_command(self):\n return self._iotool.commands.set_low(self._spconfig.IOTOOL_GREEN_YELLOW_SWITCH_PIN)", "def show_box(self):\n self.permanent_show = not self.permanent_show", "def toggle(self):\r\n self._variable.set(not self._variable.get()) \r\n self._activate()", "def control_lights(state):\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led],state[led])", "def clickWhiteReference(self, event):\n if self.whiteReference is None:\n self.whiteReference = self.spectrometer.getSpectrum()\n self.lightBtn.color = '0.99'\n else:\n self.whiteReference = None\n self.lightBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def change_light(self):\n self._light_status = not self._light_status", "def toggle_power(self, duration, 
wait):\n payload = {\"duration\": duration}\n response = requests.post(self.__api_url('toggle'.format(self.name)), data=payload, headers=self.headers)\n if wait:\n time.sleep(duration)\n return response.text", "def on_gas_filled_toggled(self, checked):\n # TODO: not implemented yet\n if checked:\n self.gas_set = 1\n self.VI_gas_set.setEnabled(True)\n else:\n self.gas_set = 0\n self.VI_gas_set.setEnabled(False)", "def turnLightOff(ID):\n dislin.litmod(ID, 'OFF')", "def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val" ]
[ "0.5491586", "0.5272434", "0.51687783", "0.5140479", "0.5102771", "0.50949675", "0.49874344", "0.49179575", "0.48726612", "0.4838384", "0.4801307", "0.47595447", "0.47443792", "0.47273305", "0.4704383", "0.4703406", "0.46984416", "0.4670263", "0.46450222", "0.46340665", "0.46057907", "0.4601846", "0.4601846", "0.45938054", "0.45886624", "0.45734787", "0.45720023", "0.45546064", "0.45523235", "0.45389196", "0.45214957", "0.45133522", "0.44978097", "0.4480587", "0.44751287", "0.44527227", "0.44512457", "0.4439135", "0.44383335", "0.44241253", "0.4422736", "0.44222823", "0.44097623", "0.44064146", "0.4386668", "0.4386246", "0.43832272", "0.4374053", "0.4367011", "0.4363214", "0.4362759", "0.43609348", "0.4358566", "0.43503115", "0.43486392", "0.43422204", "0.43384978", "0.43360212", "0.43200207", "0.43154994", "0.43118158", "0.4304932", "0.42955273", "0.42933148", "0.42820314", "0.4276972", "0.42749125", "0.42355466", "0.4234416", "0.42332527", "0.42122784", "0.42120618", "0.42093748", "0.4208463", "0.42082033", "0.42053577", "0.4204708", "0.42005658", "0.41962412", "0.41942963", "0.4186567", "0.41786653", "0.41757914", "0.4174289", "0.4167968", "0.41650075", "0.41614863", "0.41479355", "0.41479355", "0.4146214", "0.41455212", "0.4144374", "0.4142105", "0.41368827", "0.41326204", "0.41303736", "0.41236696", "0.41178474", "0.41141167", "0.41132146" ]
0.8160374
0
Click the ``Confirm`` button and confirm the dialog.
def confirm(self): with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')", "def __window_confirm(self, text):\n return True", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def javaScriptConfirm(self, frame, message):\n\n if self._robot._confirm_expected is None:\n raise Exception('You must specified a value to confirm \"%s\"' %\n message)\n confirmation, callback = self._robot._confirm_expected\n logger.debug(\"confirm('%s')\" % message)\n self._robot._confirm_expected = None\n self._robot.popup_messages = message\n\n if callback is not None:\n return callback()\n return confirmation", "def decision(question):\n return click.confirm(question, show_default=True)", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 
'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def confirm():\n\t\traise NotImplementedError", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)", "def confirmDialog(*args, annotation: Union[AnyStr, List[AnyStr]]=\"\", backgroundColor:\n List[float, float, float]=None, button: Union[AnyStr, List[AnyStr]]=\"\",\n cancelButton: AnyStr=\"\", defaultButton: AnyStr=\"\", dismissString: AnyStr=\"\",\n icon: AnyStr=\"\", message: AnyStr=\"\", messageAlign: AnyStr=\"\", parent:\n AnyStr=\"\", title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def on_confirm_button(self, negotiation_outcome):\n # Send message.\n self.ros_node.send_message(UserInput.NEGOTIATION,\n negotiation_outcome)\n # Reset label and button.\n self.confirm_label.setText(\"Selection sent.\")\n self.confirm_button.setStyleSheet('QPushButton {color: gray;}')\n self.confirm_button.setEnabled(False)", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? [y/N] ') in ('y', 'Y')", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def _confirm(self) -> None:\n\n self.__series.title = self._getTitleFromView()\n\n if len(self.__series.data) == 0:\n self._showMessage(\"Invalid data. 
No data selected.\")\n return\n\n self._result = DialogResult.Ok\n self._close()", "def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)", "def confirm_lnk_click (self, **event_args):\r\n self.raise_event('x-close-alert', value='confirm_email')", "def confirm(self, message):\n raise NotImplementedError", "def wait_for_confirm(self, confirm=True, callback=None):\n\n self._robot._confirm_expected = (confirm, callback)\n self._robot.wait_for(lambda: self._robot._confirm_expected is None)\n return self.popup_messages", "def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? \").lower()\n return not answer.startswith(\"n\")", "def render_confirm(self, h, comp, *args):\n return h.form(\n self.msg, h.br,\n h.input(type='submit', value='ok').action(comp.answer)\n )", "def runAskOkDialog(self, c: Cmdr, title: str, message: str=None, text: str=\"Ok\") -> None:\n if g.unitTesting:\n return\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Information)\n dialog.addButton(text, ButtonRole.YesRole)\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n dialog.exec_()\n finally:\n c.in_qt_dialog = False", "def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()", "def _confirm_action(self, action):\n\t\treturn True", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def confirm_exit(self):\n return True", "def collection_delete_confirm_btn(self):\n collection_delete_confirm_btn_sitem = self.locator_finder_by_xpath(self.collection_delete_confirm_btn_id)\n collection_delete_confirm_btn_sitem.click()\n time.sleep(1)", "def confirm(msg: str = \"Do you want it:\", default: bool = True) -> bool:\n\n question = [\n {\n 'type': 'confirm',\n 'name': 'confirm',\n 'message': msg,\n 'default': default\n }\n ]\n try:\n answer = prompt(question)\n return answer['confirm']\n except KeyError:\n exit = confirm(msg=\"Do you want cancel script\")\n if exit:\n raise SystemExit\n else:\n return confirm(msg, default)", "def buttonOK_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_OK)", "async def async_step_confirm(self, user_input=None):\n errors = {}\n if user_input is not None:\n return await self.async_step_one(user_input=None)\n return self.async_show_form(step_id=\"confirm\", errors=errors)", "def confirm(message):\n if not sys.stdout.isatty():\n return False\n reply = BaseCommand.input(\"\\n{message} [Y/N]:\".format(message=message))\n return reply and reply[0].lower() == 'y'", "def show_confirm_version(name, version, release_notes, confirm, will_push, test):\n\n print()\n print(\"Name: %s\" % name)\n print(\"Version: %s\" % version)\n print()\n\n print(\"Release Notes\")\n print(release_notes)\n\n print()\n\n if will_push:\n print(\"Saying yes will automatically push the tag to `origin`, triggering the release immediately\")\n else:\n print(\"The tag **will not** be pushed 
automatically, you will need to call `git push --tags` yourself\")\n\n if test:\n print()\n print(\"**This will be a dry-run that will not actually release anything permanently.**\")\n\n print()\n\n if confirm:\n val = input(\"Are you sure [y/N]? \")\n if val.lower() != 'y':\n raise GenericError(\"Cancelled by user\", 100)", "def askOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK|wx.CANCEL)", "def _clicked_yes_button(self):\n self.yes = True", "def show_question_dialog(self, title, message):\n dialog = QMessageBox.question(self, title, message, QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n '''dialog.setText(title) # format_secondary_text(message)\n dialog.setInformativeText(message)\n dialog.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n #dialog.addButton(QPushButton('Accept'), QMessageBox.YesRole)\n #dialog.addButton(QPushButton('Cancel'), QMessageBox.RejectRole)\n dialog.setDefaultButton(QMessageBox.Cancel)'''\n #response = dialog.exec_()\n #dialog dialog.destroy()\n return dialog # response", "def yesButton(self):\n \n self.answer=self.yesMessage.lower()\n self.top.destroy()", "def popup(self):\r\n return self.exec_() == QDialog.Accepted", "def _confirm(self, delay_factor=1):\n\n delay_factor = self.select_delay_factor(delay_factor)\n error_marker = \"Nothing to confirm in configuration\"\n command_string = \"confirm\"\n\n if self.check_config_mode():\n self.exit_config_mode()\n\n output = self.send_command(\n command_string=command_string, delay_factor=delay_factor\n )\n\n if error_marker in output:\n raise ValueError(\n \"Confirm failed with following errors:\\n\\n{}\".format(output)\n )\n return output", "def on_ConfirmWalletOP_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "async def confirm(\n ctx,\n default: bool = False,\n content: str = None,\n embed: discord.Embed = None,\n post_action: str = Menu.DELETE_MESSAGE,\n timeout: Union[int, float] = 30.0,\n) -> bool:\n if getattr(ctx, \"assume_yes\", False):\n return True\n\n menu = _ConfirmMenu(\n channel=ctx.channel, bot=ctx.bot, member=ctx.author, content=content, embed=embed\n )\n await menu.prompt(post_action=post_action, timeout=timeout)\n return menu.kwargs.get(\"result\", default)", "def test_alert_pop_up(self):\n\n # locators\n alert_button = 'alertbtn'\n\n # steps\n locate_alert_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, alert_button))\n )\n locate_alert_button.click()\n alert = self.driver.switch_to.alert\n print(alert.text)\n alert.accept()", "def confirmCloseEvent(self):\n dlg = simpleDialogs.ConfirmCloseDialog(self)\n\n close = False\n clearSettings = False\n\n reply = dlg.exec_()\n\n if reply:\n close = True\n\n if dlg.clearSettingsCheck.isChecked():\n clearSettings = True\n\n return close, clearSettings", "async def confirm(ctx, *args: discord.Member):\n await _confirm(args)", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? [y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def proceed():\n c_print(\"********** PROCEED? 
**********\")\n # capture user input\n confirm = input(\" \" * 36 + \"(y/n) \")\n # quit script if not confirmed\n if confirm.lower() != \"y\":\n c_print(\"******* EXITING SCRIPT *******\")\n print(\"~\" * 80)\n exit()\n else:\n c_print(\"********* PROCEEDING *********\")", "def get_confirmation():\n inp = PInput(\"#> \")\n\n inp.add_keyword(\"yes\")\n inp.add_keyword(\"no\")\n\n inp.ask()\n ans = inp.get_input()\n\n if ans == \"yes\":\n return True\n else:\n return False", "def question(parent, my_message):\n\n if not isinstance(parent, Gtk.Window):\n parent = None\n\n my_message = str(my_message)\n msg_dialog = Gtk.MessageDialog(transient_for=parent,\n modal=True,\n destroy_with_parent=True,\n message_type=Gtk.MessageType.QUESTION,\n buttons=Gtk.ButtonsType.YES_NO,\n text=_(\"Reborn OS Installer - Confirmation\"))\n msg_dialog.format_secondary_text(my_message)\n response = msg_dialog.run()\n msg_dialog.destroy()\n return response", "def showOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK)", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def yes_no_cancel_popup(title=None,\n text=None):\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_YES, gtk.RESPONSE_YES,\n gtk.STOCK_NO, gtk.RESPONSE_NO,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n hb=gtk.HBox()\n hb.show()\n d.vbox.add(hb)\n\n i=gtk.Image()\n i.set_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_DIALOG)\n i.show()\n hb.pack_start(i, expand=False)\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n hb.add(l)\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n retval=d.run()\n d.destroy()\n return retval", "def __selected(self):\n self.__confDlg = MultiConfirmDialog()\n self.__confDlg.okButton().clicked.connect(self.__onConfirmYes)\n self.__confDlg.cancelButton().clicked.connect(self.__onConfirmNo)\n self.__confDlg.show()", "def move_confirm_btn(self):\n self.wait_for_ajax()\n move_confirm_btn_sitem = self.locator_finder_by_id(self.move_confirm_btn_id, 20)\n move_confirm_btn_sitem.click()\n time.sleep(1)", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. 
If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def confirm(self):\n self.automatically_detected=False\n self.save()", "def confirm_yes():\r\n confirm = raw_input(\"Enter 'yes' to confirm: \")\r\n if confirm == 'yes':\r\n return True\r\n return False", "def confirm(self, prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def confirm():\n if request.method == 'POST':\n user_type = session.get('type', None)\n if user_type == 'Admin':\n return redirect('/index')\n elif user_type == 'Client':\n return redirect('/clients/' + session.get('name'))\n else:\n return redirect('/')\n\n confirmed = request.values['confirmed']\n \n return render_template('confirm.html', confirmed=confirmed)", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def sd_yes_clicked(self, widget, data=None):\n return True", "def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()", "def runAskYesNoCancelDialog(\n self,\n c: Cmdr,\n title: str,\n message: str=None,\n yesMessage: str=\"&Yes\",\n noMessage: str=\"&No\",\n yesToAllMessage: str=None,\n defaultButton: str=\"Yes\",\n cancelMessage: str=None,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setWindowTitle(title)\n # Creation order determines returned value.\n yes = dialog.addButton(yesMessage, ButtonRole.YesRole)\n no = dialog.addButton(noMessage, ButtonRole.NoRole)\n cancel = dialog.addButton(cancelMessage or 'Cancel', ButtonRole.RejectRole)\n if yesToAllMessage:\n dialog.addButton(yesToAllMessage, ButtonRole.YesRole)\n if defaultButton == \"Yes\":\n dialog.setDefaultButton(yes)\n elif defaultButton == \"No\":\n dialog.setDefaultButton(no)\n else:\n dialog.setDefaultButton(cancel)\n try:\n c.in_qt_dialog = True\n dialog.raise_() # #2246.\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return {\n 0: 'yes', 1: 'no', 2: 'cancel', 3: 'yes-to-all',\n }.get(val, 'cancel')", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def click_save_changes_button(self):\n self.click_element(self.save_changes_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def validate_confirm(self, field):\n if 
not field.data == \"CLOSE\":\n raise ValidationError('Please enter \"CLOSE\".')", "def onAccepted():\n dialog.done(1)", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def _onConfirmPressed (self):\n self._classSelectionMenu.createCharacter()", "def confirm(message: str = \"Confirm?\", suffix: str = \" (y/n) \") -> bool:\n session = create_confirm_session(message, suffix)\n return session.prompt()", "def yesButton(self):\n \n self.answer=\"yes\"\n self.top.destroy()", "def button_clicked(self):\n sender = self.sender()\n if self._buttons_active:\n message_box = QtGui.QMessageBox()\n quit_button = message_box.addButton('Quitter',\n QtGui.QMessageBox.RejectRole)\n if (sender.row, sender.col, sender.text()) in self._solution:\n message_box.setText('Félicitations, vous avez deviné juste !')\n else:\n message_box.setText('Dommage, vous avez perdu !')\n message_box.setInformativeText('Vous pouvez proposer une '\n 'autre sortie, ou afficher de '\n 'nouveau les objets.')\n show_items_button = message_box.addButton('Voir objets',\n QtGui.QMessageBox.AcceptRole)\n play_again_button = message_box.addButton( 'Réessayer',\n QtGui.QMessageBox.AcceptRole)\n message_box.setDefaultButton(play_again_button)\n message_box.exec()\n if message_box.clickedButton() == quit_button:\n self.close()\n elif message_box.clickedButton() == show_items_button:\n self.reset_game()", "def consent(s, eType, eVal):\n try:\n import maya.cmds as cmds # Is Maya active? Ask using their GUI\n answer = cmds.confirmDialog(t=eType.__name__, m=CONFIRM_MSG, b=(\"Yes\",\"No\"), db=\"Yes\", cb=\"No\", ds=\"No\")\n return \"Yes\" == answer\n except ImportError:\n return True # No means to ask? Ah well ...", "def confirmarOperacion(self):\n if self.productosAgregados == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"No se ha agregado ningun producto\")\n else:\n ventana = Cobrar(self,self.calcularTotal(),self.factura,self.sesion)\n ventana.exec_()\n if self.facturaCobrada:\n QtGui.QMessageBox.information(self,\"Venta\",\"La venta se ha realizado con exito\")\n data = {}\n data[\"numero\"] = self.factura.numero\n data[\"fecha\"] = self.factura.fecha_emision\n data[\"detalles\"] = self.data.values()\n data[\"formaPago\"] = self.formapago\n generarFactura(data)\n self.factura.setObra(self.obraSocialSeleccionada)\n self.factura.modificar(self.sesion)\n self.limpiarVentana()\n else:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura aun no ha sido cobrada\")", "def display_confirm(self, text, password):\n return self.display_prompt(text) == password", "def confirm(\n\t\ttext: str,\n\t\tdefault: bool = False,\n\t\tabort: bool = False,\n\t\tprompt_suffix: str = \": \",\n\t\tshow_default: bool = True,\n\t\terr: bool = False,\n\t\t):\n\n\tprompt = _build_prompt(text, prompt_suffix, show_default, \"Y/n\" if default else \"y/N\")\n\n\twhile True:\n\t\ttry:\n\t\t\tvalue = _prompt(prompt, err=err, hide_input=False).lower().strip()\n\t\texcept (KeyboardInterrupt, EOFError):\n\t\t\traise click.Abort()\n\n\t\tif value in ('y', \"yes\"):\n\t\t\trv = True\n\t\telif value in ('n', \"no\"):\n\t\t\trv = False\n\t\telif value == '':\n\t\t\trv = default\n\t\telse:\n\t\t\tclick.echo(\"Error: invalid input\", err=err)\n\t\t\tcontinue\n\t\tbreak\n\n\tif abort and not rv:\n\t\traise click.Abort()\n\n\treturn rv", "def on_okButton_clicked(self):\n self.accept=True", "def ok_centrify_license_dlg(dsk_session):\n cntry_dlg = try_find_element(dsk_session, FindElementBy.CLASS, \"#32770\", 5, True)\n if cntry_dlg is not 
None:\n ok_button = try_find_element(cntry_dlg, FindElementBy.NAME, \"Ok\", 1, True)\n if ok_button is not None:\n ok_button.click()\n else:\n okay_button = try_find_element(cntry_dlg, FindElementBy.NAME, \"OK\", 1, True)\n if okay_button is not None:\n okay_button.click()\n else:\n yes_button = try_find_element(cntry_dlg, FindElementBy.NAME, \"Yes\", 1, True)\n if yes_button is not None:\n yes_button.click()\n else:\n raise Exception(\"Ok or Yes button not found in Dialog Box.\")", "def on_outcome_selected(self, selected):\n self.negotiation_outcome = selected\n self.confirm_label.setText(\"PLEASE CONFIRM: \\nSend {}?\".format(\n selected))\n self.confirm_button.setStyleSheet('QPushButton {color: red;}')\n self.confirm_button.setEnabled(True)", "def on_apply_clicked(self,button):\n\t\tdialog = ConfirmPerformActions()\n\t\t\n\t\tresponse = dialog.run()\n\n\t\tif response == Gtk.ResponseType.OK:\n \n\t\t\tdialog.destroy()\n\t\t\tself.list_partitions.perform_actions()\n\t\t\t\n\t\telif response == Gtk.ResponseType.CANCEL:\n\t\t\tdialog.destroy()", "def runAskYesNoDialog(self,\n c: Cmdr, title: str, message: str=None, yes_all: bool=False, no_all: bool=False,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n # Creation order determines returned value.\n yes = dialog.addButton('Yes', ButtonRole.YesRole)\n dialog.addButton('No', ButtonRole.NoRole)\n # dialog.addButton('Cancel', ButtonRole.RejectRole)\n if yes_all:\n dialog.addButton('Yes To All', ButtonRole.YesRole)\n if no_all:\n dialog.addButton('No To All', ButtonRole.NoRole)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setDefaultButton(yes)\n if c:\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n else:\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return_d = {0: 'yes', 1: 'no'}\n if yes_all and no_all:\n return_d [2] = 'yes-all'\n return_d [3] = 'no-all'\n elif yes_all:\n return_d [2] = 'yes-all'\n elif no_all:\n return_d [2] = 'no-all'\n return return_d.get(val, 'cancel')", "def push_button_ok_clicked(self) -> None:\n if self.save():\n self.close()", "def confirm(msg: str) -> bool:\n res = input(msg + \" (Y/n) > \")\n if res == 'Y' or res == 'y' or res == 'yes' or res == 'Yes' or res == \"\":\n return True\n return False", "def yes_no_dialog(self, message):\n reply = QMessageBox.question(self, \"Are you sure?\",\n message, QMessageBox.Yes, QMessageBox.Cancel)\n\n if reply == QMessageBox.Yes:\n return True\n else:\n return False", "def confirm(msg=\"\"):\n answer = \"\"\n if not msg: msg = \"OK to continue\"\n while answer not in [\"y\", \"n\"]:\n answer = input(msg+\" [Y/N]? 
\").lower()\n return answer == \"y\"", "def onExit(self, event):\r\n\t\tdlg = wx.MessageDialog(self, \"Are you sure you wish to exit?\",\r\n\t\t\t\t\t\t\t\"Confirm Exit\", wx.CANCEL|wx.OK|wx.ICON_QUESTION)\r\n\t\tresult = dlg.ShowModal()\r\n\t\tdlg.Destroy()\r\n\t\tif result == wx.ID_OK: sys.exit()", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass" ]
[ "0.7253925", "0.7246785", "0.72352403", "0.7205", "0.716402", "0.7038067", "0.70259655", "0.700242", "0.6850886", "0.6840978", "0.682001", "0.6809211", "0.6784496", "0.67724127", "0.6758107", "0.6704055", "0.65794057", "0.6546828", "0.65444165", "0.6512818", "0.6483923", "0.6467671", "0.64611477", "0.64287895", "0.64189684", "0.6396472", "0.63624436", "0.6360682", "0.6341616", "0.6277734", "0.62590486", "0.624782", "0.624677", "0.6236459", "0.6232166", "0.6230132", "0.621994", "0.62101954", "0.61492723", "0.6136429", "0.6111828", "0.60951793", "0.60875463", "0.59871215", "0.5986598", "0.5962804", "0.5954142", "0.594859", "0.5943719", "0.59173113", "0.59037375", "0.5902275", "0.5897266", "0.5888807", "0.58733267", "0.58665365", "0.5854326", "0.5837355", "0.5835813", "0.58317536", "0.5830316", "0.5822082", "0.5816421", "0.58155817", "0.5805903", "0.5805087", "0.58000606", "0.57980365", "0.5792008", "0.5766694", "0.57507086", "0.574364", "0.5742481", "0.57372725", "0.5728768", "0.57212394", "0.5710162", "0.5703456", "0.56963235", "0.56898195", "0.56882036", "0.5678832", "0.56742775", "0.5674175", "0.56661075", "0.5665108", "0.56495404", "0.564648", "0.5643718", "0.56376505", "0.56354403", "0.56156516", "0.5612427", "0.5609281", "0.56030655", "0.55983675", "0.55967516", "0.55733025", "0.55733025", "0.55733025" ]
0.85760707
0
Click the ``Confirm`` button and cancel the dialog.
def cancel(self): with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)", "def click_cancel(self):\n self.click_element(self.cancel_button_selector)", "def cancelButton(self):\n \n self.answer=\"cancel\"\n self.top.destroy()", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def __window_confirm(self, text):\n return True", "def on_cancel(self, *args):\n self.response(Gtk.ResponseType.CANCEL)", "def click_win_dispute_cancel_button(self):\n self.click_element(self.win_dispute_cancel_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass\n self.wait_for_ajax_spinner_load()", "def decision(question):\n return click.confirm(question, show_default=True)", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def pressCancel(self):\n self.close()", "def sgnCancel(self):\n\n self.uiCloseWindow()", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')", "def PresentDialog_Confirm_Call( 
message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def click_statement_entry_cancel_button(self):\n self.click_element(self.statement_entry_cancel_button_locator, False, True)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False", "def confirm():\n\t\traise NotImplementedError", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def buttonCancel_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_CANCEL)", "def javaScriptConfirm(self, frame, message):\n\n if self._robot._confirm_expected is None:\n raise Exception('You must specified a value to confirm \"%s\"' %\n message)\n confirmation, callback = self._robot._confirm_expected\n logger.debug(\"confirm('%s')\" % message)\n self._robot._confirm_expected = None\n self._robot.popup_messages = message\n\n if callback is not None:\n return callback()\n return confirmation", "def cancelButton(self):\n \n self.answer=-1\n self.top.destroy()", "def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)", "def confirmDialog(*args, annotation: Union[AnyStr, List[AnyStr]]=\"\", backgroundColor:\n List[float, float, float]=None, button: Union[AnyStr, List[AnyStr]]=\"\",\n cancelButton: AnyStr=\"\", defaultButton: AnyStr=\"\", dismissString: AnyStr=\"\",\n icon: AnyStr=\"\", message: AnyStr=\"\", messageAlign: AnyStr=\"\", parent:\n AnyStr=\"\", title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def onCancelButtonClick(self, event):\n self.EndModal(wx.ID_CANCEL)\n event.Skip()", "def cancel_fedcm_dialog(self):\n pass", "def alert_cancel(self):\n self._alert_accept_cancel(False)", "def confirm(msg: str = \"Do you want it:\", default: bool = True) -> bool:\n\n question = [\n {\n 'type': 'confirm',\n 'name': 'confirm',\n 'message': msg,\n 'default': default\n }\n ]\n try:\n answer = prompt(question)\n return answer['confirm']\n except KeyError:\n exit = confirm(msg=\"Do you want cancel script\")\n if exit:\n raise SystemExit\n else:\n return confirm(msg, default)", "def confirm_lnk_click (self, **event_args):\r\n self.raise_event('x-close-alert', value='confirm_email')", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def runAskYesNoCancelDialog(\n self,\n c: Cmdr,\n title: str,\n message: str=None,\n yesMessage: str=\"&Yes\",\n noMessage: str=\"&No\",\n yesToAllMessage: str=None,\n defaultButton: str=\"Yes\",\n cancelMessage: str=None,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setWindowTitle(title)\n # Creation order determines returned value.\n yes = dialog.addButton(yesMessage, ButtonRole.YesRole)\n no = dialog.addButton(noMessage, ButtonRole.NoRole)\n cancel = dialog.addButton(cancelMessage or 'Cancel', ButtonRole.RejectRole)\n if yesToAllMessage:\n dialog.addButton(yesToAllMessage, ButtonRole.YesRole)\n if defaultButton == \"Yes\":\n dialog.setDefaultButton(yes)\n elif defaultButton == \"No\":\n dialog.setDefaultButton(no)\n else:\n dialog.setDefaultButton(cancel)\n try:\n c.in_qt_dialog = True\n dialog.raise_() # #2246.\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n # val is the same as the creation order.\n # 
Tested with both Qt6 and Qt5.\n return {\n 0: 'yes', 1: 'no', 2: 'cancel', 3: 'yes-to-all',\n }.get(val, 'cancel')", "def cancel(self): #$NON-NLS-1$\r", "def askOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK|wx.CANCEL)", "def on_cancel_click(self):\r\n\t\t# self.parent.show()\r\n\t\tself.close()", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def yes_no_cancel_popup(title=None,\n text=None):\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_YES, gtk.RESPONSE_YES,\n gtk.STOCK_NO, gtk.RESPONSE_NO,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n hb=gtk.HBox()\n hb.show()\n d.vbox.add(hb)\n\n i=gtk.Image()\n i.set_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_DIALOG)\n i.show()\n hb.pack_start(i, expand=False)\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n hb.add(l)\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n retval=d.run()\n d.destroy()\n return retval", "def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def cancelarOperacion(self):\n\n ok = QtGui.QMessageBox.information(self,\"Confirmacion\",\"¿Desea cancelar la operacion?\",\\\n QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)\n if (ok==1):\n self.limpiarVentana()", "def cancel():\n global confirmation, output1, place_for_enter\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)", "def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)", "def AskYesNoCancel(question, default = 0, yes=None, no=None, cancel=None, id=262):\n\n raise NotImplementedError(\"AskYesNoCancel\")", "def messageCancel(self,message):\n if self.app.DEBUG:\n print 'Dialog: Parent: %s.messageCancel'%self.__class__\n return self.message(message,style=wx.YES_NO|wx.ICON_QUESTION | wx.CANCEL)", "def confirm(self, message):\n raise NotImplementedError", "def YesNoCancelDialog( message, caption, style=wx.ICON_QUESTION ):\n return MessageDialog( message, caption, style | wx.YES_NO | wx.CANCEL )", "def od_cancel_clicked(self, widget, data=None):\n self.open_chooser.hide()", "def onBtnCancelClicked(self):\n self.close()", "def push_button_cancel_clicked(self) -> None:\n self._edit_pair = None\n self.close()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def _confirm(self) -> None:\n\n self.__series.title = 
self._getTitleFromView()\n\n if len(self.__series.data) == 0:\n self._showMessage(\"Invalid data. No data selected.\")\n return\n\n self._result = DialogResult.Ok\n self._close()", "def closeEvent(self, event):\r\n reply = QMessageBox.question(self, 'Message', \"Are you sure to quit?\", QMessageBox.Yes|QMessageBox.No,\r\n QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n event.accept()\r\n else:\r\n event.ignore()\r\n return", "def click_add_new_note_cancel_button(self):\n self.click_element(self.cancel_dispute_note_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass", "def yes_no_dialog(self, message):\n reply = QMessageBox.question(self, \"Are you sure?\",\n message, QMessageBox.Yes, QMessageBox.Cancel)\n\n if reply == QMessageBox.Yes:\n return True\n else:\n return False", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def nd_cancel_clicked(self, widget, data=None):\n self.new_chooser.hide()", "def confirm_exit(self):\n return True", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def on_confirm_button(self, negotiation_outcome):\n # Send message.\n self.ros_node.send_message(UserInput.NEGOTIATION,\n negotiation_outcome)\n # Reset label and button.\n self.confirm_label.setText(\"Selection sent.\")\n self.confirm_button.setStyleSheet('QPushButton {color: gray;}')\n self.confirm_button.setEnabled(False)", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? [y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def closeEvent(self, event):\n reply = QtWidgets.QMessageBox.question(\n self,\n 'Message',\n \"Are you sure to quit?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.Yes:\n self.onclick_exit()\n event.accept()\n else:\n event.ignore()", "def dismiss(self):\n with self.handle_alert():\n self.q(css='button#alert').first.click()", "def on_btnCancelarsalir_clicked(self, widget):\n variables.vendialogsalir.connect('delete-event', lambda w, e: w.hide() or True)\n variables.vendialogsalir.hide()", "def validate_confirm(self, field):\n if not field.data == \"CLOSE\":\n raise ValidationError('Please enter \"CLOSE\".')", "def click_upload_cancel_button(self):\n self.click_element(self.upload_cancel_button_locator, script_executor=True)", "def reject(self):\r\n QtGui.QDialog.reject(self)", "def cancel(self, event=None):\n\n self.dialog_selection.clear()\n self.dialog.destroy()", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? 
\").lower()\n return not answer.startswith(\"n\")", "def onExit(self, event):\r\n\t\tdlg = wx.MessageDialog(self, \"Are you sure you wish to exit?\",\r\n\t\t\t\t\t\t\t\"Confirm Exit\", wx.CANCEL|wx.OK|wx.ICON_QUESTION)\r\n\t\tresult = dlg.ShowModal()\r\n\t\tdlg.Destroy()\r\n\t\tif result == wx.ID_OK: sys.exit()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def runAskOkDialog(self, c: Cmdr, title: str, message: str=None, text: str=\"Ok\") -> None:\n if g.unitTesting:\n return\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Information)\n dialog.addButton(text, ButtonRole.YesRole)\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n dialog.exec_()\n finally:\n c.in_qt_dialog = False", "def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()", "def yesButton(self):\n \n self.answer=self.yesMessage.lower()\n self.top.destroy()", "def confirm(message):\n if not sys.stdout.isatty():\n return False\n reply = BaseCommand.input(\"\\n{message} [Y/N]:\".format(message=message))\n return reply and reply[0].lower() == 'y'", "def cancel_on_pos(self):\n # Start a transaction\n self.log.info(\"Starting a transaction...\")\n pos.click('generic item')\n\n # Click customer id and then cancel\n self.log.info(\"Clicking the customer ID button...\")\n if pos.is_element_present(self.customer_id_button, timeout = self.wait_time):\n pos.click('customer id')\n else:\n tc_fail(\"Customer ID button did not appear.\")\n\n self.log.info(\"Clicking cancel...\")\n if pos.is_element_present(self.manual_button, timeout = self.wait_time):\n pos.click('cancel')\n else:\n tc_fail(\"Did not change to the customer ID screen.\")\n\n msg = pos.read_message_box(timeout = self.wait_time)\n if not msg:\n tc_fail(\"No popup appeared.\")\n elif not \"cancel\" in msg.lower():\n tc_fail(\"Did not display the correct popup message after cancelling.\")\n\n pos.click('ok')\n\n # Make sure we returned to the right screen after cancelling\n if pos.is_element_present(self.customer_id_button, timeout = self.wait_time):\n self.log.info(\"Successfully cancelled input of customer ID!\")\n else:\n tc_fail(\"Did not return from customer ID screen.\")", "def confirmCloseEvent(self):\n dlg = simpleDialogs.ConfirmCloseDialog(self)\n\n close = False\n clearSettings = False\n\n reply = dlg.exec_()\n\n if reply:\n close = True\n\n if dlg.clearSettingsCheck.isChecked():\n clearSettings = True\n\n return close, clearSettings", "def cancel(self):\n return self.RES_OK", "def _confirm_action(self, action):\n\t\treturn True", "def click_cancel_edited_target_buy_policy_button(self):\n self.click_element(self.cancel_edited_target_buy_policy_button_locator)", "def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return 
raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? [y/N] ') in ('y', 'Y')", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def _onButtonCancelClick(self, widget):\n self.delete()", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def TopUpCancel_clicked_cb(self, data=None):\n self.GuiReset_clicked_cb()\n self.builder.get_object('TopUpGui').hide()", "def _cancel(self, __button):\r\n\r\n self.destroy()" ]
[ "0.80490994", "0.7658619", "0.75887465", "0.718055", "0.7045372", "0.6991206", "0.6899526", "0.6881284", "0.6846061", "0.683484", "0.681738", "0.68133414", "0.6789782", "0.6765807", "0.6764634", "0.6748562", "0.6728313", "0.67058414", "0.6674141", "0.66671133", "0.6665826", "0.6620237", "0.66107553", "0.66017795", "0.65847355", "0.65701896", "0.65663785", "0.65618753", "0.6554137", "0.6553285", "0.6519021", "0.65059775", "0.6500404", "0.64982814", "0.64735055", "0.6468597", "0.644506", "0.64388", "0.6437026", "0.6428631", "0.64273685", "0.6401501", "0.63985866", "0.6394535", "0.6392503", "0.6363628", "0.63626117", "0.6338864", "0.631992", "0.6300027", "0.62855065", "0.6284505", "0.62805337", "0.6273678", "0.62649703", "0.62649703", "0.62649703", "0.62649703", "0.62532824", "0.6252067", "0.62429655", "0.6230169", "0.6218781", "0.6218781", "0.6218214", "0.6214344", "0.6207134", "0.6207134", "0.62035507", "0.62010825", "0.6179738", "0.61791056", "0.6163457", "0.61583555", "0.615527", "0.61491394", "0.6146194", "0.6129755", "0.6124054", "0.6117833", "0.61143434", "0.6110296", "0.6110296", "0.6110296", "0.6110296", "0.6108123", "0.6107938", "0.60873556", "0.60512364", "0.60417175", "0.6034797", "0.6024551", "0.6017697", "0.6014776", "0.60063446", "0.60060996", "0.60060644", "0.599245", "0.5972764", "0.5972599" ]
0.8330856
0
Click the ``Alert`` button and confirm the alert.
def dismiss(self): with self.handle_alert(): self.q(css='button#alert').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_alert_pop_up(self):\n\n # locators\n alert_button = 'alertbtn'\n\n # steps\n locate_alert_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, alert_button))\n )\n locate_alert_button.click()\n alert = self.driver.switch_to.alert\n print(alert.text)\n alert.accept()", "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def acceptAlert(self):\n self.log_info(f\"Browser.acceptAlert: Accepting alert\")\n alert = self.CORE.switch_to.alert\n alert.accept()\n return", "def javaScriptAlert(self, frame, message):\n print 'Alert:', message", "def javaScriptAlert(self, frame, message):\n\n self._robot._alert = message\n self._robot.popup_messages = message\n logger.debug(\"alert('%s')\" % message)", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def show_alert(self, text: str):\n # todo|fixme escaping issue. e.g. \\n in text\n text = str(text).replace('\\n', '\\\\n').replace('\\r', '\\\\r')\n self.browser.ExecuteJavascript(f'alert(\"{text}\")')", "def alert(self, timeout=5):\n page_logger.debug('Switching to alert.')\n WebDriverWait(self.driver, timeout).until(EC.alert_is_present())\n return self.driver.switch_to.alert", "def alert(self, txt, title=\"Alert\"):\r\n self.message(txt, title)", "def accept_alerts(self):\n while True:\n try:\n alert = self.browser.switch_to_alert()\n alert.accept()\n except NoAlertPresentException:\n break", "def dismiss_alert(self):\n self.driver.switch_to.alert.dismiss()", "def verify_alert():\n try:\n success_alert = driver.find_element_by_xpath('//*[@class=\"css-rr2n0f\" or @class=\"toast-title\" or @class=\"alert-success\"]')\n if (success_alert.is_displayed()):\n #wait(7)\n return \"Success\"\n else:\n return \"No Alert detected\"\n except Exception as e:\n return \"No Alert detected\"", "def alert_accept(self):\n self._alert_accept_cancel(True)", "def confirm_lnk_click (self, **event_args):\r\n self.raise_event('x-close-alert', value='confirm_email')", "def alert(self, alert):\n\n self._alert = alert", "def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')", "def check_alert(step, text):\r\n\r\n try:\r\n alert = Alert(world.browser)\r\n assert_equals(alert.text, text)\r\n except WebDriverException:\r\n # PhantomJS is kinda poor\r\n pass", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. 
Exiting...\")\n exit(0)\n else: return", "def close_alert(self, **kws):\r\n self.raise_event('x-close-alert', value='login')", "def javaScriptConfirm(self, frame, message):\n\n if self._robot._confirm_expected is None:\n raise Exception('You must specified a value to confirm \"%s\"' %\n message)\n confirmation, callback = self._robot._confirm_expected\n logger.debug(\"confirm('%s')\" % message)\n self._robot._confirm_expected = None\n self._robot.popup_messages = message\n\n if callback is not None:\n return callback()\n return confirmation", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def alert(self, name, url):\n email = \"\"\n if app.is_checked.get():\n email = app.email_addr_entry.get()\n SendEmail.sendEmail(email, name, url)\n\n # tempWin = tk.Tk() # Temporary, invisible window to use as a popup's root\n # # This way the root will always be in the same thread as the popup\n # tempWin.withdraw()\n # popup = ItemAlertDialogue(tempWin, \"Item Restocked!\", name, url)\n\n kwargs = {\n \"title\": \"Item Stock Tracker\",\n \"ticker\": \"~Item Stock Tracker~\",\n \"app_name\": \"Item Stock Tracker\",\n \"timeout\": 10,\n \"message\": name + \" is restocked! \",\n }\n plyer.notification.notify(**kwargs)\n\n popup = ItemAlertDialogue(self, \"Item Restocked!\", name, url)", "def __window_alert(self, text):\n print str(text)\n config.VERBOSE(config.VERBOSE_DEBUG, '[DEBUG] alertmsg: ' + str(text))", "def alert(self, msg):\r\n messagedialog = Gtk.MessageDialog(self, type=1, buttons=1, message_format=msg)\r\n messagedialog.run()\r\n messagedialog.destroy()", "def alert(data: Any) -> None:\n\n root = Container()\n root += Label(\"[210 italic bold]Alert!\")\n root += Label()\n root += Label(str(data))\n\n root.center()\n root.print()\n getch()\n root.wipe()", "def requestAlert(self, text=\"Error\", buttons=None):\n\t\tself.alerts.append((text, buttons))", "def print_alert(text):\n print \"\\n\\n\\n\\n\"\n print text\n print \"\\n\\n\\n\\n\"", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def decision(question):\n return click.confirm(question, show_default=True)", "def trigger_review_slip_alert(driver, buttons=None):\n\n if not buttons:\n buttons = bet_buttons_via_games(driver)\n print(f\"buttons: {buttons}\")\n button = buttons[0]\n button.send_keys(\"\\n\")\n # button.click()\n time.sleep(1)\n accept_review_step_skip(driver)", "def alert_popup(title, message, path):\n root = Tk()\n root.title(title)\n w = 400 # popup window width\n h = 200 # popup window height\n sw = root.winfo_screenwidth()\n sh = root.winfo_screenheight()\n x = (sw - w)/2\n y = (sh - h)/2\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n m = message\n m += '\\n'\n m += path\n w = Label(root, text=m, width=120, height=10)\n w.pack()\n b = Button(root, text=\"Continue\", command=root.destroy, width=10)\n b.pack()\n mainloop()", "def click_save_changes_button(self):\n self.click_element(self.save_changes_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. 
If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def __window_confirm(self, text):\n return True", "def click_save_edited_target_buy_policy_button(self):\n self.click_element(self.save_edited_target_buy_policy_button_locator)\n self.click_element(self.confirmation_popup_locator, error_message='success message locator not found before specified time out')\n self.click_element(self.ok_button_locator, error_message='ok button locator not found before specified time out')\n self.wait_for_ajax_spinner_load()", "def check_disable_failover_and_click_save_check_confirm_on_the_warning_dialog_and_press_ok(driver):\n element = driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__Disable Failover\"]')\n class_attribute = element.get_attribute('class')\n if 'mat-checkbox-checked' not in class_attribute:\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__Disable Failover\"]').click()\n assert wait_on_element(driver, 0.5, 7, '//button[@ix-auto=\"button__SAVE\"]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()\n if 'mat-checkbox-checked' not in class_attribute:\n assert wait_on_element(driver, 0.5, 4, '//h1[contains(.,\"Disable Failover\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__OK\"]').click()", "def _do_studio_prompt_action(intent, action):\r\n assert intent in ['warning', 'error', 'confirmation', 'announcement',\r\n 'step-required', 'help', 'mini']\r\n assert action in ['primary', 'secondary']\r\n\r\n world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))\r\n\r\n action_css = 'li.nav-item > a.action-{}'.format(action)\r\n world.trigger_event(action_css, event='focus')\r\n world.browser.execute_script(\"$('{}').click()\".format(action_css))\r\n\r\n world.wait_for_ajax_complete()\r\n world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))", "def click_button(self):\n self.widgets.get('button').click()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def alert(self):\n\n # Get board logger\n board_logger = self.get_board_logger()\n\n # Create new Event object to handle event communication\n event = Event(datetime.now(), self.get_input_status())\n \n event.alert(self.__ip, board_logger)\n\n if (self.get_input_status() == 1):\n \n board_logger.info(\"Alarm state active; starting check alert \" \n + \"cycle for 6 cycles.\")\n \n self.check_alert(event)", "def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : 
%s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def _confirm_action(self, action):\n\t\treturn True", "def alert(self):\n return self._alert", "def get_alert_text(self):\n return self.driver.switch_to.alert.text", "def success(msg):\n click.secho(msg, fg='green')", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def confirm():\n\t\traise NotImplementedError", "def _confirm(self) -> None:\n\n self.__series.title = self._getTitleFromView()\n\n if len(self.__series.data) == 0:\n self._showMessage(\"Invalid data. No data selected.\")\n return\n\n self._result = DialogResult.Ok\n self._close()", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def collection_delete_confirm_btn(self):\n collection_delete_confirm_btn_sitem = self.locator_finder_by_xpath(self.collection_delete_confirm_btn_id)\n collection_delete_confirm_btn_sitem.click()\n time.sleep(1)", "def click_statement_summary_save_button(self):\n self.click_element(self.statement_summary_save_button_locator, True, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_locator), 'success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise", "def html_alert(string):\n return html_div(string, \"alert\")", "def doAuthenticateAlert(self, username, password, timeout=10.0):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n cmdId = self.authenticateDialog(username=username, password=password)\n if self.isAlertAuthenticated(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret", "def save_response(self):\r\n self.q(css='input.save-button').first.click()\r\n EmptyPromise(\r\n lambda: 'save' in self.alert_message.lower(),\r\n \"Status message saved\"\r\n ).fulfill()", "def select_ok_pop_up_item(self):\n if self.driver.wait_for_object(\"retargeting_data_ok_pop_up_btn\", raise_e=False):\n self.driver.click(\"retargeting_data_ok_pop_up_btn\")", "def clickMethod(self):\n if not self.date_format_correct():\n msg = QMessageBox()\n msg.setWindowTitle(\"Warning\")\n msg.setWindowIcon(QIcon(\"icon.png\"))\n msg.setIcon(QMessageBox.Warning)\n\n font = msg.font()\n font.setPointSize(self.font_size)\n msg.setFont(font)\n\n msg.setText(\"Date entered in incorrect format.\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.setDefaultButton(QMessageBox.Ok)\n\n x = msg.exec_() # show our messagebox\n\n elif not self.date_temporal_paradox_free():\n msg = QMessageBox()\n msg.setWindowTitle(\"Warning\")\n msg.setWindowIcon(QIcon(\"icon.png\"))\n msg.setIcon(QMessageBox.Warning)\n\n font = msg.font()\n font.setPointSize(self.font_size)\n msg.setFont(font)\n\n msg.setText(\n \"Date entered is invalid 
since it is after today's date.\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.setDefaultButton(QMessageBox.Ok)\n\n x = msg.exec_() # show our messagebox\n\n elif not self.date_already_has_data():\n msg = QMessageBox()\n msg.setWindowTitle(\"Warning\")\n msg.setWindowIcon(QIcon(\"icon.png\"))\n msg.setIcon(QMessageBox.Warning)\n\n font = msg.font()\n font.setPointSize(self.font_size)\n msg.setFont(font)\n\n msg.setText(\"Date entered already has a weight value of \"\n f\"{self.weight_edit.text()} in the database. Do you \"\n \"wish to override this value?\")\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msg.setDefaultButton(QMessageBox.Ok)\n msg.buttonClicked.connect(self.okay_button2)\n x = msg.exec_() # show our messagebox\n\n else:\n msg = QMessageBox()\n msg.setWindowTitle(\"New entry\")\n msg.setWindowIcon(QIcon(\"icon.png\"))\n font = msg.font()\n font.setPointSize(self.font_size)\n msg.setFont(font)\n msg.setText(\"New entry added to weight log\")\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msg.setDefaultButton(QMessageBox.Ok)\n msg.buttonClicked.connect(self.okay_button1)\n x = msg.exec_() # show our messagebox", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def alerttext(self):\n text = self.driver.find_element(*LoginPageLocators.ALERT)\n return text.text", "def click_audit_account_save_button(self):\n self.click_element(self.audit_account_save_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.audit_account_save_success_message_locator), 'audit account save success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def click_on_submit(context):\n submit_for_approval = context.browser.find_elements_by_css_selector(\n \"input[type='button'][value='Submit for Approval']\")\n for item in submit_for_approval:\n item.click()\n time.sleep(10)", "def click_buy_and_sell_deal_save_button(self):\n self.click_element(self.save_vendor_profile_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise", "def print_success_msg(msg):\n click.secho(msg, fg='green', file=sys.stdout)", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def alert(title: str, text: str, *, level: str = \"warning\", ID: str = None):\n if level not in (\"info\", \"warning\"):\n raise ValueError(\"Level must be among 'info', 'warning'\")\n if alert.has_disable_been_called:\n raise RuntimeError(\n \"The function alert() is called after disable_old_alert() has generated \"\n \"the javascript code to handle hidding closed alerts. 
This breaks the \"\n \"system completely, make sure disable_old_alerts is called last\"\n )\n if ID is None:\n alert_id = alert.numid\n alert.numid += 1\n else:\n alert_id = str(ID)\n alert.strid.append(alert_id)\n\n indent = \" \" * 4 * 4\n msg = format_html(f\"<div>{text!s}</div>\").replace(\"\\n\", \"\\n\" + indent)\n return textwrap.dedent(\n f\"\"\"\\\n <input type=\"hidden\" class=\"alert-hidder\" name=\"attr_alert-{alert_id}\" value=\"0\"/>\n <div class=\"alert alert-{level}\">\n <div>\n <h3> {level.title()} - {title}</h3>\n {msg}\n </div>\n <label class=\"fakebutton\">\n <input type=\"checkbox\" name=\"attr_alert-{alert_id}\" value=\"1\" /> ×\n </label>\n </div>\"\"\"\n )", "def click_download_button(self):\n self._basket.click_download_button()", "def try_dismiss_popup(self):\n try:\n self._driver.switch_to.alert.accept\n logger.warning(\"Javascript alert found, dismissing.\")\n return True\n except NoAlertPresentException:\n # There is no alert box.\n try:\n popup_keywords = {\"Modal\", \"Popup\", \"Overlay\"}\n # See if there is some sort of close button we can click.\n popup_xpath = [f\"\"\"contains(., \"{keyword}\") or contains(., \"{keyword.lower()}\")\"\"\" for keyword in popup_keywords]\n popup_xpath = \"\"\"//*[@*[\"\"\" + \" or \".join(popup_xpath) + \"\"\"]]\"\"\"\n # for keyword in popup_keywords:\n # modal_xpath += f\"\"\"//*[@*[contains(., \"{keyword}\") or contains(., \"{keyword.lower()}\")\"\"\" + \\\n # \"\"\" or contains(., \"popup\") or contains(., \"Popup\")\"\"\" + \\\n # \"\"\" or contains(., \"overlay\") or contains(., \"Overlay\")]]\"\"\"\n # The close button can either be a button or something with role=button.\n close_button_xpaths = {\n \"\"\"//*[@role=\"button\"][@demod_reachable=\"true\"][@*[contains(., \"close\") or contains(., \"Close\")]]\"\"\",\n \"\"\"//button[@demod_reachable=\"true\"][@*[contains(., \"close\") or contains(., \"Close\")]]\"\"\"\n }\n close_button_xpaths = {popup_xpath + close_button_xpath for close_button_xpath in close_button_xpaths}\n close_button_xpath = \"|\".join(close_button_xpaths)\n close_button = self._driver.find_element_by_xpath(close_button_xpath)\n logger.warning(\"Popup found, dismissing.\")\n close_button.click()\n return True\n except NoSuchElementException:\n return False", "def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "def click_on_analyze_and_complete_inline_action(self, inline_item):\n 
self.select_inline_action_item(inline_item)\n self.wait_for_ajax_spinner_load(300)\n try:\n self.wait().until(EC.presence_of_element_located(self.analyze_and_complete_confirmation_popup_locator), 'analyze and complete confirmation popup locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "async def alert(self, entry):\n\n if self.outputs.get('log.enabled'):\n rssalertbot.alerts.alert_log(self, self.outputs.get('log'), entry)\n\n if self.outputs.get('email.enabled'):\n rssalertbot.alerts.alert_email(self, self.outputs.get('email'), entry)\n\n if self.outputs.get('slack.enabled'):\n await rssalertbot.alerts.alert_slack(self, self.outputs.get('slack'), entry)", "def alert(self, alert_str):\n # Make sure alerts have the same type\n alert_str = str(alert_str)\n self._output_object.add_alert(\n html_tag(plain_to_html(alert_str), alert_str, self.proc)\n )\n self.alerts.append((alert_str, self.proc))", "def alert(self, message):\n try:\n self.send_message(message)\n except Exception as err:\n logger.exception(\n f\"Slack notification to {self.username} failed with {err.__repr__()}\"\n )", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def showOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK)", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def clickDashboard(self):\n self.waitForElement(locator=self._dashboardBtn, locatorType=\"xpath\")\n self.elementClick(locator=self._dashboardBtn, locatorType=\"xpath\")", "def confirm(self, message):\n raise NotImplementedError", "def selection_alert(self):\n self._probe.swj_sequence(136, 0x19bc0ea2e3ddafe986852d956209f392ff)", "def click(self, element):\n element.click()", "def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)", "def waitForAlertPresent(self, *, timeout=5):\n try:\n WebDriverWait(self.CORE, timeout).until(EC.alert_is_present())\n self.log_info(f\"Browser.waitForAlertPresent: Alert is present within {timeout} seconds\")\n return True\n except SeleniumExceptions.TimeoutException:\n self.log_warning(f\"Browser.waitForAlertPresent: Alert did not become present after {timeout} seconds\")\n return False", "def popup(self):\r\n return self.exec_() == QDialog.Accepted", "def alert_cancel(self):\n self._alert_accept_cancel(False)", "def __window_dump(self, text):\n self.alert(text)", "def print_success(cls, text, bold=True):\n click.secho(text, fg=cls.COLORS[cls.SUCCESS], bold=bold)", "def test_dweet_for_alert(self):\n dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n dweet = dweepy.dweet_for(self.my_thing_id, {'alertValue': 11}, key=test_key)\n check_valid_dweet_response(self, dweet, skip_content=True)\n dweet = dweepy.dweet_for(self.my_thing_id, {'alertValue': 5}, key=test_key)\n check_valid_dweet_response(self, dweet, skip_content=True)\n dweet = dweepy.dweet_for(self.my_thing_id, {'alertValue': 10}, key=test_key)\n check_valid_dweet_response(self, dweet, skip_content=True)", "def click_on_upload_button(self):\n upload_button_element = 
self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def uncheck_disable_failover_and_click_save_check_confirm_on_the_warning_dialog_and_press_ok(driver):\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__Disable Failover\"]').click()\n assert wait_on_element(driver, 0.5, 7, '//button[@ix-auto=\"button__SAVE\"]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def consent(s, eType, eVal):\n try:\n import maya.cmds as cmds # Is Maya active? Ask using their GUI\n answer = cmds.confirmDialog(t=eType.__name__, m=CONFIRM_MSG, b=(\"Yes\",\"No\"), db=\"Yes\", cb=\"No\", ds=\"No\")\n return \"Yes\" == answer\n except ImportError:\n return True # No means to ask? Ah well ...", "def you_should_see_the_dashboard(driver):\n rsc.Verify_The_Dashboard(driver)\n if wait_on_element(driver, 2, '//h1[contains(.,\"End User License Agreement - TrueNAS\")]'):\n try:\n assert wait_on_element(driver, 2, '//button[@ix-auto=\"button__I AGREE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__I AGREE\"]').click()\n if wait_on_element(driver, 2, xpaths.button.close, 'clickable'):\n driver.find_element_by_xpath(xpaths.button.close).click()\n except ElementClickInterceptedException:\n assert wait_on_element(driver, 2, xpaths.button.close, 'clickable')\n driver.find_element_by_xpath(xpaths.button.close).click()\n assert wait_on_element(driver, 2, '//button[@ix-auto=\"button__I AGREE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__I AGREE\"]').click()", "def buttonOK_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_OK)", "def add_alert(self, content):\n self._add_content(html_alert(content))", "def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)", "def click_non_traffic_charges_save_changes_button(self):\n non_traffic_charges_save_changes_button_element = self.wait().until(EC.element_to_be_clickable(self.non_traffic_charges_save_changes_button_locator), 'non traffic charges save changes button locator not found before specified time out')\n non_traffic_charges_save_changes_button_element.click()\n self.accept_alert_pop_up()\n try:\n self.wait().until(EC.presence_of_element_located(self.confirmation_popup_locator), 'confirmation pop up locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False" ]
[ "0.80544215", "0.79212785", "0.6950813", "0.6772512", "0.67127967", "0.6550232", "0.6544324", "0.6449943", "0.6366633", "0.6313416", "0.6296438", "0.62774825", "0.6252475", "0.6232385", "0.61924887", "0.6176421", "0.615149", "0.6099906", "0.60878664", "0.5922482", "0.5895606", "0.5862601", "0.5833091", "0.58260816", "0.5799373", "0.57951057", "0.57948416", "0.5770607", "0.5714576", "0.57102215", "0.56630707", "0.5653567", "0.5652559", "0.5612773", "0.559509", "0.55816674", "0.5567627", "0.555344", "0.55524313", "0.5541327", "0.5536112", "0.5521577", "0.5510865", "0.550774", "0.5500527", "0.5462869", "0.54610795", "0.54390556", "0.5409165", "0.5403222", "0.53898436", "0.53806394", "0.53787357", "0.53562707", "0.53515863", "0.5348578", "0.53433764", "0.5338907", "0.53332597", "0.5329201", "0.53230476", "0.532182", "0.5321616", "0.5313161", "0.52918124", "0.5287673", "0.5274122", "0.52723044", "0.5272098", "0.5270063", "0.52679414", "0.526759", "0.52664113", "0.52649534", "0.5258362", "0.5253508", "0.5224419", "0.52198553", "0.52194506", "0.52075183", "0.5204408", "0.5202961", "0.52002084", "0.5199467", "0.5198533", "0.51942796", "0.51896805", "0.51793295", "0.5177427", "0.517481", "0.517328", "0.5171191", "0.5166548", "0.5165323", "0.51631874", "0.51551676", "0.51543236", "0.5136815", "0.5122844", "0.51181924" ]
0.6783705
3
Count the number of div.test elements.
def num_divs(self): return len(self.q(css='div.test').results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_number_of_testcase_elements(self):\n testcases = self.root.findall('testcase')\n self.assertEqual(len(testcases), 4)", "def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)", "def test_element_count(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.element_count(2,\"F\"),6)", "def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ", "def numberTests(self):\n for i, test in enumerate(self._tests):\n test.number = i + 1\n test.info.cs_test_num = test.number", "def assertCountSeleniumElements(self, selector, count, root_element=None):\n from selenium.webdriver.common.by import By\n\n root_element = root_element or self.selenium\n self.assertEqual(\n len(root_element.find_elements(By.CSS_SELECTOR, selector)), count\n )", "def get_number_of_testing(self):\n return self.n_test", "def test_count(self):\n self._test_count_func(count)", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0", "def test_count_elements(self):\n from pykml.util import count_elements\n\n test_datafile = path.join(\n path.dirname(__file__),\n 'testfiles',\n 'google_kml_developers_guide/complete_tour_example.kml'\n )\n with open(test_datafile) as f:\n doc = parse(f, schema=Schema('kml22gx.xsd'))\n summary = count_elements(doc)\n\n self.assertTrue('http://www.opengis.net/kml/2.2' in summary)\n self.assertEqual(4,\n summary['http://www.opengis.net/kml/2.2']['Placemark']\n )\n self.assertTrue('http://www.google.com/kml/ext/2.2' in summary)\n self.assertEqual(5,\n summary['http://www.google.com/kml/ext/2.2']['FlyTo']\n )\n self.assertEqual(2,\n summary['http://www.google.com/kml/ext/2.2']['Wait']\n )", "def testArticleCount(self):\n\n self.articleCount(17)", "def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1", "def count():", "def test_bababc():\n assert part_01.count_for('bababc', 2) == 1\n assert part_01.count_for('bababc', 3) == 1", "def count(self):\n return len(self._elements)", "def element_count(self):\n return self._internal.get_element_count()", "def count_passages(self, step, count):\r\n count = int(count)\r\n assert_equals(len(world.css_find('.annotatable-span')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)", "def count(self):\n return len(self.find())", "def count(self):\n\t\treturn len(list(self.nodes))", "def element_count(self):\r\n result = conf.lib.clang_getNumElements(self)\r\n if result < 0:\r\n raise Exception('Type does not have elements.')\r\n\r\n return result", "def test_own_count(self):\n 
self._test_count_func(it_count)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count(self):\n return self.__tree.node_count", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0", "def elements_count(self):\n return self.__elements_count", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def count(self):\n # TODO not implemented yet\n return 0", "def count(app, status):\n item = app.tv.selection()[0]\n\n def count_children(item):\n children = app.tv.get_children(item)\n return len(children) + sum(count_children(child) for child in children)\n\n status.config(text=f'{count_children(item)} descendants')", "def count(self):\n\n raise NotImplementedError", "def test_suite():\n test(count(\"is\", \"Mississippi\") == 2)\n test(count(\"an\", \"banana\") == 2)\n test(count(\"ana\", \"banana\") == 2)\n test(count(\"nana\", \"banana\") == 1)\n test(count(\"nanan\", \"banana\") == 0)\n test(count(\"aaa\", \"aaaaaa\") == 4)", "def get_child_element_count(self):\n return len(self._child_elements)", "def test_task_count_tags(self):\r\n tasks.count_tags()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TAG_CT)\r\n self.assertEqual(stat.data, 4)", "def count() -> int:\n pass", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter", "def childCount(self):\n if self.__child is not None:\n return len(self.__child)\n return self._expectedChildCount()", "def test_count_66(self):\n value: int = 66\n result: int = 18\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def count(self):\r\n return self.count_helper(self.top_node)", "def find_test_count(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n\n result = parsed['metrics']['testsCount']['_value']\n _logger.debug('Using subtest count: %s', result)\n\n return result", "def test_aabcdd():\n assert part_01.count_for('abbcdd', 2) == 1\n assert part_01.count_for('aabcdd', 3) == 0", "def test_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(test_id=0)\r\n assert q.count() == 4", "def test_count_10(self):\n value: int = 10\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def node_count(self):\n return self._root.count()", "def run(self, test):\n\n self.test_case_count = test.countTestCases()\n return super(CustomTextTestRunner, self).run(test)", "def test_count_9(self):\n value: int = 9\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def test_count_666(self):\n value: int = 666\n result: int = 264\n self.assertEqual(count(value), result, f'Between 0 and {value}, 
there are {result} lucky numbers.')", "def test_count_35(self):\n value: int = 35\n result: int = 6\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def _expectedChildCount(self):\n return 0", "def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())", "def count(self, elem):\n return self.iter.count(elem)", "def count(self, cls=None):\n return len(self.all(cls))", "def test_child_count(self):\n self.shell.onecmd(\"create %s/something ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else/entirely ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else/entirely/child ''\" % (self.tests_path))\n self.shell.onecmd(\"child_count %s/something\" % (self.tests_path))\n expected_output = u\"%s/something/else: 2\\n\" % (self.tests_path)\n self.assertEqual(expected_output, self.output.getvalue())", "def get_test_amount(self):\n\n return len(self.__test_set_list)", "def test_count_0(self):\n self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')", "def test_count_72(self):\n value: int = 72\n result: int = 21\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def fetch_test_counts(stdout, release=False):\n LOGGER.info(\"Ignored test classes: {}\".format(IGNORED_CLASSES))\n if release:\n LOGGER.info(\"Release build detected. Fetching count for release.\")\n return fetch_counts_for_release(stdout)\n\n return fetch_counts_for_debug(stdout)", "def test_stats(self):\n self.assertEqual(self.root.attrib['tests'], '4')\n self.assertEqual(self.root.attrib['errors'], '1')\n self.assertEqual(self.root.attrib['failures'], '1')\n self.assertEqual(self.root.attrib['skip'], '1')", "def test_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(test_id=0)\r\n assert q.count() == 4", "def count_search_results(self):\n raw_text = self.driver.find_element(*self.HEADING_COUNTER).text\n num = re.findall(r'\\d+', raw_text) \n return int(num[0])", "def count(self):\n return len(self._components)", "def test_item_count(self):\n self.assertEqual(len(self.items), 2)", "def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def node_count(self):\n return self.process_tree.get_descendant_count() + 1", "def test_total_ct(self):\r\n ct = 5\r\n for i in range(ct):\r\n t = Tag(gen_random_word(10))\r\n DBSession.add(t)\r\n\r\n ct = TagMgr.count()\r\n self.assertEqual(5, ct, 'We should have a total of 5: ' + str(ct))", "def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])", "def count(self):\n return len(self)", "def child_count(self):\n\t\treturn len(self._children)", "def test_counter(self):\n self.assertEqual(self._n_registered, 1)", "def _count_children(self, item):\n return len(self.tree.get_children(item))", "def test_getObservationCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getObservationCount(), 5)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getObservationCount(), 140)\r\n self.assertEqual(self.est3.getObservationCount(), 112)", 
"def test_count_6_645_243(self):\n value: int = 6_645_243\n result: int = 3_615_948\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def count(self):\n count = 0\n # get list of intermediate directories\n dirs = []\n self.__get_list_of_interm_dirs(dirs)\n # count elements in sub-directories\n for name in dirs:\n for element in os.listdir('%s/%s' % (self.path, name)):\n if _ELEMENT_REGEXP.match(element):\n count += 1\n return count", "def test_task_count_total(self):\r\n tasks.count_total()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TOTAL_CT)\r\n self.assertEqual(stat.data, 4)", "def test_count_5(self):\n value: int = 5\n result: int = 0\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def count(self):\n return self.size()", "def test_all_count(self):\n self.assertEqual(2, self.alice_storage.all_count)\n self.assertEqual(3, self.bob_storage.all_count)\n self.assertEqual(0, self.carol_storage.all_count)\n self.assertEqual(0, self.anonymous_storage.all_count)", "def test_get_publish_content_html(self):\n response = self.setup_get_html_test('/api/publish')\n count_elements = self.count_markup_elements(response.data, 'input')\n self.assertEqual(count_elements, 4)", "def test_container_count(dockerc):\n # stopped parameter allows non-running containers in results\n assert (\n len(dockerc.containers(stopped=True)) == 2\n ), \"Wrong number of containers were started.\"", "def test_numero_elementos_BD(self):\n respuesta = self.client.get(self.response)\n num_elementos = Musica.objects.count()\n self.assertEqual(num_elementos, len(respuesta.data))", "def dcount(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n a = profData.Descendants().AsArray()\n if len(a) > 0:\n return profData.DescendantCount(a[0])\n return \"\"", "def test_song_counts(self):\n self.assertEqual(self.show.total_song_count, 19)\n self.assertEqual(self.show.set1_song_count, 9)\n self.assertEqual(self.show.set2_song_count, 8)\n self.assertEqual(self.show.set3_song_count, 0)\n self.assertEqual(self.show.encore_song_count, 1)\n self.assertEqual(self.show.encore2_song_count, 1)", "def test_count_459(self):\n value: int = 459\n result: int = 148\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def count(self):\n return len([i for i in self.iteritems()])", "def get_untested_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.UNTESTED)", "def count(self) -> int:\n return self.__count", "def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples", "def test_pressure_count(self):\n self.assertEqual(self.Pcount, 7)", "def total(self):\n return len(self._results) + len(self.test_cases)" ]
[ "0.72423947", "0.6702707", "0.66566426", "0.6639454", "0.6568075", "0.6378697", "0.6344244", "0.6308958", "0.6286621", "0.62182474", "0.61914384", "0.61866164", "0.617343", "0.6128987", "0.6103108", "0.6089674", "0.60860085", "0.60737354", "0.6061457", "0.604047", "0.6024079", "0.60166556", "0.60077524", "0.59659874", "0.5963736", "0.5963736", "0.5963736", "0.5963736", "0.5961491", "0.594927", "0.59461576", "0.5932704", "0.5912908", "0.58944046", "0.587041", "0.58503413", "0.58331436", "0.5826868", "0.58246785", "0.58148956", "0.58123374", "0.5806216", "0.58039695", "0.58001333", "0.57995343", "0.57873327", "0.57854486", "0.5784024", "0.5771892", "0.5769046", "0.57541203", "0.5747057", "0.57368004", "0.5730636", "0.5728692", "0.57220465", "0.57158786", "0.5689453", "0.5685255", "0.56821424", "0.56751496", "0.5668774", "0.56676996", "0.566725", "0.56669277", "0.56648904", "0.565136", "0.5643251", "0.56428933", "0.5638088", "0.5634365", "0.5632875", "0.56297696", "0.5606549", "0.5605705", "0.5603749", "0.5592376", "0.5591215", "0.5590077", "0.55899733", "0.5582521", "0.55632377", "0.5557752", "0.55435604", "0.5542903", "0.5540765", "0.55347526", "0.5528968", "0.5528478", "0.5527399", "0.5527399", "0.55202776", "0.55152255", "0.551088", "0.55084074", "0.5501795", "0.5497246", "0.5495401", "0.54940623", "0.5492968" ]
0.83748364
0
Return list of text for each div.test element.
def div_text_list(self): return self.q(css='div.test').text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_html_list(self):\n return self.q(css='div.test').html", "def texts(self):\n return [elem.text for elem in self.web_elements]", "def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()", "def div_value_list(self):\n return self.q(css='div.test').attrs('value')", "def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label", "def get_tests(self):\n return self.tests[:]", "def get_text(self) -> List[str]:\n return self.__texts", "def return_textview_elements(self):\n return self.driver.find_elements_by_class_name('android.widget.TextView')", "def get_all_text(self):\n result = list()\n\n for path in ['./OrgQuestion/OrgQSubject',\n './OrgQuestion/OrgQBody',\n './OrgQuestion/Thread/RelQuestion/RelQSubject',\n './OrgQuestion/Thread/RelQuestion/RelQBody',\n './OrgQuestion/Thread/RelComment/']:\n result.extend([\n element.text if element.text is not None else '' for element in self.merged_root.findall(path)\n ]) # extract text from each element matching the path\n\n return result", "def get_elements_text(self, elements: Union[List[WebElement], Tuple[By, str]]) -> List[str]:\n elements = self.find_elements(elements)\n return [element.get_attribute('innerText') for element in elements]", "def process_test(self, data):\n new_utts = []\n for l in data:\n tem = []\n for sent in l:\n tem.append([\"<s>\"] + sent + [\"</s>\"])\n new_utts.append(tem)\n return new_utts # 以输入的测试标题为topic,四句空诗", "def get_texts(self) -> List[str]:\n return self.texts", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def get_all_elements_text(self, *locator):\n all_texts = []\n elements = self.__driver.find_elements(*locator)\n for element in elements:\n element_text = element.text\n all_texts.append(element_text)\n return \" \".join(all_texts).strip(\"[]\")", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' 
+ test_file[:-3])\t\t\t\n\treturn ret", "def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result", "def _generateDisplayedText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n displayedText = self._script.utilities.displayedText(obj)\n if not displayedText:\n return []\n\n return [displayedText]", "def _get_texts(locator, timeout=default_timeout, type = By.XPATH):\n logger.debug(\"Entered _get_text() method\")\n elts = _find_elements(locator, type = type, timeout = timeout)\n if elts:\n return [elt.text for elt in elts]\n return None", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def list_feature_tests(self):\n\t\treturn self.test_names", "def list_texts(self, start: int = None, end: int = None) -> List:\n return [str(i.text) for i in self.data[start:end]]", "def parse_text_into_separate_test_cases(text): \n for test_case in text.split('\\n\\n'):\n yield test_case\n #for test_case in TEST_CASE_PATTERN.finditer(text):\n #yield test_case.group(0)", "def test_text_classifier_get_details_all(self):\n pass", "def get_random_texts(self):\n texts=[]\n nodes=self.get_random_nodes()\n for node in nodes:\n texts+=self.get_corpus_from_node(node)\n return texts", "def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def find_text_in_content(self, el):\n try:\n content_divs = [el.get_element_by_id(\"content\")]\n except KeyError:\n # try class\n content_divs = el.find_class(\"content\")\n\n if content_divs == []:\n return None\n \n # iterate over divs and extract text\n all = []\n for div in content_divs:\n r = self.find_text_in_p(div)\n all.append(r)\n return \" \".join(all)", "def allTextGenerator(node):\n if node.nodeType == node.TEXT_NODE:\n yield node.data\n for child in node.childNodes:\n for text in allTextGenerator(child):\n yield text", "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def test_cases(self) -> list[str]:\n 
cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def get_text():\n global x\n for i in soup.body(\"aside\", {\"id\": \"text-2\"}):\n x = i.get_text()", "def parse_test_context(self, test_list_output):\n # Sample command line output:\n #\n # MyHobbesTest\n # Arrays\n # Compiler\n # Definitions\n #\n #\n # Sample Result:\n #\n # [\n # ['Arrays', []],\n # ['Compiler', []]\n # ['Definitions', []]\n # ]\n result = [[line.strip(), []] for line in test_list_output.splitlines()]\n return result", "def get_text(parent, tag, plural = False):\n text = None\n for item in parent.findall(tag):\n t = item.text\n if not text:\n if plural:\n text = [t]\n else:\n text = t\n elif isinstance(text, list):\n text.append(t)\n else:\n text = [text, t]\n return text", "def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def GetTopLevelTests(self):\n return [node for node in self.Walk() if node.IsTopLevelTest()]", "def get_text(self):", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def test_text_classifier_get_testing_samples(self):\n pass", "def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text", "def get_children(self, test, expression):\n\n for child in self.children:\n if TextMatch.dict_call(test, child.text, expression):\n yield child", "def readTests():\n testsList = []\n\n with open(\"Files/Tests.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n testsList.append(line.rstrip('\\n').rstrip().lstrip())\n f.close()\n return testsList", "def text(self, just_text=False):\n lines = []\n for node, data in self.traverse():\n if just_text or data['has_text'] or data['pad']:\n lines += data['text']\n else:\n lines += [data['meta']] + data['title'] + data['text']\n return flatten(lines)", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def generate_test_list(tdir):\n\n # Skip this if it already exists\n if os.path.exists(os.path.join(tdir.name, \"kstest-list\")):\n return\n\n kstest_log = os.path.join(tdir.name, \"kstest.log\")\n with open(kstest_log) as f:\n for line in f.readlines():\n if not line.startswith(\"Running tests: \"):\n continue\n\n tests = [os.path.basename(os.path.splitext(s)[0]) for s in line[15:].split()]\n with open(os.path.join(tdir.name, \"kstest-list\"), \"wt\") as klf:\n for t in tests:\n print(t, file=klf)\n break", "def list_tests(tests_module,\n test_module_names=None, test_class_map=None, skip_class_map=None):\n tests = load_tests(tests_module, test_module_names, test_class_map, skip_class_map)\n for test_class in 
tests:\n print(cmd.COLORS['title'](test_class.__name__) + ':')\n test_cases = unittest.loader.getTestCaseNames(test_class, 'test')\n for test_case in test_cases:\n print(textwrap.indent(test_case, cmd.INDENT))", "def ExtractText(self, selector):\n xpaths = map(self.tree.xpath, selector)\n elements = list(chain.from_iterable(xpaths))\n paragraphs = [e.text_content() for e in elements]\n paragraphs = [s.strip() for s in paragraphs if s and not s == ' ']\n\n return paragraphs", "def get_gtests(gtest_binary):\n process = Popen([gtest_binary, '--gtest_list_tests'], stdout=PIPE)\n (output, _) = process.communicate()\n exit_code = process.wait()\n if exit_code != 0:\n return []\n lines = output.split('\\n')\n fixture = ''\n count = 0\n tests = []\n for line in lines:\n count = count+1\n if count != 1:\n if line == '':\n continue\n if line[0] != ' ':\n fixture = line\n else:\n test = fixture+line[2:]\n tests.append(test)\n return tests", "def getTestResults():", "def get_individual_performance(self):\n\n divs = self.page.find_all(\"span\", {\"class\":\"value\"})\n values = [div.text for div in divs]\n return values", "def getText(self):\n return(' '.join(map(lambda x:x.text,self.getNested())))", "def collect_content(parent_tag):\n content = ''\n for tag in parent_tag:\n p_tags = tag.find_all('p')\n for tag in p_tags:\n content += tag.text + '\\n'\n return content", "def get_inner_text(self, css_selector):\n element = self.driver.find_elements_by_css_selector(css_selector)\n if len(element) > 0:\n information = element[0].get_attribute(\"innerText\").strip()\n return information", "def tests(c):\n results = [test(c, i) for i, test_path in enumerate(TEST_PATHS)]\n print('\\n\\n\\n############## SUMMARY ##############')\n for i, test_path in enumerate(TEST_PATHS):\n print(i, test_path, 'PASSED' if result[i] == 0 else 'FAILED')", "def stats_text(test):\n\n stats_text_en(test) \n \n stats_text_cn(test)", "def list_tests(self, executable):\n # This will return an exit code with the number of tests available\n try:\n output = subprocess.check_output(\n [executable, \"--list-test-names-only\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n\n result = output.strip().split(\"\\n\")\n\n return result", "def list_test_cases(program):\n\n return list(INFO[program].test_cases)", "def get_results(self) -> List[str]:\n output = []\n for row in self.row_layout.children():\n if self.possible_values is None:\n text = row.itemAt(0).widget().text()\n else:\n text = row.itemAt(0).widget().currentText()\n\n if text != \"\":\n output.append(text)\n return output", "def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def visitTests(tests, grepStr=''):\n\n # First flatten the list of tests.\n testsFlat = []\n toCheck = [t for t in tests]\n while toCheck:\n test = toCheck.pop()\n if isinstance(test, unittest.TestSuite):\n toCheck += [t for t in test]\n else:\n if grepStr in str(type(test)):\n 
testsFlat.append(test)\n testsFlat.sort()\n\n # Follow the flattened list of tests and show the module, class\n # and name, in a nice way.\n lastClass = None\n lastModule = None\n \n grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)\n\n for t in testsFlat:\n moduleName, className, testName = t.id().rsplit('.', 2)\n \n # If there is a failure loading the test, show it\n if moduleName.startswith('unittest.loader.ModuleImportFailure'):\n print red(moduleName), \" test:\", t.id()\n continue\n\n if moduleName != lastModule:\n lastModule = moduleName\n print(\" - From %s.py (to run all use --allPrograms)\"\n % '/'.join(moduleName.split('.')) + grepPrint)\n\n\n if className != lastClass:\n lastClass = className\n print(\" ./xmipp test %s\" % className)", "def run_tests(tests):\n return [test(t) for t in tests]", "def iter_texts():\n dirs = 'comm_use_subset noncomm_use_subset pmc_custom_license biorxiv_medrxiv'.split()\n for dir in dirs:\n fnames = (DATA_PATH / dir / dir).glob('*')\n for fname in fnames:\n with fname.open() as f:\n content = json.load(f)\n \n for key in 'abstract body_text'.split():\n for row in content[key]:\n yield row['text']", "def _toList(self):\n return [block.text() \\\n for block in _iterateBlocksFrom(self._doc.firstBlock())]", "def _find_with_text(self, selector, text):\n stripped = text.strip()\n elements = self.selenium.find_elements_by_css_selector(selector)\n return [e for e in elements if e.text.strip() == stripped]", "def 取所有项目文本(self): # real signature unknown; restored from __doc__\n return self.GetStrings()", "def get_text(self):\n rc = \"\"\n for node in self.node.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc = rc + node.data\n return rc", "def allText(node):\n return \"\".join(allTextGenerator(node))", "def find_elements_by_text(self,param={},ignore_error_handle = False):\n message = {};\n step = 'find all elements by text ' + param.get('text',None) + ' on current page';\n text = param.get('text',None);\n try:\n elements = self.driver.find_elements(by=By.NAME,value=text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def read_test(cell):\n hidden = bool(re.search(\"hidden\", get_source(cell)[0], flags=re.IGNORECASE))\n output = ''\n for o in cell['outputs']:\n output += ''.join(o.get('text', ''))\n results = o.get('data', {}).get('text/plain')\n if results and isinstance(results, list):\n output += results[0]\n elif results:\n output += results\n return Test('\\n'.join(get_source(cell)[1:]), output, hidden)", "def extract_text(soup, result):\n if soup:\n for t in soup.children:\n if type(t) == NavigableString:\n # Text content node\n result.append(t)\n elif isinstance(t, NavigableString):\n # Comment, CDATA or other text data: ignore\n pass\n elif t.name in whitespace_tags:\n # Tags that we interpret as whitespace, such as <br> and <img>\n result.append_whitespace()\n elif t.name in block_tags:\n # Nested block tag\n result.begin() # Begin block\n extract_text(t, result)\n result.end() # End block\n elif t.name not in exclude_tags:\n # Non-block tag\n extract_text(t, result)", "def get_text(self):\n logging.getLogger(__name__).info(\"Element text: {}\\nby = {}\\nvalue = {}\".format(\n self.driver.find_element(self.by, self.value).text, self.by, self.value))\n return self.driver.find_element(self.by, self.value).text", "def get_elements(self):\n\t\treturn 
self._testing_cache", "def get_elements(self):\n\t\treturn self._testing_cache", "def get_tests(self, cluster_id):\n return self._client.get(\n url=\"/tests/{}\".format(cluster_id),\n ).json()", "def parse_gtest_tests(gtest_output_raw: str):\n test_list = []\n current_test_prefix = ''\n gtest_output_split = gtest_output_raw.split('\\n')\n current_index = 0\n # skip to the actual test list\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n test_matches = re.findall(r'^[a-zA-Z]*\\.$', current_string)\n if len(test_matches) != 0:\n break\n current_index += 1\n while current_index < len(gtest_output_split):\n current_string = gtest_output_split[current_index]\n if len(current_string) == 0:\n current_index += 1\n continue\n # get the test name\n test_match = re.findall(r'^\\s*\\S*', current_string)[0].replace(' ', '')\n if test_match[len(test_match) - 1] == '.':\n # We've found a new prefix\n current_test_prefix = test_match\n current_index += 1\n continue\n test_list.append(current_test_prefix + test_match)\n current_index += 1\n return test_list", "def _get_child_text(node):\n for child in node.children:\n if isinstance(child, NavigableString) and not isinstance(child, Comment):\n yield child.split()", "def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string", "def get_test_data(path: str, text: str) -> tuple[str, list[str], str]:\n skip_results = skip_re.search(text)\n if skip_results:\n skip_unless = skip_results.group(1).strip()\n else:\n skip_unless = \"1\"\n search_results = desc_re.search(text)\n assert search_results is not None\n description, eval_string = (s.strip() for s in list(search_results.groups()))\n description = path + \": \" + description\n return description, eval_string, skip_unless", "def get_test_cases(self):\n\n return self._test_cases", "def sentences(self, tag=False, tag_method=None):\n self.__set_text_node(self.root_)\n sentence_nodes = filter(lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == 's',\n list(self.text_node.childNodes))\n sentences = []\n for s in sentence_nodes:\n current = []\n TimeMLDoc.__get_text(s, current, False)\n #print(current)\n if not tag:\n sentences.append(''.join([ c[0] for c in current]))\n else:\n sentences.append(tag_method(current))\n return sentences", "def get_texts(json_object):\n texts = list()\n texts.append(json_object.get(FIELD_NAMES_CITIES['text_field']))\n return texts", "def get_text_data_list(self):\n return [self.name, str(self.type)]", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))", "def get_course_unit_titles(self):\n\n for title in self.course_page.find_all(attrs=COURSE_UNIT_TITLE):\n self.course_unit_titles.append(title.text)\n logging.debug(\"course_unit_titles:{}\".format(self.course_unit_titles))\n logging.info(\"Course unit titles retrieved\")", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def test_gettesttools_html(self):\n pass", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def list(self, frame=0):\n text = []\n if not self.number_of_variations:\n 
return \"\"\n for group_number in range(1, len(self.varexercise_numbers)+1):\n text.extend(\n self.group_list(group_number))\n return text", "def process_test_data(self, test_data):\n\n result = []\n for suite in test_data:\n suite_report = TestGroupReport(\n name=suite[\"name\"],\n category=ReportCategories.TESTSUITE,\n uid=suite[\"name\"],\n )\n suite_has_run = False\n\n for testcase in suite[\"data\"]:\n if testcase[\"status\"] != \"skipped\":\n suite_has_run = True\n\n testcase_report = TestCaseReport(\n name=testcase[\"name\"],\n uid=testcase[\"name\"],\n suite_related=True,\n )\n assertion_obj = RawAssertion(\n passed=testcase[\"status\"] == \"pass\",\n content=testcase[\"error\"] or testcase[\"duration\"],\n description=testcase[\"name\"],\n )\n testcase_report.append(registry.serialize(assertion_obj))\n testcase_report.runtime_status = RuntimeStatus.FINISHED\n suite_report.append(testcase_report)\n\n if suite_has_run:\n result.append(suite_report)\n\n return result", "def text_of(soup):\n return ''.join([str(x) for x in soup.findAll(text=True)])", "def sequence(self):\n for tn in self._testnodes:\n yield tn", "def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x", "def get_text_only(self, soup):\n val = soup.string\n # see if we have a text element\n if val is None:\n conts = soup.contents\n resulttext = ''\n # not text so continue recursing through the tags\n for tag in conts:\n subtext = self.get_text_only(tag)\n resulttext += subtext + '\\n'\n return resulttext\n return val.strip()", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test set.\n return list(map(self.classify, test))", "def paragraph(self, text):\n return [text]", "def get_text(self, i: int = None) -> str:\n if i is None:\n i = self.index\n else:\n i = str(i)\n logging.info(f\"get text. {self.desc}\")\n js = f\"\"\"return document.querySelectorAll(\"{self.css}\")[{i}].textContent;\"\"\"\n return self._execute_javascript(js)", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])" ]
[ "0.7000479", "0.6322341", "0.61989576", "0.6173835", "0.6019634", "0.59161645", "0.5873848", "0.58302426", "0.57565117", "0.5753218", "0.57449764", "0.57032067", "0.56822264", "0.5670111", "0.5669928", "0.56666976", "0.5641307", "0.5638164", "0.5599844", "0.55976945", "0.55873597", "0.55713975", "0.5567262", "0.55324024", "0.55287504", "0.55118924", "0.5445981", "0.5444896", "0.5432942", "0.54211193", "0.5406943", "0.538909", "0.5363917", "0.5350576", "0.5347315", "0.5326956", "0.53264874", "0.53228796", "0.52874607", "0.52853316", "0.52765816", "0.5274816", "0.5271788", "0.52640116", "0.52611333", "0.5236247", "0.52213216", "0.5216429", "0.5215271", "0.5212949", "0.5211296", "0.520469", "0.52040315", "0.51981395", "0.5191953", "0.51918495", "0.5179073", "0.5177808", "0.51716435", "0.51697004", "0.5162635", "0.5156174", "0.5147375", "0.5144068", "0.5142882", "0.5139111", "0.51389486", "0.5102175", "0.50993025", "0.508889", "0.508799", "0.50410587", "0.5034926", "0.5025834", "0.5025834", "0.5023003", "0.50219077", "0.5004009", "0.500268", "0.50022244", "0.5001356", "0.49867025", "0.49810216", "0.49688923", "0.4962052", "0.4953842", "0.49407652", "0.4937773", "0.49359596", "0.49351883", "0.49347734", "0.49345952", "0.4929356", "0.4929288", "0.4927091", "0.49245209", "0.49243078", "0.49206904", "0.49170727", "0.49170727" ]
0.8607301
0