Field             Type              Range
query             stringlengths     9 – 9.05k
document          stringlengths     10 – 222k
metadata          dict
negatives         sequencelengths   30 – 30
negative_scores   sequencelengths   30 – 30
document_score    stringlengths     4 – 10
document_rank     stringclasses     2 values
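Taken together, the fields above form one retrieval-training record: a natural-language query, its positive code document, pairing metadata, and thirty scored hard negatives. As a rough illustration of that structure, here is a minimal sketch of a record as a Python dict, using the field names from the schema; the values are abbreviated from the first record shown below, with long strings and lists truncated, so it is not a complete row.

# Illustrative sketch only: an abbreviated record matching the schema above.
# Long fields are truncated with "..." placeholders; real rows carry 30 negatives and 30 scores.
example_record = {
    "query": "Create a table for the example_test data",
    "document": 'def create_example_test_table(conn): ...',
    "metadata": {"objective": {"self": [], "paired": [],
                               "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def create_table(...): ...", "..."],
    "negative_scores": [0.7220285, 0.7129346],
    "document_score": "0.8160891",   # stored as a string per the schema
    "document_rank": "0",            # one of 2 string classes
}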
Create a table for the example_test data
def create_example_test_table(conn):
    execute_sql_script(conn, "06_create_example_test_table.sql")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_table(\n table_id: str = typer.Option(..., help=\"The id of the table to be created\"),\n test: bool = typer.Option(False, help=\"Whether it is a test or not\")\n) -> None:\n base_path = Path.cwd().parent\n architecture_file = base_path / f\"extra/architecture/{table_id}.csv\"\n if not architecture_file.exists():\n raise FileNotFoundError(f\"Arquivo {architecture_file} não encontrado\")\n\n tbl = bd.Table(dataset_id=\"br-cgu-servidores-executivo-federal\", table_id=table_id)\n output_path = f\"output/{table_id}\" if not test else f\"output/test/{table_id}_test\"\n tbl.create( # pylint: disable=no-value-for-parameter\n path=str(base_path / output_path),\n force_dataset=True,\n if_table_exists=\"replace\",\n if_storage_data_exists=\"pass\",\n if_table_config_exists=\"replace\",\n columns_config_url_or_path=str(\n base_path / f\"extra/architecture/{table_id}.csv\"\n ),\n )", "def create_example_sample_submission_table(conn):\n execute_sql_script(conn, \"07_create_example_sample_submission_table.sql\")", "def test_dummydb_new_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)", "def create_table(self):\n pass", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def test_create_table(self):\n self.assertEqual(\n ['CREATE', 'TABLE', 'T1', '(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n)'],\n grammar._CREATE_TABLE.parseString(\n 'CREATE TABLE IF NOT EXISTS `T1`(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n);'\n ).asList()\n )", "def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')", "def putTestData(self):\n # print 'Not Yet implement / sample DB table create'\n tkMessageBox.showinfo(\"Message\", \"Sample DB Table Create\")", "def basic_table_creation():\n results = {\n 'From pyarrow arrays': pa.table([\n pa.array(['Kakashi', 'Itachi', 'Shisui'], type=pa.string()),\n pa.array(['Hatake', 'Uchiha', 'Uchiha'], type=pa.string())\n ], names=['first_name', 'last_name']),\n 'From List[dict]': pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ]),\n 'From Dict[str, list]': pa.Table.from_pydict({\n 'first_name': ['Kakashi', 'Itachi', 'Shisui'],\n 'last_name': ['Hatake', 'Uchiha', 'Uchiha'],\n }),\n 'From pandas df': pa.Table.from_pandas(pd.DataFrame([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])),\n }\n pretty_print_result_map(results)", "def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")", "def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / 
float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph markup\n print(tbl.show())\n print(tbl.loggraph())", "def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()", "def setUp(self):\n self.conn = seed.connect_to_db(\"testing\")\n self.cur = self.conn.cursor()\n\n seed.cur = self.conn.cursor()\n seed.conn = self.conn\n\n self.tables = [\n {\n \"name\": \"people\", \n \"schema\": [(\"firstname\", \"10\", \"VARCHAR\"), (\"lastname\", \"10\", \"VARCHAR\"), (\"age\", \"3\", \"INTEGER\"), (\"active\", \"1\", \"BOOLEAN\")]\n },\n {\n \"name\": \"animals\",\n \"schema\": [(\"animal_id\", \"7\", \"INTEGER\"), (\"name\", \"10\", \"VARCHAR\"), (\"species\", \"20\", \"VARCHAR\")]\n },\n {\n \"name\":\"testformat1\",\n \"schema\": [(\"name\", \"10\", \"VARCHAR\"), (\"valid\", \"1\", \"BOOLEAN\"), (\"count\", \"3\", \"INTEGER\")]\n }\n ]\n for table in self.tables:\n seed.create_table(table[\"name\"], table[\"schema\"])", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def data_table_creation(cursor, connection_to_db):\n\n cursor.execute(\"\"\"\n\n CREATE TABLE IF NOT EXISTS data(\n question TEXT NOT NULL,\n answer TEXT NULL,\n question_type TEXT NOT NULL,\n question_type_answers TEXT NULL,\n PRIMARY KEY(question)\n );\n\n \"\"\")\n\n connection_to_db.commit()", "def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )", "def test_rt_table(self) -> None:\n expected = Fixtures.next_table()\n expected.description = '\"hello!\" said no one'\n expected.tags.sort()\n\n self.get_proxy().put_table(table=expected)\n actual: Table = self.get_proxy().get_table(table_uri=checkNotNone(expected.key))\n actual.last_updated_timestamp = None\n actual.tags.sort()\n\n self.assertEqual(expected, actual)", "def create_prediction_table():\n try:\n conn = create_connection()\n c = conn.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS predtable\n (age NUMERIC,workclass TEXT,fnlwgt NUMERIC,education TEXT,education_num NUMERIC,marital_status TEXT,occupation TEXT,relationship TEXT,race TEXT,sex TEXT,capital_gain NUMERIC,capital_loss NUMERIC,hours_per_week NUMERIC,native_country TEXT,predicted_class NUMERIC,model_class TEXT, time_of_prediction TEXT)\"\"\")\n\n except Exception as e:\n pass", "def test_table_definition(self):\n create_table(LowercaseKeyModel)\n create_table(CapitalizedKeyModel)\n\n delete_table(LowercaseKeyModel)\n delete_table(CapitalizedKeyModel)", "def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, 
exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table", "def create_table(enter_data_base: str, enter_table: str, enter_data: list):\n if type(enter_table) is not str:\n raise ValueError('имя таблицы должно быть строковым!') # тут переделать, ошибка должна выводиться графически\n\n con = sqlite3.connect(enter_data_base)\n cur = con.cursor()\n q = \"\"\"\n CREATE TABLE {table} ( \n Name {txt}, \n Author {txt}, \n Published year {txt})\n \"\"\"\n cur.execute(q.format(table=enter_table, txt='TEXT'))\n cur.execute('INSERT INTO ' + enter_table + ' VALUES(?, ?, ?)', enter_data)\n con.commit()\n cur.close()\n con.close()", "def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )", "def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table", "def test_create_table_successfully (self):\n\n new_table = self.wrapper.create_table(self.table, [self.bob, self.jane])\n self.assertIsNone(new_table)", "def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)", "def test_temp_table(self):\n self.assertEqual([\"test\", [[\"c1\", \"INT\"], [\"c2\", \"VARCHAR(200)\"]]],\n grammar._TEMP_TABLE_EXPR.parseString(\"test(c1 INT, c2 VARCHAR(200));\").asList())", "def setup_table(database, schema_name, table_name, sample_data):\n table = create_table(database, schema_name, table_name)\n connection = database.engine.connect()\n\n logger.info('Enabling CDC on %s.%s...', schema_name, table_name)\n connection.execute(f'exec sys.sp_cdc_enable_table @source_schema=\\'{schema_name}\\', '\n f'@source_name=\\'{table_name}\\', @supports_net_changes=1, @role_name=NULL')\n\n logger.info('Adding %s rows into %s.%s...', len(sample_data), schema_name, table_name)\n connection.execute(table.insert(), sample_data)\n\n return table" ]
[ "0.7220285", "0.7129346", "0.707731", "0.70759505", "0.7056779", "0.69891703", "0.69740117", "0.6933229", "0.6916446", "0.6874953", "0.6792691", "0.6781502", "0.6639353", "0.66176784", "0.65448433", "0.6538979", "0.6519175", "0.65118295", "0.6496484", "0.6479034", "0.64755267", "0.64702666", "0.64698523", "0.6424298", "0.64237154", "0.64180964", "0.64109313", "0.6396887", "0.6395621", "0.6394825" ]
0.8160891
0
Create a table for the census tract to example_sample_submission data
def create_example_sample_submission_table(conn):
    execute_sql_script(conn, "07_create_example_sample_submission_table.sql")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")", "def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")", "def create_submission(pred_sub, name_of_the_file='submission'):\n\n df_sub = pd.DataFrame(pred_sub, columns=['Prediction'])\n df_sub.index.name = 'Id'\n df_sub.index = np.arange(1, 10001)\n df_sub[df_sub['Prediction'] == 0] = -1\n df_sub.to_csv(name_of_the_file + '.csv',index_label='Id')\n\n print('submission file created as \"'+ name_of_the_file+'.csv\"')", "def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table", "def create_prediction_table():\n try:\n conn = create_connection()\n c = conn.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS predtable\n (age NUMERIC,workclass TEXT,fnlwgt NUMERIC,education TEXT,education_num NUMERIC,marital_status TEXT,occupation TEXT,relationship TEXT,race TEXT,sex TEXT,capital_gain NUMERIC,capital_loss NUMERIC,hours_per_week NUMERIC,native_country TEXT,predicted_class NUMERIC,model_class TEXT, time_of_prediction TEXT)\"\"\")\n\n except Exception as e:\n pass", "def create_features_table():\n\n table_name = 'features'\n sql_query = f\"\"\"CREATE OR REPLACE TABLE `{GCP_PROJECT}.{BQ_TEMP_DATASET}.{table_name}`\n AS\n SELECT COALESCE(osm_id, osm_way_id) AS osm_id, osm_version, osm_timestamp, 'point' AS feature_type, all_tags, geometry FROM `{GCP_PROJECT}.{BQ_SOURCE_DATASET}.points` \n UNION ALL\n SELECT COALESCE(osm_id, osm_way_id) AS osm_id, osm_version, osm_timestamp, 'line' AS feature_type, all_tags, geometry FROM `{GCP_PROJECT}.{BQ_SOURCE_DATASET}.lines`\n UNION ALL\n SELECT COALESCE(osm_id, osm_way_id) AS osm_id, osm_version, osm_timestamp, 'multilinestring' AS feature_type, all_tags, geometry FROM `{GCP_PROJECT}.{BQ_SOURCE_DATASET}.multilinestrings`\n UNION ALL\n SELECT COALESCE(osm_id, osm_way_id) AS osm_id, osm_version, osm_timestamp, 'multipolygon' AS feature_type, all_tags, geometry FROM `{GCP_PROJECT}.{BQ_SOURCE_DATASET}.multipolygons`\n UNION ALL\n SELECT COALESCE(osm_id, osm_way_id) AS osm_id, osm_version, osm_timestamp, 'other_relation' AS feature_type, all_tags, geometry FROM `{GCP_PROJECT}.{BQ_SOURCE_DATASET}.other_relations` \n \"\"\"\n query_job = bq.query(sql_query)", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. 
At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table", "def trial_table():\n # Read data\n included = read_npy_file('trials.included.npy')\n fb_type = read_npy_file('trials.feedbackType.npy')\n fb_type = fb_type.astype(int)\n fb_time = read_npy_file('trials.feedback_times.npy')\n go_cue = read_npy_file('trials.goCue_times.npy')\n trial_intervals = read_npy_file('trials.intervals.npy')\n rep_num = read_npy_file('trials.repNum.npy')\n response_choice = read_npy_file('trials.response_choice.npy')\n response_times = read_npy_file('trials.response_times.npy')\n visual_left = read_npy_file('trials.visualStim_contrastLeft.npy')\n visual_right = read_npy_file('trials.visualStim_contrastRight.npy')\n visual_times = read_npy_file('trials.visualStim_times.npy')\n\n for j in range(len(trial_intervals)):\n nwb_file.add_trial(trial_intervals[j, 0], trial_intervals[j, 1])\n\n nwb_file.add_trial_column(\n 'included',\n 'Importantly, while this variable gives inclusion criteria according '\n 'to the definition of disengagement (see manuscript Methods), it does '\n 'not give inclusion criteria based on the time of response, as used '\n 'for most analyses in the paper.',\n np.ravel(included)\n )\n nwb_file.add_trial_column(\n 'go_cue',\n 'The \\'goCue\\' is referred to as the \\'auditory tone cue\\' in the manuscript.',\n np.ravel(go_cue)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(visual_times)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_left_contrast',\n 'Proportion contrast. A value of 0.5 means 50% contrast. 0 is a blank '\n 'screen: no change to any pixel values on that side (completely undetectable).',\n np.ravel(visual_left)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_right_contrast',\n 'Proportion contrast. A value of 0.5 means 50% contrast. 0 is a blank '\n 'screen: no change to any pixel values on that side (completely undetectable).',\n np.ravel(visual_right)\n )\n nwb_file.add_trial_column(\n 'response_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(response_times)\n )\n nwb_file.add_trial_column(\n 'response_choice',\n 'Enumerated type. The response registered at the end of the trial, '\n 'which determines the feedback according to the contrast condition. '\n 'Note that in a small percentage of cases (~4%, see manuscript Methods) '\n 'the initial wheel turn was in the opposite direction. -1 for Right '\n 'choice (i.e. correct when stimuli are on the right); +1 for left '\n 'choice; 0 for Nogo choice.',\n np.ravel(response_choice)\n )\n nwb_file.add_trial_column(\n 'feedback_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(fb_time)\n )\n nwb_file.add_trial_column(\n 'feedback_type',\n 'Enumerated type. 
-1 for negative feedback (white noise burst); +1 for '\n 'positive feedback (water reward delivery).',\n np.ravel(fb_type)\n )\n nwb_file.add_trial_column(\n 'rep_num',\n 'Trials are repeated if they are \"easy\" trials (high contrast stimuli '\n 'with large difference between the two sides, or the blank screen '\n 'condition) and this keeps track of how many times the current '\n 'trial\\'s condition has been repeated.',\n np.ravel(rep_num)\n )", "def create_tables_county_m(new_york_data, conn, county):\r\n\r\n try:\r\n # Write the new DataFrame to a new SQLite table\r\n new_york_data[(new_york_data.County == county)].to_sql(\r\n county + \"_stg\", conn, if_exists=\"replace\"\r\n )\r\n except Exception as e:\r\n print(f\"Failed while creating stage table for {county} with exception {e}\")\r\n finally:\r\n conn.commit()", "def build_submission(y_pred, id_submission):\n y_pred_ = zero_to_neg(y_pred)\n ret = np.ones((len(y_pred_), 2))\n for i in range(len(y_pred_)):\n ret[i] = np.array([i+1, y_pred_[i]])\n ret = ret.astype(int)\n sub = pd.DataFrame(data = ret)\n sub.columns = ['Id', 'Prediction']\n sub.to_csv('pred_' + id_submission + '.csv', index=None)", "def sample_table(self):\n if self[SAMPLE_EDIT_FLAG_KEY]:\n _LOGGER.debug(\"Generating new sample_table DataFrame\")\n self[SAMPLE_EDIT_FLAG_KEY] = False\n new_df = self._get_table_from_samples(index=self.st_index)\n self._sample_table = new_df\n return new_df\n\n _LOGGER.debug(\"Returning stashed sample_table DataFrame\")\n return self._sample_table", "def setup_table(database, schema_name, table_name, sample_data):\n table = create_table(database, schema_name, table_name)\n connection = database.engine.connect()\n\n logger.info('Enabling CDC on %s.%s...', schema_name, table_name)\n connection.execute(f'exec sys.sp_cdc_enable_table @source_schema=\\'{schema_name}\\', '\n f'@source_name=\\'{table_name}\\', @supports_net_changes=1, @role_name=NULL')\n\n logger.info('Adding %s rows into %s.%s...', len(sample_data), schema_name, table_name)\n connection.execute(table.insert(), sample_data)\n\n return table", "def _generate_samples(samples_data_table=None):\n samples_data_table = samples_data_table or dict()\n\n con_name = f\"Auto_Sample_Test_{datetime.now()}\"\n con_result_soup = BeautifulSoup(_post_con(con_name), \"xml\")\n con_uri = con_result_soup.find(\"con:container\")[\"uri\"]\n\n sample_list = list()\n for i in range(1, 97, 2):\n well = (\n 'ABCDEFGH'[(i - 1) % 8] + ':' + '%01d' % ((i - 1) // 8 + 1,))\n letter = 'ABCDEFGH'[i % 8]\n to_add = api_types.Sample(f\"test{i}{letter}\")\n to_add.location = well\n to_add.con = api_types.Container(\n con_name,\n \"96 well plate\",\n \"\",\n con_uri)\n\n for data_name, data_value in samples_data_table.items():\n if \"udf\" in data_name:\n udf_name = data_name.strip(\"udf_\")\n to_add.udf_to_value[udf_name] = data_value\n elif \"adapter\" in data_name:\n to_add.adapter = data_value\n sample_list.append(to_add)\n return sample_list", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = 
s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def create_example_test(row, vocab):\n context, utterance = row[:2]\n distractors = row[2:]\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n\n # Distractor sequences\n for i, distractor in enumerate(distractors):\n dis_key = \"distractor_{}\".format(i)\n dis_len_key = \"distractor_{}_len\".format(i)\n # Distractor Length Feature\n dis_len = len(next(vocab._tokenizer([distractor])))\n example.features.feature[dis_len_key].int64_list.value.extend([dis_len])\n # Distractor Text Feature\n dis_transformed = transform_sentence(distractor, vocab)\n example.features.feature[dis_key].int64_list.value.extend(dis_transformed)\n return example", "def create_table(build_list):\n # extract list of files:\n all_files = build_list.split(',')\n\n output_file = os.getcwd() + \"/fpkm_table.tbl\"\n verbalise(\"B\", \"saving built table to file \", output_file)\n\n awk_cmd = \"\"\"awk '!($10~/FPKM/){\\\n gene_sample[$1,FILENAME]=$9;\\\n samples[FILENAME]=1;genes[$1]=1}\\\n END{printf \"%s\\t\", \"genes\";\\\n for(g in genes){printf \"%s\\t\", g};print \"\";\\\n for(s in samples){printf \"%s\\t\",s;\\\n for(g in genes){printf \"%s\\t\", gene_sample[g,s]};\\\n print \"\"}}' \"\"\"\n\n full_cmd = awk_cmd + \" \".join(all_files) + \">\" + str(output_file)\n\n # build table:\n os.system(full_cmd)\n\n # shorten filenames within table for easier reading!\n #output\n\n return output_file", "def submission(test_ids, pred_test, file_name):\n pred_test[pred_test < 0] = 0\n\n val_pred_df = pd.DataFrame(data={'fullVisitorId': test_ids,\n 'predictedRevenue': pred_test})\n\n val_pred_df = val_pred_df.groupby('fullVisitorId').sum().reset_index()\n\n val_pred_df.columns = ['fullVIsitorId', 'predictedLogRevenue']\n val_pred_df['predictedLogRevenue'] = val_pred_df['predictedLogRevenue']\n val_pred_df.to_csv('submission/'+file_name, 
index=False)", "def test_create(self):\n st = SampleTemplate.create(self.metadata, self.new_study)\n # The returned object has the correct id\n self.assertEqual(st.id, 2)\n\n # The relevant rows to required_sample_info have been added.\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n # sample_id study_id physical_location has_physical_specimen\n # has_extracted_data sample_type required_sample_info_status_id\n # collection_timestamp host_subject_id description\n exp = [[\"2.Sample1\", 2, \"location1\", True, True, \"type1\", 1,\n datetime(2014, 5, 29, 12, 24, 51), \"NotIdentified\",\n \"Test Sample 1\", 42.42, 41.41],\n [\"2.Sample2\", 2, \"location1\", True, True, \"type1\", 1,\n datetime(2014, 5, 29, 12, 24, 51), \"NotIdentified\",\n \"Test Sample 2\", 4.2, 1.1],\n [\"2.Sample3\", 2, \"location1\", True, True, \"type1\", 1,\n datetime(2014, 5, 29, 12, 24, 51), \"NotIdentified\",\n \"Test Sample 3\", 4.8, 4.41]]\n self.assertEqual(obs, exp)\n\n # The relevant rows have been added to the study_sample_columns\n obs = self.conn_handler.execute_fetchall(\n \"SELECT study_id, column_name, column_type FROM \"\n \"qiita.study_sample_columns WHERE study_id=2 \"\n \"order by column_name\")\n\n # study_id, column_name, column_type\n exp = [[2L, 'int_column', 'integer'], [2, \"str_column\", \"varchar\"]]\n self.assertEqual(obs, exp)\n\n # The new table exists\n self.assertTrue(exists_table(\"sample_2\", self.conn_handler))\n\n # The new table hosts the correct values\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")\n # sample_id, str_column\n exp = [['2.Sample1', 1, \"Value for sample 1\"],\n ['2.Sample2', 2, \"Value for sample 2\"],\n ['2.Sample3', 3, \"Value for sample 3\"]]\n self.assertEqual(obs, exp)", "def create_questions_table(conn):\n execute_sql_script(conn, \"04_create_questions_table.sql\")", "def make_lof_table(data_table, my_genes, my_samples, summary_func):\n table_header = [\"Gene\"] + my_samples + [\n \"Missense:Benign\", \"Missense:Possibly\", \"Missense:Probably\",\n \"MissenseNA\", \"Indel\", \"Nonsense\", \"Frameshift\", \"Splice-site\",\n \"Synonymous\"]\n table_records = []\n\n gs_lookup = group_data_by_gs(data_table)\n for gene in my_genes:\n synonymous = missense_benign = missense_possibly = missense_probably = \\\n missense_na = frameshift = nonsense = splice = indel = 0\n\n out_row = [gene]\n for sample in my_samples:\n normalized = [0]\n # Count mutations of each type for this gene and sample\n for entry in gs_lookup[gene][sample]:\n if entry['muttype'] == 'Silent':\n synonymous += 1\n continue\n if entry['muttype'] == 'Intron':\n # Shouldn't be here; ignore\n continue\n\n if entry['muttype'] == 'Missense_Mutation':\n if entry['consequence'] == 'benign':\n missense_benign += 1\n elif entry['consequence'] == 'possibly':\n missense_possibly += 1\n elif entry['consequence'] == 'probably':\n missense_probably += 1\n elif entry['consequence'] == 'NA':\n missense_na += 1\n else:\n print(\"Unhandled missense consequence level:\",\n entry['consequence'], file=sys.stderr)\n elif entry['muttype'] == 'Nonsense_Mutation':\n nonsense += 1\n elif entry['muttype'] == 'Splice_Site':\n splice += 1\n elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):\n frameshift += 1\n elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):\n indel += 1\n else:\n print(\"Unhandled mutation type:\", entry['muttype'],\n file=sys.stderr)\n continue\n\n normalized.append(entry['normalized'])\n # 
Summarize the normalized mutation counts for this gene and sample\n out_row.append(summary_func(normalized))\n out_row.extend((missense_benign, missense_possibly, missense_probably,\n missense_na, indel, nonsense, frameshift, splice,\n synonymous))\n table_records.append(out_row)\n\n return pandas.DataFrame.from_records(table_records, columns=table_header)", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def generate_table(self, rows):\n ...", "def add_dataset_table(database):\n\n # Connecting to the database file\n conn = sqlite3.connect(database)\n c = conn.cursor()\n\n # Add table and set primary key column\n c.execute(\"\"\"CREATE TABLE dataset (\n dataset_ID INTEGER PRIMARY KEY,\n dataset_name TEXT,\n sample TEXT,\n platform TEXT\n )\"\"\")\n\n conn.commit()\n conn.close()\n return", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()", "def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)", "def make_dtrees_table(tablename, input_basename, 
output_basename):\n data = read_dhalo_trees(input_basename)\n add_depth_first_index(data) \n write_sql_server_native_file(tablename, data, \n output_basename+\".dat\", \n output_basename+\".sql\")", "def create_tables(cur, country_json, xml_state, body_json):\n print(\"Creating the 3 first tables...\")\n cur.execute('CREATE TABLE IF NOT EXISTS country_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_total_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_total_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_total_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_albums(ID INTEGER PRIMARY KEY, state TEXT, year INTEGER, genre TEXT, album TEXT, amount INTEGER)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO country_albums VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' %\n (idx, body_json['state'], body_json['year'], body_json['genre'], album,\n xml_state[0][0][idx].text))", "def create_data_table(df: pd.DataFrame) -> pd.DataFrame:\n\n df = df.copy()\n\n # Normalize times by labeling all of today's data with its future label, 00:00\n # tomorrow (as that's the timestamp marking the end of the 24-hour data collection\n # period). No need to adjust data not from today; it's already been adjusted and is\n # labeled with the date whose 00:00 marked the end of data collection (i.e., data\n # generated on Mar 20 is labeled Mar 21).\n normalized_dates = df[Columns.DATE].dt.normalize()\n is_at_midnight = df[Columns.DATE] == normalized_dates\n df.loc[~is_at_midnight, Columns.DATE] = normalized_dates[\n ~is_at_midnight\n ] + pd.Timedelta(days=1)\n df[Columns.DATE] = df[Columns.DATE].dt.strftime(r\"%Y-%m-%d\")\n\n df = df.drop(\n columns=[\n Columns.IS_STATE,\n Columns.LOCATION_NAME,\n Columns.OUTBREAK_START_DATE_COL,\n Columns.DAYS_SINCE_OUTBREAK,\n Columns.POPULATION,\n Columns.STAGE,\n Columns.COUNT_TYPE,\n ]\n )\n\n df = (\n df.pivot_table(\n index=[\n c\n for c in df.columns\n if c not in [Columns.CASE_TYPE, Columns.CASE_COUNT]\n ],\n columns=Columns.CASE_TYPE,\n values=Columns.CASE_COUNT,\n aggfunc=\"first\",\n )\n .reset_index()\n .sort_values([Columns.COUNTRY, Columns.STATE, Columns.DATE])\n )\n\n for col in CaseInfo.get_info_items_for(\n InfoField.CASE_TYPE, count=Counting.TOTAL_CASES\n ):\n df[col] = pd.to_numeric(df[col], downcast=\"integer\")\n\n # save_path = Paths.DATA / \"data_table.csv\"\n # df.to_csv(save_path, index=False)\n # print(f\"Saved data to {save_path.relative_to(Paths.ROOT)}\")\n\n return df" ]
[ "0.603935", "0.5993586", "0.5964438", "0.5935723", "0.58772194", "0.5860783", "0.5800112", "0.5777341", "0.5726033", "0.5690624", "0.56006396", "0.5599972", "0.55904704", "0.5590239", "0.5584303", "0.5566211", "0.55599475", "0.55486864", "0.5546934", "0.55461943", "0.5542591", "0.55318207", "0.55274326", "0.55184144", "0.55180377", "0.5515072", "0.550891", "0.55040306", "0.54949373", "0.546594" ]
0.75246966
0
Composite function that copies all CSV files into the database
def copy_csv_files(data_files_dict):
    # Depending on your local settings, you may need to specify a user and password, e.g.
    # conn = psycopg2.connect(dbname=DBNAME, user="postgres", password="password")
    conn = psycopg2.connect(dbname=DBNAME)

    for name, files in data_files_dict.items():
        csv_file = files[0]
        # skip the header; this info is already in the table schema
        next(csv_file)
        if name == "train":
            copy_csv_to_train_table(conn, csv_file)
        elif name == "questions":
            copy_csv_to_questions_table(conn, csv_file)
        elif name == "lectures":
            copy_csv_to_lectures_table(conn, csv_file)
        elif name == "example_test":
            copy_csv_to_example_test_table(conn, csv_file)
        elif name == "example_sample_submission":
            copy_csv_to_example_sample_submission_table(conn, csv_file)
        print(f"""Successfully loaded CSV file into `{name}` table """)

    conn.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_files(conn: Connection, path: Path) -> None:\n sql = \"INSERT OR IGNORE INTO Files (filename) VALUES (?)\"\n run_sql_on_csv(conn, path, sql, (str,))", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def bulk_copy_to_db(self):\n database = PostgreSQLCommon()\n\n try:\n file = open(self.file_name_hash)\n database.bulk_copy(file, self.storage_table)\n\n m.info('Bulk insert from %s has been successfully completed!'\n % self.file_name_hash)\n except Exception as err:\n m.error('OOps! Bulk insert operation FAILED! 
Reason: %s' % str(err))\n finally:\n database.close()\n\n if os.path.exists(self.file_name_hash):\n os.remove(self.file_name_hash)", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))", "def load(*args):\r\n\r\n #args[0].to_csv(str(PATH.joinpath('./data/{}.csv'.format(args[1]))),index=False)\r\n\r\n try: # it will fail if duplicates\r\n args[0].to_sql('cmf', con=engine, if_exists='append', index=False)\r\n except:\r\n pass", "def loading_data_to_sqlite(list_files):\n engine = connecting_database()\n if engine is None:\n return False\n\n print()\n print(\"-\".rjust(60, \"-\"))\n print(\"Loading data\".center(60))\n print(\"-\".rjust(60, \"-\"))\n\n for filename in list_files:\n name, ext = os.path.splitext(filename)\n if ext != '.csv':\n print(\">> WARNING: CSV file invalid!\")\n return False\n\n print(f\">> Populating the table: stg_{name}\")\n df = pd.read_csv(path + inputfile + filename, sep=',', header=0)\n df.to_sql(f\"stg_{name}\", con=engine, index=False, if_exists='replace')\n print(\"-\".rjust(60, \"-\"))\n\n return True", "def test_reading_with_multiple_files(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testmultifiles (\n a int,\n b int,\n c float,\n PRIMARY KEY (a, b)\n )\"\"\")\n\n num_rows_per_file = 100\n num_files = 10\n tempfiles = []\n\n for i in range(num_files):\n tempfiles.append(self.get_temp_file(prefix='testreadmult{}'.format(i), suffix='.csv'))\n\n for i in range(num_files):\n with open(tempfiles[i].name, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=['a', 'b', 'c'])\n for k in range(num_rows_per_file):\n writer.writerow({'a': i, 'b': k, 'c': 2.0})\n\n def import_and_check(temp_files_str):\n self.session.execute(\"TRUNCATE testmultifiles\")\n\n logger.debug(\"Importing csv files {}\".format(temp_files_str))\n self.run_cqlsh(cmds=\"COPY ks.testmultifiles FROM '{}'\".format(temp_files_str))\n\n assert [[num_rows_per_file * len(tempfiles)]] == rows_to_list(self.session.execute(\"SELECT COUNT(*) FROM testmultifiles\"))\n\n import_and_check(','.join([tempfile.name for tempfile in tempfiles]))\n import_and_check(os.path.join(gettempdir(), 'testreadmult*.csv'))\n import_and_check(','.join([os.path.join(gettempdir(), 'testreadmult[0-4]*.csv'),\n os.path.join(gettempdir(), 'testreadmult[5-9]*.csv')]))", "def copy_csv_to_train_table(conn, csv_file):\n COPY_TRAIN = \"08_copy_train_to_table.psql\"\n copy_expert_psql_script(conn, COPY_TRAIN, csv_file)", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n file_name_dict = {'products': 'products.csv', 
'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = mdb.ImportData(key, tmp_file)\n result = mongo_insert.import_data()\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def import_data(data_dir, *files):\n added = [0, 0, 0]\n errors = [0, 0, 0]\n fnl_lst = []\n for filepath in files:\n start_time = time.time()\n added = 0\n collection_name = filepath.split(\".\")[0]\n with open(os.path.join(data_dir, filepath)) as file:\n reader = csv.reader(file, delimiter=\",\")\n header = False\n\n for row in reader:\n try:\n if not header:\n header = [h.strip(\"\\ufeff\") for h in row]\n else:\n data = {header[i]:v for i, v in enumerate(row)}\n cursor = db[collection_name]\n cursor.insert_one(data)\n added +=1\n except Exception as e:\n print(e)\n fnl_lst.append((added,0,added,time.time()-start_time))\n return fnl_lst", "def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def merge(parent_folder):\n parent_folder = Path(parent_folder)\n\n address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))\n\n frames = []\n\n #: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL\n for address_csv_file in 
address_csv_files:\n temp = pd.read_csv(\n address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']\n )\n\n frames.append(temp)\n\n #: merge all csv's\n merged = pd.concat(frames)\n merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')", "def process_csvs(conn: Connection, basedir: Path) -> None:\n process_files(conn, basedir/\"files.csv\")\n process_notes(conn, basedir/\"notes.csv\")\n process_links(conn, basedir/\"links.csv\")\n process_clusters(conn, basedir/\"clusters.csv\")\n process_bibliography(conn, basedir/\"bibliography.csv\")\n process_citations(conn, basedir/\"citations.csv\")", "def loadCSV(input_file):", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def process_file(cur, conn, table, filepath):\n\n taxi_table_insert = (\"\"\"\n INSERT INTO {} (trip_id, taxi_id, trip_sec, trip_mile)\n VALUES (%s, %s, %s, %s);\n \"\"\".format(table))\n\n # open csv file\n # https://stackoverflow.com/questions/17444679/reading-a-huge-csv-file\n df = pd.read_csv(filepath)\n\n df = df[['Trip ID', 'Taxi ID', 'Trip Seconds', 'Trip Miles']]\n\n df.dropna(inplace=True)\n\n # insert trip records\n for index, row in df.iterrows():\n cur.execute(taxi_table_insert, row)\n conn.commit()", "def write_test_data(sql):\n for fname in sorted(glob.glob(\"mock_data/*.csv\")):\n print(fname)\n with open(fname, 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n i = 0\n for row in reader:\n if i == 0:\n if row != '' and ''.join(row) != '':\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + commajoin(row, [], 0) + \" VALUES\\n\")\n else:\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + \" VALUES\\n\")\n i += 1\n continue\n if row == '' or ''.join(row) == '':\n continue\n if i > 1:\n sql.write(\",\\n\")\n sql.write(commajoin(row, list(range(len(row))), 4))\n i += 1\n sql.write(\";\\n\\n\")", "def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = 
glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n # use the func to insert data from these files to database's fact and dim tables\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def execute(cls, path: Path) -> None:\n file_csv_convert = FileCsvConvert.create_by_path_csv_convert(path)\n list_convert_table = cls._load_csv(file_csv_convert, path)\n file_csv_convert.value.convert_table_type.value.model.save_all(list_convert_table)", "def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def convert_csv_to_SQLite3(self,\n csv_path: str=None, # Path to .csv \n destination: str=None, # Where to create .db\n db_name: str=None, # Database name\n table_name: str=None, # table name\n **kwargs # Custom arguments for reader and writter\n ):\n # With scribe reader, read a .csv \n # **kwargs, are used in params in the subclass Scibe_File_Writter\n # **Kwargs Over-write convert_csv_to_db params\n # Inherits from scribe_readers.Scribe_File_Reader\n self.read_from_csv(csv_path, **kwargs) # Inherits from scribe_readers.Scribe_File_Reader\n if db_name != None:\n destination = f\"{destination}\\{db_name}.db\"\n self.db_name = db_name\n conn = self.create_sqlite_connection(destination) # Inherits from scribe_writers_Scribe_Scribe_SQLite_Writer\n # Create connection also creates new db if it does not exist.\n self.create_new_sqlite_table(conn=conn,\n schema=self.dtypes,\n table_name=f\"tbl_{table_name}\",\n close_conn =False)\n \n \"\"\"Insert data into SQLite database\"\"\"\n\n table_name=f\"tbl_{table_name}\"\n self.insert_into_sqlite_table(conn,\n csv_path,\n table_name,\n self.shape,\n self.delimiter)" ]
[ "0.68402326", "0.6816599", "0.661251", "0.6598817", "0.62749064", "0.62011576", "0.6185768", "0.6172839", "0.6161306", "0.61159104", "0.60363084", "0.6004497", "0.59940296", "0.59888047", "0.5987941", "0.59727305", "0.5962325", "0.5925651", "0.5914285", "0.5898657", "0.588313", "0.5868484", "0.5868041", "0.58628535", "0.5858451", "0.5857518", "0.58472586", "0.5837432", "0.58234817", "0.58209974" ]
0.70384914
0
Copy the CSV contents of the train data into the table
def copy_csv_to_train_table(conn, csv_file):
    COPY_TRAIN = "08_copy_train_to_table.psql"
    copy_expert_psql_script(conn, COPY_TRAIN, csv_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def import_training_data(target_col = 'label'):\n dir = os.path.dirname(os.path.dirname(__file__)) # go up one level to get root of this experiment\n path = os.path.join(dir, 'data','train.csv')\n utils_logr.info('Loading data from {} as pandas df'.format(path))\n df = pd.read_csv(path)\n y = df[target_col]\n df = df.drop(target_col, axis=1)\n return df, y", "def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)", "def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print 
(\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df", "def generate_training_df(df, id_csv):\n\n train_df = fetch_training_df(df)\n \n for column_name in ['song_id', 'track_id']:\n train_df[column_name] = train_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n \n train_df.drop(['year'], axis=1, inplace=True)\n train_df = merge_id_into_df(train_df, id_csv)\n train_df.drop(['song_id', 'track_id'], axis=1, inplace=True)\n\n return train_df", "def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def csv_to_vw(loc_csv, loc_output, train=True):\n start = datetime.now()\n print(\"\\nTurning %s into %s. Is_train_set? 
%s\" %\n (loc_csv, loc_output, train))\n\n with open(loc_output, \"wb\") as outfile:\n for e, row in enumerate(DictReader(open(loc_csv))):\n\n # Creating the features\n numerical_features = \"\"\n categorical_features = \"\"\n for k, v in row.items():\n if k not in [\"Label\", \"Id\"]:\n if \"I\" in k: # numerical feature, example: I5\n if len(str(v)) > 0: # check for empty values\n numerical_features += \" %s:%s\" % (k, v)\n if \"C\" in k: # categorical feature, example: C2\n if len(str(v)) > 0:\n categorical_features += \" %s\" % v\n\n # Creating the labels\n if train: # we care about labels\n if row['Label'] == \"1\":\n label = 1\n else:\n label = -1 # we set negative label to -1\n outfile.write(\"%s '%s |i%s |c%s\\n\" % (\n label, row['Id'], numerical_features, categorical_features))\n\n else: # we dont care about labels\n outfile.write(\"1 '%s |i%s |c%s\\n\" % (\n row['Id'], numerical_features, categorical_features))\n\n # Reporting progress\n if e % 1000000 == 0:\n print(\"%s\\t%s\" % (e, str(datetime.now() - start)))\n\n print(\"\\n %s Task execution time:\\n\\t%s\" %\n (e, str(datetime.now() - start)))", "def train(self, trainfile):", "def reformat_csv_header(self, path, train_file, test_file):\n\n \"\"\"\n \"id\",\"comment_text\",\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"\n \"\"\"\n\n train = pd.read_csv(os.path.join(path, train_file))\n test = pd.read_csv(os.path.join(path, test_file))\n train = train.drop('id', axis=1)\n test = test.drop('id', axis=1)\n for label in [\"jobflag\"]:\n test[label] = pd.Series(0, index=test.index)\n temp_path = os.path.join(path, \"temp\")\n if not os.path.isdir(temp_path):\n os.mkdir(temp_path)\n train.to_csv(os.path.join(temp_path, train_file),\n index=False, header=False)\n test.to_csv(os.path.join(temp_path, test_file),\n index=False, header=False)\n return temp_path", "def loadCSV(input_file):", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def insert_csv(self, file, tablename, sep=','):\n filehandel = open(file, 'r')\n self.cursor.copy_from(filehandel, tablename, sep)\n self.connection.commit()", "def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()", "def set_learning_data(self, df):\n\t\tdf.to_csv(self.learningDataFile)", "def create_initial_csv():\n\tif os.path.exists(args.train):\n\t\tprint(\"--Training data input found: \", args.train)\n\t\t#quick and dirty create csv file\n\t\theaders = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > log.csv\")\n\t\tbrocut = os.system(\"cat \"+str(args.train)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> log.csv\")\n\t\t\n\telse:\n\t\tprint(\"Bro training data input \"+str(args.train)+\" not found - needs to be in working directory\")\n\t\texit()", "def import_test():\n if os.path.exists(\"test.csv\"):\n #print (\"--testing data imported to data frame\\n\")\n test_df = pd.read_csv(\"test.csv\", index_col=0)\n else:\n print(\"training CSV not found\")\n exit()\n \n return test_df", "def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", 
dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def copy_csv_files(data_files_dict):\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n for name, files in data_files_dict.items():\n csv_file = files[0]\n # skip the header; this info is already in the table schema\n next(csv_file)\n if name == \"train\":\n copy_csv_to_train_table(conn, csv_file)\n elif name == \"questions\":\n copy_csv_to_questions_table(conn, csv_file)\n elif name == \"lectures\":\n copy_csv_to_lectures_table(conn, csv_file)\n elif name == \"example_test\":\n copy_csv_to_example_test_table(conn, csv_file)\n elif name == \"example_sample_submission\":\n copy_csv_to_example_sample_submission_table(conn, csv_file)\n\n print(f\"\"\"Successfully loaded CSV file into `{name}` table\n \"\"\")\n\n conn.close()", "def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])" ]
[ "0.682733", "0.6637438", "0.65697265", "0.6363471", "0.6350087", "0.6322782", "0.63010466", "0.61652935", "0.6139673", "0.6114018", "0.6067298", "0.606431", "0.6044262", "0.6012118", "0.60105383", "0.6004982", "0.5983898", "0.5981103", "0.59738964", "0.5959424", "0.59585667", "0.5931792", "0.5922887", "0.5915807", "0.5907675", "0.59015393", "0.58750105", "0.58748144", "0.58742696", "0.58586836" ]
0.7570099
0
Copy the csv contents of the lectures data into the table
def copy_csv_to_lectures_table(conn, csv_file):
    COPY_LECTURES = "10_copy_lectures_to_table.psql"
    copy_expert_psql_script(conn, COPY_LECTURES, csv_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_csv_to_train_table(conn, csv_file):\n COPY_TRAIN = \"08_copy_train_to_table.psql\"\n copy_expert_psql_script(conn, COPY_TRAIN, csv_file)", "def copy_csv_to_questions_table(conn, csv_file):\n COPY_QUESTIONS = \"09_copy_questions_to_table.psql\"\n copy_expert_psql_script(conn, COPY_QUESTIONS, csv_file)", "def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))", "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def extractionTitleRatings(cur, conn):\n fh = open(pathTitleRatings)\n reader = csv.reader(fh, delimiter = '\\t')\n firstLine = True\n for row in reader:\n if firstLine : firstLine = False # Read header\n else :\n idTitulo = int(row[0][2:])\n valuacionMedia = float(row[1])\n nombreVoto = int(row[2])\n # print(clasificacionInsert.format(idTitulo, valuacionMedia, nombreVoto))\n # REGISTER DATA IN CLASIFICACION TABLE\n cur.execute(clasificacionInsert.format(idTitulo, valuacionMedia, nombreVoto))\n conn.commit()", "def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)", "def read_csv():", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def copy_csv_files(data_files_dict):\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n for name, files in data_files_dict.items():\n csv_file = files[0]\n # skip the header; this info is already in the table schema\n next(csv_file)\n if name == \"train\":\n copy_csv_to_train_table(conn, csv_file)\n elif name == \"questions\":\n copy_csv_to_questions_table(conn, csv_file)\n elif name == \"lectures\":\n 
copy_csv_to_lectures_table(conn, csv_file)\n elif name == \"example_test\":\n copy_csv_to_example_test_table(conn, csv_file)\n elif name == \"example_sample_submission\":\n copy_csv_to_example_sample_submission_table(conn, csv_file)\n\n print(f\"\"\"Successfully loaded CSV file into `{name}` table\n \"\"\")\n\n conn.close()", "def insert_csv(self, file, tablename, sep=','):\n filehandel = open(file, 'r')\n self.cursor.copy_from(filehandel, tablename, sep)\n self.connection.commit()", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def load_subjects_to_db():\n try:\n with open(configuration.get_file_location(\"materias.csv\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\";\")\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n subject = MateriaClass.Materia(row[1], row[0])\n logging.info(subject.print())\n sql = connectSQLite.save_subject(subject)\n for row in sql.fetchall():\n logging.info(row)\n sql = connectSQLite.get_db().close()\n except Exception as error:\n logging.info(\"FALSE, exception ocurred\")\n print(error)\n # line_count += 1\n # print(f'Processed {line_count} lines.')", "def write_table_to_file(table):\n with open(\"story.csv\", \"w\") as file:\n for record in table:\n row = ';'.join(record)\n file.write(row + \"\\n\")", "def update_subjects_to_db():\n with open(configuration.get_file_location(\"materias.csv\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\";\")\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n subject = MateriaClass.Materia(row[1], row[0])\n logging.info(subject.print())\n connectSQLite.update_subject(subject)\n # line_count += 1\n # print(f'Processed {line_count} lines.')", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def csv(self, section=\"main\", column_headers=True):\n table_end = \"\\r\\n\\r\\n\\r\\n\"\n table_head_pos = self.header_dictionary[section]\n table_end_pos = self.raw_data[table_head_pos:].find(table_end)\n if table_end_pos == -1:\n table_end_pos = len(self.raw_data)\n else:\n table_end_pos += table_head_pos\n if column_headers:\n a = \",\".join(self.columns()) + \"\\n\"\n else:\n a = \"\"\n return a + self.raw_data[table_head_pos:table_end_pos].replace(\"\\t\",\",\").replace(\"\\r\",\"\")", "def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = 
'\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def get_table_from_file():\n with open(\"story.csv\", \"r\") as file:\n lines = file.readlines()\n table = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return table", "def etl_operations():\n tap = SQLTaps(db_type='mysql',\n username='root',\n password='',\n host='localhost',\n db_name='ETLtestDb')\n\n conn = tap.get_connection()\n\n query = 'SELECT id, filename, student_xml FROM StudentsData'\n\n rows = tap.get_rows(conn, query)\n\n rows_json = tap.covert_ResultProxy_to_JSON(rows)\n\n result_list = rows_json.get('result')\n converter = Convert()\n\n csv_row_list = list()\n\n headers = list()\n\n for row in result_list:\n xml_content = base64.b64decode(row.get('student_xml').encode())\n csv_content = converter.xml_to_csv(xml_content)\n headers = csv_content.get('columns')\n csv_row_list.append(csv_content.get('values'))\n\n csv_target('students.csv', csv_row_list, headers)", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def MaterializeData(self, output_path):\n output_file_name = os.path.join(output_path, self.file_name)\n\n if self.verbose:\n print 'Writing file: %s' % output_file_name\n\n csv_output_file = open(output_file_name, 'wb')\n csv_writer = csv.writer(csv_output_file)\n\n for row in self.table_data:\n csv_writer.writerow(row)\n\n csv_output_file.close()", "def loadCSV(input_file):", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def generate_csv_table(table_values):\n\n with open('ayasdi_assignment.csv', 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n filewriter.writerows(table_values)" ]
[ "0.64943576", "0.6417047", "0.62275773", "0.61845875", "0.6169708", "0.6109317", "0.6084161", "0.6071365", "0.6028989", "0.5982964", "0.5907622", "0.5895916", "0.5880129", "0.58722913", "0.58722913", "0.58679646", "0.5858774", "0.5850394", "0.5835062", "0.5832906", "0.58297986", "0.57919616", "0.5780959", "0.577929", "0.5760103", "0.5754795", "0.57267165", "0.5722471", "0.5708459", "0.5699094" ]
0.7012755
0
Copy the csv contents of the example test data into the table
def copy_csv_to_example_test_table(conn, csv_file):
    COPY_EXAMPLE_TEST = "11_copy_example_test_to_table.psql"
    copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def copy_csv_to_train_table(conn, csv_file):\n COPY_TRAIN = \"08_copy_train_to_table.psql\"\n copy_expert_psql_script(conn, COPY_TRAIN, csv_file)", "def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def import_test():\n if os.path.exists(\"test.csv\"):\n #print (\"--testing data imported to data frame\\n\")\n test_df = pd.read_csv(\"test.csv\", index_col=0)\n else:\n print(\"training CSV not found\")\n exit()\n \n return test_df", "def copy_csv_to_questions_table(conn, csv_file):\n COPY_QUESTIONS = \"09_copy_questions_to_table.psql\"\n copy_expert_psql_script(conn, COPY_QUESTIONS, csv_file)", "def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def test_csv_simple_input(self):\n\n # Mix of integer and string data. 
Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')", "def write_test_data(sql):\n for fname in sorted(glob.glob(\"mock_data/*.csv\")):\n print(fname)\n with open(fname, 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n i = 0\n for row in reader:\n if i == 0:\n if row != '' and ''.join(row) != '':\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + commajoin(row, [], 0) + \" VALUES\\n\")\n else:\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + \" VALUES\\n\")\n i += 1\n continue\n if row == '' or ''.join(row) == '':\n continue\n if i > 1:\n sql.write(\",\\n\")\n sql.write(commajoin(row, list(range(len(row))), 4))\n i += 1\n sql.write(\";\\n\\n\")", "def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])", "def test_csvfile(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n connection = apsw.Connection(\":memory:\")\n cursor = connection.cursor()\n connection.createmodule(\"csvfile\", VTModule(CSVFile))\n cursor.execute(\n f\"\"\"CREATE VIRTUAL TABLE test USING csvfile('{serialize('test.csv')}')\"\"\",\n )\n\n sql = 'SELECT * FROM test WHERE \"index\" > 11'\n data = list(cursor.execute(sql))\n assert data == [(12.0, 13.3, \"Platinum_St\"), (13.0, 12.1, \"Kodiak_Trail\")]\n\n sql = \"\"\"INSERT INTO test (\"index\", temperature, site) VALUES (14, 10.1, 'New_Site')\"\"\"\n cursor.execute(sql)\n sql = 'SELECT * FROM test WHERE \"index\" > 11'\n data = list(cursor.execute(sql))\n assert data == [\n (12.0, 13.3, \"Platinum_St\"),\n (13.0, 12.1, \"Kodiak_Trail\"),\n (14.0, 10.1, \"New_Site\"),\n ]\n\n sql = \"DELETE FROM test WHERE site = 'Kodiak_Trail'\"\n cursor.execute(sql)\n sql = 'SELECT * FROM test WHERE \"index\" > 11'\n data = list(cursor.execute(sql))\n assert data == [\n (12.0, 13.3, \"Platinum_St\"),\n (14.0, 10.1, \"New_Site\"),\n ]\n\n connection.close()\n\n # test garbage collection\n with open(\"test.csv\", encoding=\"utf-8\") as fp:\n updated_contents = fp.read()\n assert (\n updated_contents\n == \"\"\"\"index\",\"temperature\",\"site\"\n10.0,15.2,\"Diamond_St\"\n11.0,13.1,\"Blacktail_Loop\"\n12.0,13.3,\"Platinum_St\"\n14.0,10.1,\"New_Site\"\n\"\"\"\n )", "def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'", "def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def prepare_data_test(fname):\n # Read data\n data = pd.read_csv(fname)\n return data", "def 
test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()", "def test_dataframe(test_csv):\n\tif not os.path.isfile(test_csv):\n\t\tlogger.error(\"path {} does not exit\".format(test_csv))\n\t\tsys.exit(\"---test csv file--- does not exits\")\n\n\ttest_df = pd.read_csv(test_csv)\n\ttest_df['Case ID'] = test_df['Case ID'].apply(lambda x:\n\t\t\t\t\t\t\t\t\t\t\t\t\t str(x)+'.png')\n\n\ttest_df['Gender'] = test_df['Sex'].apply(lambda x:\n\t\t\t\t\t\t\t\t\t\t\t\t0 if x=='M' else 1)\n\t\n\ttest_df.rename(columns={'Ground truth bone age (months)':\n\t\t\t\t\t\t\t\t 'Age(months)'}, inplace=True)\n\n\treturn test_df", "def test_write(test_filtered_data):\r\n valuation_service.write_data(test_filtered_data,directory='Test_files/test_top_products.csv')\r\n\r\n test_data_r = pd.read_csv('Test_files/test_top_products.csv')\r\n\r\n assert_frame_equal(test_data_r,test_filtered_data)", "def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))", "def copy_csv_files(data_files_dict):\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n for name, files in data_files_dict.items():\n csv_file = files[0]\n # skip the header; this info is already in the table schema\n next(csv_file)\n if name == \"train\":\n copy_csv_to_train_table(conn, csv_file)\n elif name == \"questions\":\n copy_csv_to_questions_table(conn, csv_file)\n elif name == \"lectures\":\n copy_csv_to_lectures_table(conn, csv_file)\n elif name == \"example_test\":\n 
copy_csv_to_example_test_table(conn, csv_file)\n elif name == \"example_sample_submission\":\n copy_csv_to_example_sample_submission_table(conn, csv_file)\n\n print(f\"\"\"Successfully loaded CSV file into `{name}` table\n \"\"\")\n\n conn.close()", "def read_csv_as_table(csv_input_file_name, skip_first_line=False):\n output = []\n with open(csv_input_file_name, 'r') as fin:\n csv_content = csv.reader(fin, delimiter=',')\n if skip_first_line:\n next(csv_content, None)\n for row in csv_content:\n output.append(row)\n return output", "def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def testExampleCSVGeneration(ref):\n df = generate_dataframe()\n outpath = os.path.join(ref.tmp_dir, 'csv_result.csv')\n df.to_csv(outpath, index=False)\n columns = ref.all_fields_except(['random'])\n ref.assertCSVFileCorrect(outpath, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]", "def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')", "def test_source_copy_round_trip(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testcopyto (\n a int,\n b text,\n c float,\n d uuid,\n PRIMARY KEY (a, b)\n )\"\"\")\n\n insert_statement = self.session.prepare(\"INSERT INTO testcopyto (a, b, c, d) VALUES (?, ?, ?, 
?)\")\n args = [(i, str(i), float(i) + 0.5, uuid4()) for i in range(1000)]\n execute_concurrent_with_args(self.session, insert_statement, args)\n\n results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto TO '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n\n # import the CSV file with COPY FROM\n self.session.execute(\"TRUNCATE ks.testcopyto\")\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto FROM '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n new_results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n assert sorted(results) == sorted(new_results)" ]
[ "0.6870561", "0.68621397", "0.68282795", "0.6661536", "0.64871234", "0.63820755", "0.6304575", "0.623684", "0.6180814", "0.61601466", "0.6142755", "0.6124967", "0.6119393", "0.60797185", "0.60796", "0.6040634", "0.6039739", "0.6036316", "0.6031299", "0.60189676", "0.5994174", "0.5987539", "0.5981039", "0.5963775", "0.5963775", "0.5952859", "0.5951919", "0.5941521", "0.59362257", "0.59280086" ]
0.7520836
0
Copy the csv contents of the census tract to example sample submission data into the table
def copy_csv_to_example_sample_submission_table(conn, csv_file):
    COPY_EXAMPLE_SAMPLE_SUBMISSION = "12_copy_example_sample_submission_to_table.psql"
    copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_csv_to_train_table(conn, csv_file):\n COPY_TRAIN = \"08_copy_train_to_table.psql\"\n copy_expert_psql_script(conn, COPY_TRAIN, csv_file)", "def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)", "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def copy_csv_to_questions_table(conn, csv_file):\n COPY_QUESTIONS = \"09_copy_questions_to_table.psql\"\n copy_expert_psql_script(conn, COPY_QUESTIONS, csv_file)", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)", "def submission(all_preds, ORIGINAL, OUTPUT, VER, WRMSSEscore):\n submission = pd.read_csv(ORIGINAL+'sample_submission.csv')[['id']]\n submission = submission.merge(all_preds, on=['id'], how='left').fillna(0)\n submission.to_csv(OUTPUT + 'submission_v'+str(VER) + \"_\" + str(round(WRMSSEscore, 3)) + '.csv', index=False)", "def main():\n\n preprocessed_file = preprocess_clinical_trials()\n\n preprocessed_file.to_csv(PREPROCESSED_CLINICAL_TRIALS_FILE_PATH, index=False)", "def import_test():\n if os.path.exists(\"test.csv\"):\n #print (\"--testing data imported to data frame\\n\")\n test_df = pd.read_csv(\"test.csv\", index_col=0)\n else:\n print(\"training CSV not found\")\n exit()\n \n return test_df", "def create_example_sample_submission_table(conn):\n execute_sql_script(conn, \"07_create_example_sample_submission_table.sql\")", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def process_file(cur, conn, table, filepath):\n\n taxi_table_insert = (\"\"\"\n INSERT INTO {} (trip_id, taxi_id, trip_sec, trip_mile)\n VALUES (%s, %s, %s, %s);\n \"\"\".format(table))\n\n # open csv file\n # https://stackoverflow.com/questions/17444679/reading-a-huge-csv-file\n df = pd.read_csv(filepath)\n\n df = df[['Trip ID', 'Taxi ID', 'Trip Seconds', 'Trip Miles']]\n\n df.dropna(inplace=True)\n\n # insert trip records\n for index, row in df.iterrows():\n cur.execute(taxi_table_insert, row)\n conn.commit()", "def doSubmission(data, subfile='datasets/submission.csv'):\n import csv\n data.to_csv(subfile,\n columns=['TRIP_ID', 'LATITUDE', 'LONGITUDE'], index=None,\n quoting=csv.QUOTE_NONNUMERIC)", "def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )", "def test_part1_code():\n\n # Simple test for reader\n test_table = read_csv_file(\"test_case.csv\") # create a small CSV for this test\n print_table(test_table)\n print()\n\n # Test the writer\n cancer_risk_table = 
read_csv_file(\"cancer_risk05_v4_county.csv\")\n write_csv_file(cancer_risk_table, \"cancer_risk05_v4_county_copy.csv\")\n cancer_risk_copy = read_csv_file(\"cancer_risk05_v4_county_copy.csv\")\n\n # Test whether two tables are the same\n for row in range(len(cancer_risk_table)):\n for col in range(len(cancer_risk_table[0])):\n if cancer_risk_table[row][col] != cancer_risk_copy[row][col]:\n print(\"Difference at\", row, col, cancer_risk_table[row][col], cancer_risk_copy[row][col])", "def files_to_submissions(self):\n url = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-latest-data-source-details.csv\"\n self.parse_file(url)", "def main():\n df_path = './DuReader_reformatted/DuReader_for_dbCombinedPara500-150-sample10000.csv'\n df = pd.read_csv(df_path, sep='\\t', index_col=0).dropna() # drop 2 nan question and 8 nan title\n epoch = 5 # about 6 hours\n total_time_list, back_end_time_list = get_time_avg(df['question'].tolist(), epoch)\n df['time_avg'] = total_time_list\n df['backend_time'] = back_end_time_list\n new_df_path = os.path.splitext(df_path)[0] + '-whole-epoch-' + str(epoch) + '.csv'\n df.to_csv(new_df_path, sep='\\t')\n print('file successfully saved to ', new_df_path)", "def prepare_participants_for_metadata_export(path_to_samples_info, tsca_id): \n raw = pd.read_table(path_to_samples_info)\n print( \"%d Participants in this batch\" % raw['individual_id'].unique().shape[0] )\n # Data to upload\n data = pd.DataFrame(raw.individual_id.drop_duplicates()).rename(columns={'individual_id':'entity:participant_id'})\n return data", "def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def create_submission(pred_sub, name_of_the_file='submission'):\n\n df_sub = pd.DataFrame(pred_sub, columns=['Prediction'])\n df_sub.index.name = 'Id'\n df_sub.index = np.arange(1, 10001)\n df_sub[df_sub['Prediction'] == 0] = -1\n df_sub.to_csv(name_of_the_file + '.csv',index_label='Id')\n\n print('submission file created as \"'+ name_of_the_file+'.csv\"')", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in 
csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def insert_csv(self, file, tablename, sep=','):\n filehandel = open(file, 'r')\n self.cursor.copy_from(filehandel, tablename, sep)\n self.connection.commit()", "def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()", "def test_source_copy_round_trip(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testcopyto (\n a int,\n b text,\n c float,\n d uuid,\n PRIMARY KEY (a, b)\n )\"\"\")\n\n insert_statement = self.session.prepare(\"INSERT INTO testcopyto (a, b, c, d) VALUES (?, ?, ?, ?)\")\n args = [(i, str(i), float(i) + 0.5, uuid4()) for i in range(1000)]\n execute_concurrent_with_args(self.session, insert_statement, args)\n\n results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto TO '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n\n # import the CSV file with COPY FROM\n self.session.execute(\"TRUNCATE ks.testcopyto\")\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto FROM '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n new_results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n assert sorted(results) == sorted(new_results)", "def test_semmeddb_csv_to_tsv():\n t = PandasTransformer()\n nodes_file = os.path.join(resource_dir, \"semmed/semmeddb_test_nodes.csv\")\n edges_file = os.path.join(resource_dir, \"semmed/semmeddb_test_edges.csv\")\n output = os.path.join(target_dir, \"semmeddb_test_tsv_export\")\n\n t.parse(nodes_file)\n t.parse(edges_file)\n\n # save output as TSV in a tar archive\n t.save(output, extension='tsv')", "def import_training_data(target_col = 'label'):\n dir = os.path.dirname(os.path.dirname(__file__)) # go up one level to get root of this experiment\n path = os.path.join(dir, 'data','train.csv')\n utils_logr.info('Loading data from {} as pandas df'.format(path))\n df = pd.read_csv(path)\n y = df[target_col]\n df = df.drop(target_col, axis=1)\n return df, y", "def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', 
encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)", "def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def copy_csv_files(data_files_dict):\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n for name, files in data_files_dict.items():\n csv_file = files[0]\n # skip the header; this info is already in the table schema\n next(csv_file)\n if name == \"train\":\n copy_csv_to_train_table(conn, csv_file)\n elif name == \"questions\":\n copy_csv_to_questions_table(conn, csv_file)\n elif name == \"lectures\":\n copy_csv_to_lectures_table(conn, csv_file)\n elif name == \"example_test\":\n copy_csv_to_example_test_table(conn, csv_file)\n elif name == \"example_sample_submission\":\n copy_csv_to_example_sample_submission_table(conn, csv_file)\n\n print(f\"\"\"Successfully loaded CSV file into `{name}` table\n \"\"\")\n\n conn.close()", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')" ]
[ "0.68695116", "0.6782291", "0.6613778", "0.6355474", "0.61145604", "0.61135346", "0.59616476", "0.58550143", "0.5852218", "0.58010006", "0.5678556", "0.56541306", "0.564899", "0.5647298", "0.5637213", "0.5631169", "0.56293553", "0.5625885", "0.5548812", "0.5544546", "0.5515721", "0.5510245", "0.5486631", "0.54809743", "0.5462806", "0.545029", "0.542966", "0.5420117", "0.54194224", "0.541819" ]
0.7247699
0
Given a DB connection and a file path to a SQL script, open up the SQL script and execute it
def execute_sql_script(conn, script_filename):
    file_contents = open_sql_script(script_filename)
    cursor = conn.cursor()
    cursor.execute(file_contents)
    conn.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_script(file_name):\n conn = psycopg2.connect(config['SQLALCHEMY_DATABASE_URI'])\n cur = conn.cursor()\n sql_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)\n cur.execute(open(sql_file, 'r').read())\n conn.commit()\n cur.close()\n conn.close()", "def execute_script_from_file(self, filename):\n filename = os.path.join(self.curr_dir, filename)\n # Connect to db\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with open(filename, \"r\", encoding=\"utf-8\") as sql_file:\n sql_script = sql_file.read()\n\n # all SQL commands (split on ';')\n sql_commands = filter(None, sql_script.split(\";\"))\n # Execute every command from the input file\n for command in sql_commands:\n # This will skip and report errors\n # For example, if the tables do not yet exist, this will skip over\n # the DROP TABLE commands\n try:\n cursor.execute(command)\n except OperationalError as msg:\n print(\"Command skipped: \", msg)\n conn.commit()\n conn.close()", "def open_sql_script(script_filename):\n dir = os.path.dirname(__file__)\n relative_filename = os.path.join(dir, 'sql', script_filename)\n\n file_obj = open(relative_filename, 'r')\n file_contents = file_obj.read()\n file_obj.close()\n\n return file_contents", "def execute_sql_files(connection, sql_files):\n for filename in sql_files:\n statement = resource_text(filename)\n for sub_statement in statement.split(\";\"):\n if sub_statement.strip():\n connection.execute(text(sub_statement))", "def read_sql_from_file(self, filename):\n tmpLines = ''\n logger.info(\"Reading from {}\".format(filename))\n\n with open(filename, 'r') as fh:\n tmpLines = fh.readlines()\n \n sqlquery = \"\".join(tmpLines)\n cursor = self.conn.cursor()\n\n try:\n cursor.execute(sqlquery)\n except Exception as e:\n logger.info(e)\n sys.exit(1)\n return", "def sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()", "def run_sql_file(self, sqlfile):\n try:\n queries = self.get_queries_from(sqlfile)\n queries_executed = 0\n for query in queries:\n if self._execute_query(query, values=None): # execute each query\n queries_executed += 1\n print(\"{} Executed queries from {}\".format(queries_executed, sqlfile))\n except pymysql.InternalError as error:\n print(error.args[1])", "def run_sql_from_file(conn, path, replace={}):\n with open(path, 'r') as f:\n query = [s.strip() + ';' for s in f.read().split(';')[:-1]]\n for s in query:\n for k, v in replace.items():\n s = s.replace(k, v)\n run_sql_from_string(conn, s)", "def open (self, sql_file):\n fd = open(sql_file, 'r')\n sql = fd.read()\n fd.close()\n self.sql = sql.replace(UTF_8_STR, \"\")", "def DBExecuteScript( DB: sqlite3.Connection, sql:str, *args ):\n assert isinstance( DB, sqlite3.Connection )\n DB.executescript( sql )\n DB.commit()", "def run_sql_file(filename, connection, version, lastversion):\n cursor = connection.cursor()\n for line in open(filename):\n cursor.execute(line)\n connection.commit()\n cursor.execute(\n \"update ecs.versionTable SET ecs.versionTable.version='{}' \"\n \"where ecs.versionTable.version ='{}';\".format(version, lastversion))\n connection.commit()\n print(\"VersionTable updated. 
Current version is now: {}\".format(version))", "def copy_expert_psql_script(conn, script_filename, csv_file):\n file_contents = open_sql_script(script_filename)\n cursor = conn.cursor()\n cursor.copy_expert(file_contents, csv_file)\n conn.commit()", "def execute_queries_from_file(self, file_name, file_path=test_data_path):\n if file_path:\n with open(file_path + file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n else:\n with open(file_name, 'rb') as file:\n query = sqlalchemy.sql.text(file)\n self.execute_query(query)\n return self", "def main():\n args = sys.argv[1:]\n if len(args) != 2:\n print('Usage: sqlite.py db SQL', file=sys.stderr)\n return\n\n db, sql = args\n\n print('Opening {}'.format(args[0]))\n conn = connect(db)\n\n for q in sql.split(';'):\n print('Executing {}'.format(q))\n with conn:\n conn.execute(q)", "def execute_sql(sql_stmt, host_in='client'):\n #db = create_engine(host_in,'')\n #sql = sqltext(sql_stmt) \n #return db.execute(sql)\n with open('temp.sql','w') as sql:\n sql.write(sql_stmt)\n\n proc=sp.Popen(\"mysql < temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out,err = proc.communicate()\n sp.Popen(\"rm temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n return out.strip(),err.strip()", "def _open_sql_file(dbname):\n try:\n dbpath = pathlib.Path(dbname).resolve()\n conn = sqlite3.connect(f\"{dbpath.as_uri()}?mode=ro\", timeout=1, uri=True)\n c = conn.cursor()\n except sqlite3.Error as e:\n sys.exit(f\"An error occurred opening sqlite file: {e.args[0]} {dbname}\")\n return (conn, c)", "def read_sql_from_file(path, conn):\n with open(path, 'r', encoding='utf-8') as f:\n qu = f.read()\n \n df = read_sql(qu, conn)\n \n return df", "def sqlfile(path, **kw):\n sql = path.read_text()\n return sql.format(**kw)", "def run_command(settings_filename, query_filename):\n conn = connect_to_db(settings_filename)\n cursor = conn.cursor()\n\n query = load_query(query_filename)\n cursor.execute(query)\n conn.commit()\n \n cursor.close()\n conn.close()\n\n return True", "def submit_job_snowflake(self, sql_file_path):\n try:\n self.get_cursor()\n sql_file_path = str(sql_file_path).strip()\n self.snowflake_query_exec(self.cur, self.conn.schema, sql_file_path)\n except:\n self.cur.close()\n raise Exception(\"Snowflake step Failed, Job failed\")\n finally:\n self.cur.close()", "def run_setup_script(self, script_path):\n try:\n f = open(script_path, 'r')\n setup_script = f.read()\n # print(setup_script)\n c = self.conn.cursor()\n c.executescript(setup_script)\n except (Error, IOError) as e:\n print('[Datanase] Error:')\n print(e)", "def get_sql_from_file(self, sql_path):\n with open(sql_path, 'r') as f:\n sql = \"\"\n for line in f.readlines():\n line = line.strip()\n if not line.startswith(\"--\"):\n sql += \" \" + line\n return sql", "def execute_sql(db_name, sql):\n db_path = 'db/' + db_name + '.db'\n db_file = Path(db_path)\n if not db_file.is_file():\n log(\"execute_sql: Calling %s, but doesn't exist\", db_path, 'warning')\n create_db(db_name)\n db_con = sqlite3.connect(db_path)\n c = db_con.cursor()\n c.execute(sql)\n # output = c.fetchone()\n output = c.fetchall()\n logging.getLogger('sql').info(output)\n db_con.commit()\n db_con.close()\n return output", "def connect_sql(path):\n\n global connection, cursor\n\n connection = sqlite3.connect(path)\n cursor = connection.cursor()\n connection.commit()", "def execute_script(self, script, nolog=True, close=True):\n self._check_connection()\n if not nolog: # pragma: no cover\n lines = script.split(\"\\n\")\n 
if len(lines) > 20:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines[:20]]))\n else:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines]))\n cur = self._connection.cursor()\n res = cur.executescript(script)\n if close:\n cur.close()\n if not nolog:\n self.LOG(\"SQL end\") # pragma: no cover\n else:\n return res", "def run_sql_from_string(conn, statement):\n statement = sqlalchemy.text(statement)\n conn.execute(statement)", "def parse_sql(filename=gconf.DB_builder.SQL_CONSTRUCTION):\n data = open(filename, 'r').readlines()\n statements = []\n delimiter = ';'\n stmt = ''\n\n for line_no, line in enumerate(data):\n if not line.strip():\n continue\n\n if line.startswith('--'):\n continue\n\n if 'delimiter' in line:\n delimiter = line.split()[1]\n continue\n\n if delimiter not in line:\n stmt += line.replace(delimiter, ';')\n continue\n\n if stmt:\n stmt += line\n statements.append(stmt.strip())\n stmt = ''\n else:\n statements.append(line.strip())\n return statements", "def get_sql_from_file(file_name=None):\n # File does not exist\n if path.isfile(file_name) is False:\n logger.error(\"File load error: {}\".format(file_name))\n return None\n\n with open(file_name, \"r\") as sql_file:\n result = sql_file.read().split(';')\n result.pop() #Drop the last entry\n for idx, statement in enumerate(result):\n result[idx] = statement + \";\"\n return result", "def execute_and_commit_sql(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()", "def crtdb(dbfilepath, crtdb_sql_path):\n\t\n\tcrtdb_sql = \"\"\n\twith open(crtdb_sql_path) as f:\n\t\tcrtdb_sql = f.read()\n\n\tconn = sqlite3.connect(dbfilepath)\n\tcurs = conn.cursor()\n\tcurs.executescript(crtdb_sql)\n\tconn.commit()" ]
[ "0.80921817", "0.7998528", "0.770649", "0.726337", "0.7130588", "0.7008225", "0.6983888", "0.6938017", "0.682747", "0.6824145", "0.6633691", "0.6620906", "0.66191906", "0.66088134", "0.6569327", "0.64927316", "0.6413945", "0.6385292", "0.6356701", "0.62582904", "0.6225003", "0.62132", "0.6177962", "0.61077696", "0.6068319", "0.60507274", "0.6040837", "0.603034", "0.60175705", "0.5951568" ]
0.85166866
0
Given a file path, open the file and return its contents. We assume that the file path is always inside the sql directory
def open_sql_script(script_filename):
    dir = os.path.dirname(__file__)
    relative_filename = os.path.join(dir, 'sql', script_filename)

    file_obj = open(relative_filename, 'r')
    file_contents = file_obj.read()
    file_obj.close()

    return file_contents
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_contents(self):\n with open(self.sql_file, 'r') as sql:\n text = sql.read()\n # text = text.replace('\\n', '\\n\\n')\n # text=sql.read()\n # TODO: fix some text replacement issues here\n # https://github.com/andialbrecht/sqlparse/issues/313\n return self.filter_text(text)", "def open_and_read_file(file_path):\n contents = open(file_path).read()\n # your code goes here\n\n return contents", "def open_and_read_file(file_path):\n\n # your code goes here\n return open(file_path).read()", "def sqlfile(path, **kw):\n sql = path.read_text()\n return sql.format(**kw)", "def read_sql_from_file(path, conn):\n with open(path, 'r', encoding='utf-8') as f:\n qu = f.read()\n \n df = read_sql(qu, conn)\n \n return df", "def readFile(self, path):\n return self.session.request('diag/files/?q=%s'\n % (path))", "def get_sql_string_from_file(path: str) -> str:\n\n with open(path, 'r') as r:\n sql = r.read()\n return sql.replace('\\n', '').replace('\\t', '')", "def open_and_read_file(file_path):\n\n # your code goes here\n with open(file_path) as open_file:\n open_file = open_file.read()\n return open_file", "def loadSQL_beddays(filepath='O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\Lungemed.sql'):\n content = open(filepath, 'r').read()\n return content", "def open_and_read_file(file_path):\n\n # your code goes here\n file_ = open(file_path).read()\n\n return file_", "def open_and_read_file(file_path):\n\n # Read the file, return text as a string titled \"contents\"\n contents = open(file_path).read()\n\n # Return contents of your file as one long string\n return contents", "def get_sql_path(file_path: str) -> str:\n dag_dir = configuration.get('core', 'dags_folder')\n return os.path.join(dag_dir, file_path)", "def open_and_read_file(file_path):\n\n file = open(file_path)\n file = file.read()\n\n return file", "def open_and_read_file(file_path):\n\n # your code goes here\n file_name = (open(file_path)).read()\n return file_name", "def read_file(path: str):\n try:\n return pd.read_csv(f\"{Config.DATA_RAW_DIRECTORY}/{path}\")\n except OSError as ex:\n if Config.VERBOSE:\n print(ex)\n return None", "def open_and_read_file(file_path):\n\n # your code goes here\n\n f = open(file_path, \"r\")\n text = f.read()\n\n return text", "def get_content_from_file(path):\n\n\t\tPathUtil.ensure_path_exists(path)\n\t\twith open(path) as file:\n\t\t\tfile_content = file.read()\n\t\treturn file_content", "def get_sql_from_file(self, sql_path):\n with open(sql_path, 'r') as f:\n sql = \"\"\n for line in f.readlines():\n line = line.strip()\n if not line.startswith(\"--\"):\n sql += \" \" + line\n return sql", "def get_file_contents(path):\n try:\n with open(path) as f:\n return f.read()\n except IOError:\n return None", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def loadSQL_visitations(filepath='O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\Lungemed_visitationsoprindelse_nogroup.sql'):\n content = open(filepath, 'r').read()\n return content", "def read(path):", "def open_and_read_file(file_path):\n\n # your code goes here\n text_file = open(file_path)\n text_string= text_file.read()\n text_file.close()\n return text_string", "def 
_open_sql_file(dbname):\n try:\n dbpath = pathlib.Path(dbname).resolve()\n conn = sqlite3.connect(f\"{dbpath.as_uri()}?mode=ro\", timeout=1, uri=True)\n c = conn.cursor()\n except sqlite3.Error as e:\n sys.exit(f\"An error occurred opening sqlite file: {e.args[0]} {dbname}\")\n return (conn, c)", "def get_data_query(file_name):\n with open(file_name, 'r') as graphql_query:\n return graphql_query.read()", "def _findfile(self, path):\n return DataSource._findfile(self, self._fullpath(path))", "def _file_load(path):\n _, _, file_path = path.split('/', 2)\n with open(file_path, 'r', encoding='utf-8') as file_handle:\n return file_handle.read()", "def open_file(path):\n input_file = os.path.join(path)\n with open(input_file) as f:\n dataset = f.read()\n return dataset", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()" ]
[ "0.6938102", "0.68973416", "0.6869283", "0.68430454", "0.68159455", "0.67649686", "0.6757924", "0.67353684", "0.6700045", "0.6696577", "0.66157943", "0.65953904", "0.65915734", "0.6547246", "0.65232974", "0.6515384", "0.6509697", "0.64971465", "0.64961016", "0.64459", "0.6377716", "0.63741344", "0.63736117", "0.6370218", "0.6349239", "0.63266134", "0.6323127", "0.63122255", "0.6311912", "0.63082874" ]
0.73147106
0
Given a DB connection and a file path to a PSQL script, open up the PSQL script and use it to run copy_expert
def copy_expert_psql_script(conn, script_filename, csv_file): file_contents = open_sql_script(script_filename) cursor = conn.cursor() cursor.copy_expert(file_contents, csv_file) conn.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_sql_script(conn, script_filename):\n file_contents = open_sql_script(script_filename)\n cursor = conn.cursor()\n cursor.execute(file_contents)\n conn.commit()", "def open_sql_script(script_filename):\n dir = os.path.dirname(__file__)\n relative_filename = os.path.join(dir, 'sql', script_filename)\n\n file_obj = open(relative_filename, 'r')\n file_contents = file_obj.read()\n file_obj.close()\n\n return file_contents", "def execute_script(file_name):\n conn = psycopg2.connect(config['SQLALCHEMY_DATABASE_URI'])\n cur = conn.cursor()\n sql_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)\n cur.execute(open(sql_file, 'r').read())\n conn.commit()\n cur.close()\n conn.close()", "def execute_script_from_file(self, filename):\n filename = os.path.join(self.curr_dir, filename)\n # Connect to db\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with open(filename, \"r\", encoding=\"utf-8\") as sql_file:\n sql_script = sql_file.read()\n\n # all SQL commands (split on ';')\n sql_commands = filter(None, sql_script.split(\";\"))\n # Execute every command from the input file\n for command in sql_commands:\n # This will skip and report errors\n # For example, if the tables do not yet exist, this will skip over\n # the DROP TABLE commands\n try:\n cursor.execute(command)\n except OperationalError as msg:\n print(\"Command skipped: \", msg)\n conn.commit()\n conn.close()", "def __call__(self, dbio, *args, **kwargs):\n sql, f = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(\"'copy_expert' will run\\n{}\".format(sql))\n cur = dbio.conn.cursor()\n cur.copy_expert(sql, f)\n cur.close()\n dbio.conn.commit()\n f.close()\n else:\n logger.info(\"'copy_expert' will run\\n{}\".format(sql))\n f.close()", "def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)", "def _open_sql_file(dbname):\n try:\n dbpath = pathlib.Path(dbname).resolve()\n conn = sqlite3.connect(f\"{dbpath.as_uri()}?mode=ro\", timeout=1, uri=True)\n c = conn.cursor()\n except sqlite3.Error as e:\n sys.exit(f\"An error occurred opening sqlite file: {e.args[0]} {dbname}\")\n return (conn, c)", "def psql(self, dbname, query=None, filename=None, username=None):\n psql = self.get_bin_path(\"psql\")\n psql_params = [\n psql, \"-XAtq\", \"-h{}\".format(self.host), \"-p {}\".format(self.port), dbname\n ]\n\n if query:\n psql_params.extend((\"-c\", query))\n elif filename:\n psql_params.extend((\"-f\", filename))\n else:\n raise QueryException('Query or filename must be provided')\n\n # Specify user if needed\n if username:\n psql_params.extend((\"-U\", username))\n\n # start psql process\n process = subprocess.Popen(\n psql_params,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n # wait untill it finishes and get stdout and stderr\n out, err = process.communicate()\n return process.returncode, out, err", "def copy_db():\n local('ssh %s pg_dump -U djangoproject -c djangoproject | psql djangoproject' % env.hosts[0])", "def copy_into_postgres(df, conn, fname):\n print(f\"`{fname}`: Loading data...\")\n\n null_cols = [\n \"col_a\", \"col_b\", \"col_c\", \"col_d\", \"col_e\", \"col_f\", \"col_g\", \"col_h\", \"col_i\", \"col_j\", \"col_k\", \"col_l\", \"col_m\", \n \"col_n\", \"col_o\", \"col_p\", \"col_q\", \"col_r\", \"col_s\", \"col_t\", \"col_u\", \"col_v\", \"col_w\", \"col_x\", \"col_y\", \"col_z\",\n ]\n parsed_cols = [\"file_index\", 
\"file_name\"] + null_cols[:df.shape[1]]\n insert_cols = \", \".join(parsed_cols)\n\n df.insert(loc=0, column=\"file_name\", value=fname)\n\n with conn.cursor() as curs:\n with io.StringIO() as csv_buffer:\n df.to_csv(csv_buffer, sep=\",\", header=False, index=True)\n csv_buffer.seek(0)\n curs.copy_expert(f\"COPY extract_loader_landing_zone ({insert_cols}) FROM STDIN (FORMAT csv, DELIMITER ',', HEADER FALSE);\", file=csv_buffer)\n conn.commit()\n\n print(f\"`{fname}`: Loaded data!\")\n\n return None", "def execute_sql(sql_stmt, host_in='client'):\n #db = create_engine(host_in,'')\n #sql = sqltext(sql_stmt) \n #return db.execute(sql)\n with open('temp.sql','w') as sql:\n sql.write(sql_stmt)\n\n proc=sp.Popen(\"mysql < temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out,err = proc.communicate()\n sp.Popen(\"rm temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n return out.strip(),err.strip()", "def open (self, sql_file):\n fd = open(sql_file, 'r')\n sql = fd.read()\n fd.close()\n self.sql = sql.replace(UTF_8_STR, \"\")", "def read_sql_from_file(self, filename):\n tmpLines = ''\n logger.info(\"Reading from {}\".format(filename))\n\n with open(filename, 'r') as fh:\n tmpLines = fh.readlines()\n \n sqlquery = \"\".join(tmpLines)\n cursor = self.conn.cursor()\n\n try:\n cursor.execute(sqlquery)\n except Exception as e:\n logger.info(e)\n sys.exit(1)\n return", "def structure_and_repopulate_db() -> None:\n with open('db.sql', encoding=\"utf-8\") as f:\n commands = f.read().strip().split(';')\n commands = [command.strip() for command in commands]\n for command in commands:\n my_cursor.execute(command)\n my_db.commit()\n print('Source structure created, data repopulated')", "def db(filename = 'P51-11'):\n import pdb\n sys.argv[1:] = ['-v', filename]\n pdb.run('extract.main()')", "def execute_sql_files(connection, sql_files):\n for filename in sql_files:\n statement = resource_text(filename)\n for sub_statement in statement.split(\";\"):\n if sub_statement.strip():\n connection.execute(text(sub_statement))", "def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)", "def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)", "def copy_expert(self, table, path, delimiter=';'):\n connect = self.connect()\n\n if connect is not None:\n with open(path, 'r') as fs:\n query = \"COPY {} \" \\\n \"FROM STDIN \" \\\n \"WITH CSV HEADER DELIMITER '{}'\".format(table,\n delimiter)\n with connect.cursor() as cur:\n cur.copy_expert(sql=query, file=fs)\n connect.commit()\n cur.close()\n connect.close()\n\n return path", "def copy_file_to_table(self, schema, table, filepath):\n fields = \", \".join(self.schemas[schema][table][0])\n sql = f'set role {self.write_role}; ' \\\n f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \\',\\' CSV header;'\n return sql, open(filepath, 'r')", "def run_sql_from_file(conn, path, replace={}):\n with open(path, 'r') as f:\n query = [s.strip() + ';' for s in f.read().split(';')[:-1]]\n for s in query:\n for k, v in replace.items():\n s = s.replace(k, v)\n run_sql_from_string(conn, s)", "def run_command(settings_filename, query_filename):\n conn = connect_to_db(settings_filename)\n cursor = conn.cursor()\n\n query = load_query(query_filename)\n cursor.execute(query)\n conn.commit()\n \n 
cursor.close()\n conn.close()\n\n return True", "def sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()", "def pg(file):\n global_config = get_config(file)\n config = global_config.get(\"postgres\")\n databases = config.get(\"databases\")\n for db in databases:\n result = postgres.load(config, db)\n print_result(db, result)", "def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")", "def run_sql_file(self, sqlfile):\n try:\n queries = self.get_queries_from(sqlfile)\n queries_executed = 0\n for query in queries:\n if self._execute_query(query, values=None): # execute each query\n queries_executed += 1\n print(\"{} Executed queries from {}\".format(queries_executed, sqlfile))\n except pymysql.InternalError as error:\n print(error.args[1])", "def copy_csv_to_questions_table(conn, csv_file):\n COPY_QUESTIONS = \"09_copy_questions_to_table.psql\"\n copy_expert_psql_script(conn, COPY_QUESTIONS, csv_file)", "def run_sql_file(filename, connection, version, lastversion):\n cursor = connection.cursor()\n for line in open(filename):\n cursor.execute(line)\n connection.commit()\n cursor.execute(\n \"update ecs.versionTable SET ecs.versionTable.version='{}' \"\n \"where ecs.versionTable.version ='{}';\".format(version, lastversion))\n connection.commit()\n print(\"VersionTable updated. 
Current version is now: {}\".format(version))", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n 
value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def import_file(filepath, db):\n # Logging\n log_main = logging.getLogger(__name__)\n log_import = log_main.getChild('import_files')\n log_import = log_import.getChild(filepath.split('/')[-1])\n log_import.info('started')\n start = time()\n\n # Variables used in data processing\n memory_buff = StringIO()\n curr = None\n cols = ['tweetID', 'date', 'message', 'username', 'userID', 'language',\n 'longitude', 'latitude', 'retweet']\n sql = \"\"\"COPY \"raw_tweets\" (\"tweetID\", \"date\", \"message\", \"username\", \"userID\", \"language\", \"longitude\", \"latitude\", \"retweet\") \n FROM STDIN \n WITH (FORMAT CSV, HEADER TRUE, DELIMITER '\\t');\n \"\"\"\n \n # Try reading the file\n try:\n df = pd.read_csv(filepath, \n usecols=cols, engine='c', \n memory_map=True, low_memory=False,\n dtype={'userID': np.int64, 'tweetID': np.int64})\n except Exception as e:\n log_import.warn('error on read_csv')\n memory_buff.close()\n print (e)\n return\n\n # Attempt to open up a connection to database.\n try:\n connn = db.connect()\n conn = db.raw_connection()\n curr = conn.cursor()\n except (Exception) as e:\n log_import.warn('error on server connection')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n\n # Try copying the files to table.\n try:\n # Save to our buffer\n df[cols].to_csv(memory_buff, sep='\\t',\n header=True, index=False, encoding='utf-8')\n\n # Point buffer to start of memory block\n memory_buff.seek(0)\n\n # Copy records using native Postgres COPY command (FAST)\n curr.copy_expert(sql, memory_buff)\n\n # Save transaction and commit to DB\n conn.commit()\n except (Exception) as e:\n log_import.warn('error while copying to database')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n finally:\n memory_buff.close()\n if curr is not None:\n curr.close()\n log_import.info('finished ({:.2f})'.format(time() - start))\n return" ]
[ "0.6780042", "0.67421824", "0.6734249", "0.64080065", "0.6145905", "0.59611", "0.59009856", "0.58451056", "0.5779275", "0.57705617", "0.57494783", "0.57482135", "0.57234544", "0.5701453", "0.56711847", "0.5633896", "0.5608548", "0.5586253", "0.55794865", "0.55444336", "0.55082244", "0.5478147", "0.5446645", "0.5432556", "0.53938705", "0.53936297", "0.53932685", "0.5383177", "0.53595763", "0.53569835" ]
0.80731106
0
Match the correlation id to get the correct response as part of the requestreply pattern
def on_response(self, ch, method, props, body): if self.corr_id == props.correlation_id: self.response = body
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply", "def reply(self, msg_id, response):\n return self.hub.reply(self.get_private_key(), msg_id, response)", "def handle_id_answer(self, response_text):\n id_answer = None\n try:\n json_response = json.loads(response_text)\n id_answer = json_response[\"request\"]\n except json.decoder.JSONDecodeError:\n elements_splitted = response_text.split(\"|\")\n if elements_splitted and len(elements_splitted) >= 2:\n id_answer = elements_splitted[1]\n return id_answer", "def process_one_result(reply):\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])", "def reply(self, private_key, msg_id, response):\n return self._samp_hub.reply(private_key, msg_id, response)", "def make_reply(msg):\n reply = ''\n if msg is not None:\n for i in range(len(messages)):\n if msg == message[i]:\n reply = m_responses[i]\n return reply", "def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):\n # Since view no is always zero in the current setup\n looper.run(eventually(checkSufficientRepliesRecvd,\n client1.inBox,\n sent1.reqId,\n 2,\n retryWait=.5,\n timeout=5))\n originalRequestResponsesLen = nodeCount * 2\n duplicateRequestRepliesLen = nodeCount # for a duplicate request we need to\n client1.nodestack._enqueueIntoAllRemotes(sent1, None)\n\n def chk():\n assertLength([response for response in client1.inBox\n if (response[0].get(f.RESULT.nm) and\n response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or\n (response[0].get(OP_FIELD_NAME) == REQACK and\n response[0].get(f.REQ_ID.nm) == sent1.reqId)],\n originalRequestResponsesLen + duplicateRequestRepliesLen)\n\n looper.run(eventually(\n chk,\n retryWait=1,\n timeout=20))", "def set_reply(msg):\n \n result = Message(msg.content, correlation_id=msg.correlation_id ) \n return result", "def reply_object():\n reply_object = {\"code\": \"\"}\n return reply_object", "def respond(self, request_id, response):\n response['rdf:type'] = self.response_type\n response['response_to'] = uri(request_id)\n\n LOG.debug(\n 'Responding to request {0} with {1}.'.format(request_id, response))\n\n response_triples = []\n for key, values in response.iteritems():\n if not isinstance(values, list):\n values = [values]\n for value in values:\n 
response_triples.append(Triple(bnode('id'), key, value))\n\n self.sc.insert(response_triples)", "def _response_context(mf):\n\n\tprops = mf['properties']\n\n\t# get replies\n\tresponses = props.get('in-reply-to')\n\tif responses:\n\t\tresponse_type = 'in reply to'\n\t\tfor response in responses:\n\t\t\tresponse = response[0]\n\t\t\tif isinstance(response, dict):\n\t\t\t\t# the following is not correct\n\t\t\t\tresponse = response.get('url')\n\n\t\t\tif response:\n\t\t\t\t# make and return string with type and list of URLs\n\t\t\t\tresponse = None\n\n\treturn None", "def parse_res_id(response):\n pass", "def tunnel_recv_handler(self, payload):\n _log.analyze(self.node.id, \"+ CLIENT\", {'payload': payload})\n if 'msg_uuid' in payload and payload['msg_uuid'] in self.replies and 'cmd' in payload and payload['cmd']=='REPLY':\n kwargs = {}\n if 'key' in payload:\n kwargs['key'] = payload['key']\n if 'value' in payload:\n kwargs['value'] = payload['value']\n if 'response' in payload:\n kwargs['value'] = calvinresponse.CalvinResponse(encoded=payload['response'])\n self.replies.pop(payload['msg_uuid'])(**kwargs)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def rpc_match():", "def response(self, req, request_id, body):\n session = get_session()\n asyncrequest = model_query(session, AsyncRequest, filter=AsyncRequest.request_id == request_id).one()\n if not asyncrequest.expire:\n return responeutils.agentrespone(session, request_id, body)\n else:\n return responeutils.agentrespone(get_cache(), request_id, body)", "def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'", "def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass", "def get_reply_fields(self): \n def alter_request_edges(self, jdata):\n \"\"\"\n From the jsonified request template, converts\n \"edges\" : { \"node\" : { \"key1\" : value1, ... } }\n to something resembling a reply message body:\n \"edges\" : [ { \"key1\" : value, ... 
} }\n so that flatten_json can be run against it to extract\n valid field names.\n \"\"\"\n if isinstance(jdata, list):\n for entry in jdata:\n self._alter_request_edges(entry)\n if isinstance(jdata, dict):\n for key in jdata:\n if key == \"edges\":\n edge_dict = jdata[key]\n jdata[key] = []\n for subkey in edge_dict:\n jdata[key].append(edge_dict[subkey]) \n self._alter_request_edges(jdata[key]) \n\n json1 = re.sub(r'([z-zA-z0-9_-]+)(?:\\(.*?\\))*\\s*([\\[\\{])', r'\"\\1\" : \\2', self.template_text)\n json2 = re.sub(r'\\.*([a-zA-Z0-9]+)\\s*\\n', r'\"\\1\" : true,\\n', json1)\n json3 = re.sub(r'(\"[a-zA-Z0-9_-]+\"\\s*:[^,]+),(\\s*\\n\\s*[\\}\\]].*)', r'\\1\\2', json2)\n jdata = json.loads(json3)\n alter_request_edges(jdata)\n jreply = self.flatten_json(jdata, self.flatpath)\n self.reply_fields = [ key for key in jdata[0] ]\n return self._reply_fields", "def handle_response(self, order):\n print config.RESP_PROMPT + \" sending results of order %s...\" % (order.uuid)\n node = order.node\n responder_type = node[config.BEACON_TYPE_IND]\n params = node[config.PARAMS_IND]\n \n ip = params.get(config.NODE_IP_KEY)\n port = params.get(config.NODE_PORT_KEY)\n \n responder_class = self.response_map.get(responder_type) # get this from the beacon map based on beacon type\n responder = responder_class() # instantiate the object\n try:\n success = responder.send_response(params, order.response)\n except Exception, e:\n print \"%s Error connecting to %s:%s (%s)\" % (config.RESP_PROMPT, ip, port, e)\n success = False\n \n return success", "def get_reply(self, expected_reply = None):\n try:\n # Wait for the header character\n # Don't put anything in this while, because if losses packets if you do so\n if expected_reply:\n while not self.conn.read() == expected_reply:\n pass\n\n # Initialize empty packet where the received stream will be saved\n packet = bitstring.BitStream()\n\n if expected_reply == '>':\n message_id = Message.READY_2_CONFIGURE\n rospy.logdebug(\"Sonar altimeter in configuration mode\")\n reply = Reply(packet.append(\"0x{:02X}\".format(ord('>'))), id=message_id)\n return reply\n\n elif expected_reply == '$':\n message_id = Message.DATA\n rospy.logdebug(\"Received valid packet with sensor data\")\n else:\n rospy.logdebug(\"Received packet with configuration parameters\")\n message_id = Message.CONFIGURATION_PARAM\n\n # Convert each caracter from received string stream in the bitstream\n while True:\n current_line = self.conn.readline()\n for char in current_line:\n # This saves what is inside ord(char) in a two digit hex\n packet.append(\"0x{:02X}\".format(ord(char)))\n\n # Try to parse\n try:\n reply = Reply(packet, id = message_id)\n break\n except PacketIncomplete:\n rospy.logdebug(\"Received packet incomplete\")\n # Keep looking\n continue\n\n except select.error as (code,msg):\n if code == errno.EINTR:\n raise KeyboardInterrupt()\n raise\n\n rospy.logdebug(\"Received %s: %s\", reply.name, reply.payload)\n return reply", "def process_refill_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/refill_questionnaire_responses/' + \\\n\t\t\t Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = 
render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Too expensive\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Concerned about side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Other\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def reply(self, reply_id):\r\n return Reply(self, reply_id)", "def test_brands_reply(self):\n # 1. Setup service channel / dispatch channel\n # 2. send a post to brand\n # 3. Reply with custom response\n # 4. Route a reply\n # 5. check there is no extra responses created\n # 6. 
create a matchable and repeat 1-5\n brand = 'brand'\n channel, dispatch_channel = self.setup_channels(brand)\n user = self._create_db_user(email='[email protected]', password='test', is_superuser=True)\n user.account = self.account\n user.save()\n profiles = set()\n\n def do_test(matchable):\n profile = gen_profile()\n user_name = profile['user_name']\n profiles.add(user_name)\n post = self._create_db_post(\n '@%s I need some carrot' % brand,\n channel=channel,\n user_profile=profile)\n\n response = Response.objects.get(id=id_from_post_id(post.id))\n self.assertIsInstance(response.matchable, matchable.__class__)\n assert response.matchable == matchable\n\n # post custom response\n creative = \"U could find some carrot there\"\n self.login(user.email, 'test')\n data = dict(creative=creative,\n response=str(response.id),\n latest_post=str(response.post.id))\n resp = self.client.post('/commands/custom_response', data=json.dumps(data))\n resp = json.loads(resp.data)\n\n # check responses and conversations\n self.assertEqual(Response.objects(conversation_id=None).count(), 0)\n self.assertEqual(\n Response.objects(channel__in=[channel, channel.inbound_channel, channel.outbound_channel]).count(),\n 0)\n self.assertEqual(Response.objects(conversation_id=response.conversation.id).count(), 1)\n self.assertEqual(Response.objects(channel__in=[dispatch_channel]).count(), len(profiles))\n\n matchable = EmptyMatchable.get()\n do_test(matchable)\n\n matchable = self._create_db_matchable('Here is your carrot',\n intention_topics=['carrot'],\n channels=[channel.inbound_channel])\n do_test(matchable)", "def test_make_reply(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_inbound('inbound')\n reply = msg_helper.make_reply(msg, 'reply content')\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def handle_response(message_dict):\n c.master_command(bot_token, user_token, discuss_bot_id, discussion_chat_id,\n message_dict)", "def _respond(self, answers=[], authority=[], additional=[], rCode=OK):\n response = Message(rCode=rCode)\n for (section, data) in [(response.answers, answers),\n (response.authority, authority),\n (response.additional, additional)]:\n section.extend([\n RRHeader(name, record.TYPE, getattr(record, 'CLASS', IN),\n payload=record)\n for (name, record) in data])\n return response", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)" ]
[ "0.6301974", "0.60582995", "0.59054214", "0.57754266", "0.5729422", "0.57087106", "0.5684049", "0.56805116", "0.5631793", "0.5624594", "0.56170404", "0.55817276", "0.5493033", "0.5485892", "0.5485892", "0.5457697", "0.54559404", "0.5450454", "0.54025334", "0.5382339", "0.53653383", "0.5353953", "0.534011", "0.53339726", "0.53329265", "0.5330236", "0.53167707", "0.5304327", "0.529362", "0.52927625" ]
0.6833892
0
returns Kronecker graph tensor product of k copies of graph g Takes a graph g and int k and multiplies g's adjacency matrix by itself k times, returns resulting graph. For some reason graphs gotten this way are nicest when the initial graph has as self edge on every node, so by default this function adds self edges before the multiplication and strips them afterwards.
def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True): adj = nx.adjacency_matrix(g).todense() if add_self_edges: for i in range(len(adj)): adj[i, i] = 1 mat = adj for i in range(k - 1): mat = np.kron(mat, adj) if strip_self_edges: for i in range(len(mat)): mat[i, i] = 0 name = "kronecker(%s, %s, %s, %s)" % ( g.name if g.name else hash(g), k, add_self_edges, strip_self_edges) return nx.Graph(mat, name=name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def make_mult_op0(k: int):\n op = make_mult_op(k)\n i = np.concatenate((np.arange(0, k), np.arange(k + 1, 2 * k + 1)))\n j = i[:, np.newaxis]\n\n def op0(v: V) -> M:\n \"\"\"Multiplication operator function.\n\n :v: Vector of shape (2 * 4 + 1,).\n :returns: Toeplitz matrix m of shape (2 * k, 2 * k).\n\n \"\"\"\n m = op(v)\n m0: M = m[i, j]\n return m0\n return op0", "def MycielskiGraph(k=1, relabel=True):\n g = Graph()\n g.name(\"Mycielski Graph \" + str(k))\n\n if k<0:\n raise ValueError(\"parameter k must be a nonnegative integer\")\n\n if k == 0:\n return g\n\n if k == 1:\n g.add_vertex(0)\n return g\n\n if k == 2:\n g.add_edge(0,1)\n return g\n\n g0 = MycielskiGraph(k-1)\n g = MycielskiStep(g0)\n g.name(\"Mycielski Graph \" + str(k))\n if relabel: g.relabel()\n\n return g", "def tensor_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_directed_edges_cross_edges(G, H))\n if not GH.is_directed():\n GH.add_edges_from(_undirected_edges_cross_edges(G, H))\n return GH", "def power(G, k):\n if k <= 0:\n raise ValueError('k must be a positive integer')\n H = nx.Graph()\n H.add_nodes_from(G)\n # update BFS code to ignore self loops.\n for n in G:\n seen = {} # level (number of hops) when seen in BFS\n level = 1 # the current level\n nextlevel = G[n]\n while nextlevel:\n thislevel = nextlevel # advance to next level\n nextlevel = {} # and start a new list (fringe)\n for v in thislevel:\n if v == n: # avoid self loop\n continue\n if v not in seen:\n seen[v] = level # set the level of vertex v\n nextlevel.update(G[v]) # add neighbors of v\n if k <= level:\n break\n level += 1\n H.add_edges_from((n, nbr) for nbr in seen)\n return H", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue", "def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0", "def NKStarGraph(n,k):\n from sage.combinat.permutation import Arrangements\n #set from which to permute\n set = [str(i) for i in range(1,n+1)]\n #create dict\n d = {}\n for v in Arrangements(set,k):\n v = list(v) # So we can easily mutate it\n tmp_dict = {}\n #add edges of dimension i\n for i in range(1,k):\n #swap 0th and ith element\n v[0], v[i] = v[i], v[0]\n #convert to str and add to list\n vert = \"\".join(v)\n tmp_dict[vert] = None\n #swap back\n v[0], v[i] = v[i], v[0]\n #add other edges\n tmp_bit = v[0]\n for i in set:\n #check if external\n if not (i in v):\n v[0] = i\n #add edge\n vert = \"\".join(v)\n tmp_dict[vert] = None\n v[0] = tmp_bit\n d[\"\".join(v)] = tmp_dict\n return Graph(d, 
name=\"(%d,%d)-star\"%(n,k))", "def kron(a, b):\r\n a = tensor.as_tensor_variable(a)\r\n b = tensor.as_tensor_variable(b)\r\n if (a.ndim + b.ndim <= 2):\r\n raise TypeError('kron: inputs dimensions must sum to 3 or more. '\r\n 'You passed %d and %d.' % (a.ndim, b.ndim))\r\n o = tensor.outer(a, b)\r\n o = o.reshape(tensor.concatenate((a.shape, b.shape)),\r\n a.ndim + b.ndim)\r\n shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim))\r\n if shf.ndim == 3:\r\n shf = o.dimshuffle(1, 0, 2)\r\n o = shf.flatten()\r\n else:\r\n o = shf.reshape((o.shape[0] * o.shape[2],\r\n o.shape[1] * o.shape[3]) +\r\n tuple([o.shape[i] for i in range(4, o.ndim)]))\r\n return o", "def knn(X, k=1):\n from ..utils.fast_distance import euclidean_distance\n\n if np.size(X) == X.shape[0]:\n X = np.reshape(X, (np.size(X), 1))\n try:\n k = int(k)\n except:\n \"k cannot be cast to an int\"\n if np.isnan(k):\n raise ValueError('k is nan')\n if np.isinf(k):\n raise ValueError('k is inf')\n k = min(k, X.shape[0] - 1)\n\n # create the distance matrix\n dist = euclidean_distance(X)\n sorted_dist = dist.copy()\n sorted_dist.sort(0)\n\n # neighbour system\n bool_knn = dist < sorted_dist[k + 1]\n bool_knn += bool_knn.T\n # xor diagonal\n bool_knn ^= np.diag(np.diag(bool_knn))\n dist *= (bool_knn > 0)\n return wgraph_from_adjacency(dist)", "def kron_prod(matList):\n ret = matList[0]\n for i in range(1, len(matList)):\n ret = kron(ret, matList[i])\n return ret", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def propagation(features, graph, k=1):\n for _ in range(k):\n features = torch.matmul(features, graph)\n return features", "def tf_kron(a: tf.Tensor,\n b: tf.Tensor) -> tf.Tensor:\n assert len(a.shape) == 2, \"a, should be 2x2 tensor\"\n assert len(b.shape) == 2, \"b, should be 2x2 tensor\"\n a_shape = list(b.shape) \n b_shape = list(b.shape)\n return tf.reshape(tf.reshape(a,[a_shape[0],1,a_shape[1],1])*tf.reshape(b,[1,b_shape[0],1,b_shape[1]]),[a_shape[0]*b_shape[0],a_shape[1]*b_shape[1]])", "def wrapped_kronecker(operator_1, operator_2):\n return scipy.sparse.kron(operator_1, operator_2, 'csc')", "def clique_percolation(k: int, g: nx.Graph):\n cliques = [frozenset(clq) for clq in nx.enumerate_all_cliques(g) if len(clq) == k]\n\n graph_of_cliques = nx.Graph()\n for clq in cliques:\n is_isolate = True # this clique is not connected to any other clique\n for other_clq in cliques:\n if clq is other_clq:\n continue\n if len(clq.intersection(other_clq)) >= k-1 and not graph_of_cliques.has_edge(clq, other_clq):\n is_isolate = False\n graph_of_cliques.add_edge(clq, other_clq)\n if is_isolate:\n graph_of_cliques.add_node(clq)\n\n # create communities:\n communities = []\n for component in nx.connected_components(graph_of_cliques):\n communities.append(sorted(frozenset.union(*component)))\n\n return sorted(communities, key=len)", "def G(k):\n return k^(k>>1)", "def strong_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_nodes_cross_edges(G, H))\n GH.add_edges_from(_edges_cross_nodes(G, H))\n GH.add_edges_from(_directed_edges_cross_edges(G, H))\n if not GH.is_directed():\n GH.add_edges_from(_undirected_edges_cross_edges(G, H))\n return 
GH", "def make_mult_op(k: int) -> Callable[[V], M]:\n def op(v: V) -> M:\n \"\"\"Multiplication operator function.\n\n :v: Vector of shape (2 * 4 + 1,).\n :returns: Toeplitz matrix m of shape (2 * k + 1, 2 * k + 1).\n\n \"\"\"\n c = v[2 * k:]\n r = np.flip(v[:2 * k + 1])\n m: M = toeplitz(c, r)\n return m\n return op", "def _shape_mb(self, t, k, requires_repeat=False):\n if requires_repeat:\n t = t.repeat(*((1, k)+(1,)*(len(t.shape)-2)))\n \n src_shape = t.shape\n dest_shape = (src_shape[0]*k, src_shape[1]//k) + src_shape[2:]\n \n return t.view(*dest_shape)", "def power_jordan(A, k):\n JA, P, jordan_bloques, nilp = canonica_jordan(A)\n nrowA = np.shape(A)[0]\n JK = np.zeros_like(A)\n m0 = 0 # inicializacion de indexador\n\n for m, BJ in zip(nilp, jordan_bloques):\n F = np.array([factorial(k)/(factorial(j)*factorial(k-j))*a**k for j in range(m)])\n BJK = np.zeros_like(m)\n for j in range(m):\n BJK[j, j:] = F[1:m-j+1]\n JK[m0:m, m0:m] = BJK\n m0 = m\n\n invP = gauss_jordan(P)\n invPJK = np.dot(invP, JK)\n AK = np.dot(invPJK, P)\n\n return AK", "def mult(self, p, k):\n res = None\n while k != 0:\n if k % 2 == 1:\n res = self.add(res, p)\n p = self.add(p, p)\n k //= 2\n return res", "def __mul__(self, other):\n return Matrix3(\n self.i * other,\n self.j * other,\n self.k * other,\n )", "def MathonPseudocyclicMergingGraph(M, t):\n from sage.graphs.graph import Graph\n from sage.matrix.constructor import identity_matrix\n assert len(M) == 4\n assert M[0] == identity_matrix(M[0].nrows())\n A = sum(x.tensor_product(x) for x in M[1:])\n if t > 0:\n A += sum(x.tensor_product(M[0]) for x in M[1:])\n if t > 1:\n A += sum(M[0].tensor_product(x) for x in M[1:])\n return Graph(A)", "def k(self):\n return add(self.k_b(), self.k_m())", "def reduce_kcol_to_3col(G, k):\n\n G, H = prepare_grid(G)\n print(\"grid prepared\")\n N = len(G)\n H = create_kgrid(H, N, k)\n print(\"grid created\")\n H = add_pheripherals_per_edge(G.edges, H, k)\n print(\"peripherals added\")\n\n return H", "def tensorPowerIteration(T, k, max_steps = 100):\n dim = T.shape[0]\n res, weights = [], []\n for i in range(k):\n # intialization\n x = np.expand_dims(np.random.multivariate_normal(np.zeros(dim), np.identity(dim)),1)\n x /= np.linalg.norm(x)\n # tensor power iterations\n for step in range(max_steps): # compute T(I, x, x)/||T(I,x,x)||\n x = np.expand_dims(np.tensordot(T, x @ x.T, axes = 2),1)\n x /= np.linalg.norm(x)\n # update & deflation\n w = x.T @ np.tensordot(T, x @ x.T, axes = 2) # get the factor\n res.append(x[:,0])\n weights.append(w)\n T -= constructSymTensor(components = x, weights = w) # deflation\n return np.array(res).T, np.array(weights).T # return as a matrix", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not 
multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);" ]
[ "0.6349474", "0.628217", "0.6246938", "0.6221783", "0.6156086", "0.6099495", "0.59082294", "0.5830467", "0.5819271", "0.5718691", "0.57003087", "0.5684733", "0.5603436", "0.56028265", "0.5598301", "0.557988", "0.5576591", "0.55397904", "0.55388963", "0.5536826", "0.5524773", "0.5524056", "0.5511459", "0.54779726", "0.545096", "0.5448844", "0.5416436", "0.5410492", "0.5407653", "0.53954583" ]
0.8255546
0
takes two graphs that have some nodes in commmon and This function takes a pair of graphs a, b with some corresponding nodes having the same label. Before running any matching algorithms them, their nodes and edges need to be shuffled because otherwise some algorithms may (inadvertently) match graphs by node label and give false impression of accuracy. This function permutes nodes and edges in on of the graphs and returns both graphs + the true matching.
def permute_graphs(a, b, seed=0): np.random.seed(seed) nodes = b.nodes() permuted_nodes = np.random.permutation(nodes) # matching of all labels of nodes in graph b to their new values match = gm.Matching(zip(nodes, permuted_nodes)) new_edges = [(match.get_b(x), match.get_b(y)) for x, y in b.edges()] permuted_edges = [(x, y) for x, y in np.random.permutation(new_edges)] unneeded_nodes = set(nodes).difference(set(a.nodes())) for node in unneeded_nodes: match.pop_b(node) name = "permuted_b(%s, %s, %s)" % ( a.name if a.name else hash(a), b.name if b.name else hash(b), seed) return a, nx.Graph(permuted_edges, name=name), match
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g", "def compare_graphs(self):\n\t\tpass", "def aga_compare_paths(adata1, adata2,\n adjacency_key='aga_adjacency_full_confidence'):\n import networkx as nx\n g1 = nx.Graph(adata1.add[adjacency_key])\n g2 = nx.Graph(adata2.add[adjacency_key])\n leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]\n logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)\n asso_groups1 = utils.identify_groups(adata1.smp['aga_groups'], adata2.smp['aga_groups'])\n asso_groups2 = utils.identify_groups(adata2.smp['aga_groups'], adata1.smp['aga_groups'])\n orig_names1 = adata1.add['aga_groups_order_original']\n orig_names2 = adata2.add['aga_groups_order_original']\n\n import itertools\n n_steps = 0\n n_agreeing_steps = 0\n n_paths = 0\n n_agreeing_paths = 0\n # loop over all pairs of leaf nodes in the reference adata1\n for (r, s) in itertools.combinations(leaf_nodes1, r=2):\n r2, s2 = asso_groups1[r][0], asso_groups1[s][0]\n orig_names = [orig_names1[int(i)] for i in [r, s]]\n orig_names += [orig_names2[int(i)] for i in [r2, s2]]\n logg.msg('compare shortest paths between leafs ({}, {}) in graph1 and ({}, {}) in graph2:'\n .format(*orig_names), v=4, no_indent=True)\n no_path1 = False\n try:\n path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]\n except nx.NetworkXNoPath:\n no_path1 = True\n no_path2 = False\n try:\n path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]\n except nx.NetworkXNoPath:\n no_path2 = True\n if no_path1 and no_path2:\n # consistent behavior\n n_paths += 1\n n_agreeing_paths += 1\n n_steps += 1\n n_agreeing_steps += 1\n continue\n elif no_path1 or no_path2:\n # non-consistent result\n n_paths += 1\n n_steps += 1\n continue\n if len(path1) >= len(path2):\n path_mapped = [asso_groups1[l] for l in path1]\n path_compare = path2\n path_compare_id = 2\n path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]\n path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]\n else:\n path_mapped = [asso_groups2[l] for l in path2]\n path_compare = path1\n path_compare_id = 1\n path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]\n 
path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]\n n_agreeing_steps_path = 0\n ip_progress = 0\n for il, l in enumerate(path_compare[:-1]):\n for ip, p in enumerate(path_mapped):\n if ip >= ip_progress and l in p:\n # check whether we can find the step forward of path_compare in path_mapped\n if (ip + 1 < len(path_mapped)\n and\n path_compare[il + 1] in path_mapped[ip + 1]):\n # make sure that a step backward leads us to the same value of l\n # in case we \"jumped\"\n logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'\n .format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)\n consistent_history = True\n for iip in range(ip, ip_progress, -1):\n if l not in path_mapped[iip - 1]:\n consistent_history = False\n if consistent_history:\n # here, we take one step further back (ip_progress - 1); it's implied that this\n # was ok in the previous step\n logg.msg(' step(s) backward to position(s) {} in path_mapped are fine, too: valid step'\n .format(list(range(ip - 1, ip_progress - 2, -1))), v=6)\n n_agreeing_steps_path += 1\n ip_progress = ip + 1\n break\n n_steps_path = len(path_compare) - 1\n n_agreeing_steps += n_agreeing_steps_path\n n_steps += n_steps_path\n n_paths += 1\n if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1\n\n # only for the output, use original names\n path1_orig_names = [orig_names1[int(s)] for s in path1]\n path2_orig_names = [orig_names2[int(s)] for s in path2]\n logg.msg(' path1 = {},\\n'\n 'path_mapped = {},\\n'\n ' path2 = {},\\n'\n '-> n_agreeing_steps = {} / n_steps = {}.'\n .format(path1_orig_names,\n [list(p) for p in path_mapped_orig_names],\n path2_orig_names,\n n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)\n Result = namedtuple('aga_compare_paths_result',\n ['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])\n return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,\n n_steps=n_steps if n_steps > 0 else np.nan,\n frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,\n n_paths=n_paths if n_steps > 0 else np.nan)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n 
if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = 
[G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def CompareGraphsSpectrum(graph1, graph2):\n laplacian1 = nx.spectrum.laplacian_spectrum(graph1)\n laplacian2 = nx.spectrum.laplacian_spectrum(graph2)\n k1 = select_k(laplacian1)\n k2 = select_k(laplacian2)\n # take the fewer dimensions to describe the result\n k = min(k1, k2)\n # the similarity is the sum of the eukleidian distance of the most\n # important nodes\n similarity = sum((laplacian1[:k] - laplacian2[:k])**2)\n return similarity", "def isomorphic(graph1, graph2):\r\n\r\n gd1 = _TripleCanonicalizer(graph1).to_hash()\r\n gd2 = _TripleCanonicalizer(graph2).to_hash()\r\n return gd1 == gd2", "def compare_networks(model_1, model_2, pos=None, showfig=True, figsize=(15, 8), verbose=3):\n scores, adjmat_diff = bnlearn.network.compare_networks(model_1['adjmat'], model_2['adjmat'], pos=pos, showfig=showfig, width=figsize[0], height=figsize[1], verbose=verbose)\n return(scores, adjmat_diff)", "def compare_networks(model_1, model_2, pos=None, showfig=True, figsize=(15, 8), verbose=3):\n [scores, adjmat_diff] = network.compare_networks(model_1['adjmat'], model_2['adjmat'], pos=pos, showfig=showfig, width=figsize[0], height=figsize[1], verbose=verbose)\n return(scores, adjmat_diff)", "def intersect_igraphs(G1, G2):\n # Ginter = G1.__and__(G2) # This does not work with attributes.\n if G1.ecount() > G2.ecount(): # Iterate through edges of the smaller graph\n G1, G2 = G2, G1\n inter_nodes = set()\n inter_edges = []\n inter_edge_attributes = {}\n inter_node_attributes = {}\n edge_attribute_name_list = G2.edge_attributes()\n node_attribute_name_list = G2.vertex_attributes()\n for edge_attribute_name in edge_attribute_name_list:\n inter_edge_attributes[edge_attribute_name] = []\n for node_attribute_name in node_attribute_name_list:\n inter_node_attributes[node_attribute_name] = []\n for e in list(G1.es):\n n1_id = e.source_vertex[\"id\"]\n n2_id = e.target_vertex[\"id\"]\n try:\n n1_index = G2.vs.find(id = n1_id).index\n n2_index = G2.vs.find(id = n2_id).index\n except ValueError:\n continue\n if G2.are_connected(n1_index, n2_index):\n inter_edges.append((n1_index, n2_index))\n inter_nodes.add(n1_index)\n inter_nodes.add(n2_index)\n edge_attributes = e.attributes()\n for 
edge_attribute_name in edge_attribute_name_list:\n inter_edge_attributes[edge_attribute_name].append(edge_attributes[edge_attribute_name])\n\n # map nodeids to first len(inter_nodes) integers\n idmap = {n_index:i for n_index,i in zip(inter_nodes, range(len(inter_nodes)))}\n\n G_inter = ig.Graph()\n G_inter.add_vertices(len(inter_nodes))\n G_inter.add_edges([(idmap[e[0]], idmap[e[1]]) for e in inter_edges])\n for edge_attribute_name in edge_attribute_name_list:\n G_inter.es[edge_attribute_name] = inter_edge_attributes[edge_attribute_name]\n\n for n_index in idmap.keys():\n v = G2.vs[n_index]\n node_attributes = v.attributes()\n for node_attribute_name in node_attribute_name_list:\n inter_node_attributes[node_attribute_name].append(node_attributes[node_attribute_name])\n for node_attribute_name in node_attribute_name_list:\n G_inter.vs[node_attribute_name] = inter_node_attributes[node_attribute_name]\n\n return G_inter", "def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. 
If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def common_dependency_targets(graph1, graph2, n1, n2, node_attrib='label',\n edge_attrib='label'):\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n n1_rels, n2_rels = defaultdict(list), defaultdict(list)\n\n for source_set, target_dict in ((n1_children, n1_rels), (n2_children, n2_rels)):\n for rel, target in source_set:\n target_dict[rel].append(target)\n\n common_rels = set(n1_rels) & set(n2_rels) # intersection\n common_deps = set()\n for rel in common_rels:\n for n1_target in n1_rels[rel]:\n n1_target_word = graph1.node[n1_target][node_attrib]\n for n2_target in n2_rels[rel]:\n n2_target_word = graph2.node[n2_target][node_attrib]\n if n1_target_word == n2_target_word:\n common_deps.add( (n1_target, n2_target) )\n return common_deps", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. 
\"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]", "def similar(g1, g2):\r\n return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))", "def 
merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def __modular_product(p1, p2, dist1=None, dist2=None, dist_tol=0):\n\n if dist1 is None:\n dist1 = distances(p1)\n dist1[p1.edges > 0] = p1.edges[p1.edges > 0]\n\n if dist2 is None:\n dist2 = distances(p2)\n dist2[p2.edges > 0] = p2.edges[p2.edges > 0]\n\n nodes = []\n scores = []\n rings1, members1 = get_rings(p1)\n rings2, members2 = get_rings(p2)\n\n rings_members1 = [node for cycle in members1 for node in cycle]\n rings_members2 = [node for cycle in members2 for node in cycle]\n\n for i in range(p1.numnodes):\n for j in range(p2.numnodes):\n if i in rings_members1 and j in rings_members2:\n # do not align parts of rings\n # whole rings will be aligned later\n continue\n weighted_freq, _ = compare_nodes(p1.nodes[i], p2.nodes[j])\n if weighted_freq > 0.0:\n nodes.append({\"n1\": i, \"n2\": j})\n scores.append(weighted_freq)\n\n for i in range(len(rings1)):\n for j in range(len(rings2)):\n # do not look at the number of nodes in the ring\n # it would be faster, but it results in wrong alignemtn for complex\n # ring systems created from multiple molecules\n weighted_freq, _ = compare_nodes(rings1[i], rings2[j])\n if weighted_freq > 0.0:\n nodes.append({\"n1\": p1.numnodes+i, \"n2\": p2.numnodes+j,\n \"members\": [members1[i], members2[j]]})\n scores.append(weighted_freq)\n\n n = len(nodes)\n scores = np.array(scores)\n edges = np.zeros((n, n))\n costs = np.zeros((n, n))\n\n for i in range(n):\n for j in range(i):\n\n if nodes[i][\"n1\"] == nodes[j][\"n1\"] or \\\n nodes[i][\"n2\"] == nodes[j][\"n2\"]:\n continue\n\n if nodes[i][\"n1\"] >= p1.numnodes: # ring node\n u = nodes[i][\"n1\"] - p1.numnodes\n v = nodes[i][\"n2\"] - p2.numnodes\n # get all nodes forming a ring system\n idxi1 = members1[u]\n idxi2 = members2[v]\n\n else:\n u = nodes[i][\"n1\"]\n v = nodes[i][\"n2\"]\n idxi1 = [u]\n idxi2 = [v]\n\n if nodes[j][\"n1\"] >= p1.numnodes: # ring node\n w = nodes[j][\"n1\"] - p1.numnodes\n s = nodes[j][\"n2\"] - p2.numnodes\n idxj1 = members1[w]\n idxj2 = members2[s]\n\n else:\n w = nodes[j][\"n1\"]\n s = nodes[j][\"n2\"]\n idxj1 = [w]\n idxj2 = [s]\n\n if len(set(idxi1) & set(idxj1)) > 0 or \\\n len(set(idxi2) & set(idxj2)) > 0:\n # do not connect node with itself\n continue\n\n is_connected = False\n # compute distances in graphs\n # for ring nodes find shortest distances\n # note: loop is faster than numpy (a lot of singletons to check)\n d1 = float(\"inf\")\n for p in idxi1:\n for q in idxj1:\n if p1.edges[p, q] > 0:\n is_connected = True\n if dist1[p, q] < d1:\n d1 = dist1[p, q]\n\n d2 = float(\"inf\")\n for p in idxi2:\n for q in idxj2:\n if p2.edges[p, q] > 0:\n is_connected = True\n if dist2[p, q] < d2:\n d2 = dist2[p, q]\n\n if math.fabs(d1 - d2) <= dist_tol:\n if is_connected:\n costs[i, j] = costs[j, i] = math.fabs(d1 - d2)\n edges[i, j] = edges[j, i] = 1.0\n return nodes, scores, edges, costs", "def rearange_links(links1, links2):\n l1 = links1[:].tolist()\n l2 = links2[:].tolist()\n idx = np.zeros(len(l1)).astype(int)-1\n Uidx1 = []\n Uidx2 = []\n for i in range(0, len(l1)):\n if l1[i] in l2:\n # Link is fully matched if it appear both in links1 and links2\n idx[l2.index(l1[i])]=i\n else:\n Uidx1.append(i)\n for i in range(0, len(idx)):\n if idx[i] <0:\n Uidx2.append(i)\n for i in range(0, len(Uidx1)):\n for j in range(0, len(Uidx2)):\n if 
links1[Uidx1[i], 0] == links2[Uidx2[j], 0]:\n # Matching links when they have the same one source.\n idx[Uidx2[j]]=Uidx1[i]\n Uidx1 = []\n for i in range(0, len(l1)):\n if i not in idx:\n Uidx1.append(i)\n \n j = 0\n for i in range(0, len(idx)):\n if idx[i]<0:\n # Matching links if one's source is the same with the other's target. And change switch one's source with its target.\n if links1[Uidx1[j]][1]==links2[i][0]:\n tmp = links1[Uidx1[j]][1]\n links1[Uidx1[j]][1] = links1[Uidx1[j]][0]\n links1[Uidx1[j]][0] = tmp\n idx[i]=Uidx1[j]\n j += 1\n links1 = links1[idx]\n return links1", "def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def test_graphid_operator_eq_and_neq():\n\n for xstr, ystr in itertools.product([\"g1\", \"g2\", \"y7\", \"z123\"], repeat=2):\n x = _ir.GraphId(xstr)\n y = _ir.GraphId(ystr)\n\n if xstr == ystr:\n assert x == y\n assert not (x != y)\n else:\n assert not (x == y)\n assert x != y", "def sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid):\n\n sys_el_a_id = system_el2kbid[el_a]\n sys_el_b_id = system_el2kbid[el_b]\n gol_el_a_id = gold_el2kbid[el_a]\n gol_el_b_id = gold_el2kbid[el_b]\n\n if sys_el_a_id.startswith('NIL'): sys_el_a_id = 'NIL'\n if sys_el_b_id.startswith('NIL'): sys_el_b_id = 'NIL'\n if gol_el_a_id.startswith('NIL'): gol_el_a_id = 'NIL'\n if gol_el_b_id.startswith('NIL'): gol_el_b_id = 'NIL'\n\n #print system_el2kbid\n \n return sys_el_a_id == sys_el_b_id == gol_el_a_id == gol_el_b_id", "def construct_graph(a, b, w, time_arr, imp_arr, cost_arr, dist_arr):\n graph = {}\n for index,nodes in enumerate(zip(a,b)):\n # add unadded nodes to graph's keys with empty list\n if nodes[0] not in graph.keys():\n graph[nodes[0]] = []\n if nodes[1] not in graph.keys():\n graph[nodes[1]] = []\n # add unadded destination nodes as list [dest, weight]\n if nodes[1] not in graph[nodes[0]]:\n graph[nodes[0]].append([nodes[1], w[index], time_arr[index], imp_arr[index], cost_arr[index], dist_arr[index]])\n if nodes[0] not in graph[nodes[1]]:\n graph[nodes[1]].append([nodes[0], w[index], time_arr[index], imp_arr[index], cost_arr[index], dist_arr[index]])\n return graph", "def permutations(graph1: list, graph2: list, degrees: tuple):\n degrees1 = degrees[0]\n degrees2 = degrees[1]\n check1 = []\n check2 = []\n for index, _ in enumerate(degrees1):\n degree = degrees1[index]\n temp = []\n for vertex, _ in enumerate(graph1[index]):\n if graph1[index][vertex] == 1:\n temp.append(degrees1[vertex])\n check1.append((degree, tuple(sorted(temp))))\n\n for index, _ in enumerate(degrees2):\n degree = degrees2[index]\n temp = []\n for vertex in range(len(graph2[index])):\n if graph2[index][vertex] == 1:\n temp.append(degrees2[vertex])\n check2.append((degree, tuple(sorted(temp))))\n\n return len(set(check1 + check2)) == len(set(check1))", "def construct_diff(self,\n algorithm: str,\n graph1: GraphWithRepetitiveNodesWithRoot,\n graph2: GraphWithRepetitiveNodesWithRoot) -> GraphMap:\n if len(graph1) > len(graph2):\n graph1, graph2 = graph2, graph1\n\n graph_printer = GraphPrinter(graph1, graph2)\n\n graph1_str_representation = graph_printer.print_graph1()\n graph2_str_representation = graph_printer.print_graph2()\n\n program_input = graph1_str_representation + 
graph2_str_representation\n program_input = '\\n'.join(program_input)\n\n location = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n if self.SUPPORTED_ALGORITHMS:\n exe_filename = AlgorithmCompiler().compile(algorithm)\n else:\n exe_filename = self.EXE_FILENAME\n\n cpp_algorithm = os.path.join(location, exe_filename)\n\n process = subprocess.Popen(cpp_algorithm, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n output = process.communicate(program_input.encode())[0].decode()\n\n return graph_printer.back_printer(output)", "def count_common_subgraphs(graph1, graph2, n1, n2,\n node_attrib='label', edge_attrib='label'):\n for graph in (graph1, graph2):\n assert nx.is_directed_acyclic_graph(graph)\n \n if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:\n return 0\n\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n\n if not n1_children or not n2_children:\n return 0\n else:\n result = 1 # neutral element of multiplication\n for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,\n node_attrib=node_attrib):\n result *= (count_common_subgraphs(graph1, graph2,\n n1_target, n2_target,\n node_attrib='label',\n edge_attrib='label') + 2)\n return result - 1", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def side_renaming(network1, network2):\n\n # There is probably faster way to perform this, optimize later if needed\n for i in range(len(network1.nodes)):\n \n if (network1.nodes[i][\"group\"] == \"#fcae91FF\"):\n network1.nodes[i][\"T1\"] = \"0\"\n\n elif (network1.nodes[i][\"group\"] == \"#7828a0FF\"):\n network1.nodes[i][\"T1\"] = \"1\"\n \n else:\n print(\"Error with group encoding!\")\n \n \n for i in 
range(len(network2.nodes)):\n \n if (network2.nodes[i][\"group\"] == \"#fcae91FF\"):\n network2.nodes[i][\"T2\"] = \"0\"\n \n elif (network2.nodes[i][\"group\"] == \"#7828a0FF\"):\n network2.nodes[i][\"T2\"] = \"1\"\n \n else:\n print(\"This should not be printed! Error with group encoding!\")\n\n return network1, network2", "def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g", "def test_graph2():\n mol_graph1 = DGLGraph([(0, 1), (0, 2), (1, 2)])\n mol_graph2 = DGLGraph([(0, 1), (1, 2), (1, 3), (1, 4)])\n batch_mol_graph = dgl.batch([mol_graph1, mol_graph2])\n node_feats = torch.arange(batch_mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * batch_mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph1 = get_complete_graph(mol_graph1.number_of_nodes())\n complete_graph2 = get_complete_graph(mol_graph2.number_of_nodes())\n batch_complete_graph = dgl.batch([complete_graph1, complete_graph2])\n atom_pair_feats = torch.arange(batch_complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return batch_mol_graph, node_feats, edge_feats, batch_complete_graph, atom_pair_feats" ]
[ "0.68501484", "0.6609531", "0.6550218", "0.6478293", "0.64721924", "0.6455647", "0.6370949", "0.6257739", "0.62525046", "0.6149024", "0.6139504", "0.6138427", "0.6100626", "0.5999131", "0.5935091", "0.5918132", "0.5916235", "0.58819485", "0.5872874", "0.58498603", "0.58483785", "0.58316046", "0.5813339", "0.5785665", "0.5777276", "0.5775325", "0.5765835", "0.5764394", "0.5753707", "0.57470727" ]
0.7509719
0
returns randomly selected anchors and candidate sets. Randomly selects a set of pairs of corresponding nodes (anchors). For every node in a that has a counterpart in b (and is not already an anchor), randomly selects a list of candidates; the list always contains the true match. Candidate sets need not be disjoint.
def get_anchors_candidates(a, b, match, n_anchors, n_candidates, seed=0): random.seed(seed) a_nodes = set(a.nodes()) b_nodes = set(b.nodes()) anchors = random.sample(match.items(), n_anchors) a_anchors = set([x for x, _ in anchors]) b_anchors = set([y for _, y in anchors]) a_nodes.difference_update(a_anchors) b_nodes.difference_update(b_anchors) candidates = {} for node in a_nodes: if match.contains_a(node): cands = random.sample(b_nodes, n_candidates - 1) cands.append(match.get_b(node)) candidates[node] = list(set(cands)) return anchors, candidates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomMatching(self):\n\n # Generate random permutation of vertices\n permutation = range(self.n)\n shuffle(permutation)\n\n # Invert permutation list to get rank\n ranks = range(self.n)\n for idx, v in enumerate(permutation):\n ranks[v] = idx\n\n matched = set([])\n matches = set([])\n # Process in order of rank\n for v in ranks:\n if v in matched:\n continue\n\n def firstNeighbor(v):\n \"\"\"Find highest-priority unmatched neighbor of v.\"\"\"\n for u in ranks:\n if u in matched or not self.adjacent(u, v):\n continue\n\n return u\n\n u = firstNeighbor(v)\n if u:\n # Add match between u, v\n matches.add((v, u))\n\n # Track matched vertices\n matched.add(v)\n matched.add(u)\n\n return matches", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)", "def select(self, solutions):\r\n solutions = self.sort_solutions(solutions)\r\n # define coordinates for the two groups\r\n elitists_coords = [x for x in range(self.breeding_rules.elitist_candidates)]\r\n first_discarded_solution = int(len(solutions) - (len(solutions) * self.breeding_rules.discard_rate))\r\n crossover_coords = [x for x in range(first_discarded_solution)]\r\n # fill each breeding group with its possible participants, based on the coordinates defined above\r\n elitists = [solutions[x] for x in elitists_coords]\r\n crossover = [solutions[x] for x in crossover_coords]\r\n return elitists, crossover", "def build_candidates(allowed_nodes=[identity], best_candidates=[], nb_candidates=200):\n new_candidates = []\n length_limit = 4 # Maximal length of a program\n def random_node():\n return random.choice(allowed_nodes)\n\n # Until we have enough new candidates\n while (len(new_candidates) < nb_candidates):\n # Add 10 new programs\n for i in range(5):\n new_candidates += [[random_node()]]\n\n # Create new programs based on each best candidate\n for best_program in best_candidates:\n # Add one op on its right but limit the length of the program\n if len(best_program) < length_limit - 1:\n new_candidates += [[random_node()] + best_program]\n # Add one op on its left but limit the length of the program\n if len(best_program) < length_limit - 1:\n new_candidates += [best_program + [random_node()]]\n # Mutate one instruction of the existing program\n new_candidates += [list(best_program)]\n new_candidates[-1][random.randrange(0, len(best_program))] = random_node()\n\n # Truncate if we have too many candidates\n np.random.shuffle(new_candidates)\n return new_candidates[:nb_candidates]", "def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n 
if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors", "def permute_graphs(a, b, seed=0):\n np.random.seed(seed)\n nodes = b.nodes()\n permuted_nodes = np.random.permutation(nodes)\n\n # matching of all labels of nodes in graph b to their new values\n match = gm.Matching(zip(nodes, permuted_nodes))\n new_edges = [(match.get_b(x), match.get_b(y)) for x, y in b.edges()]\n permuted_edges = [(x, y) for x, y in np.random.permutation(new_edges)]\n unneeded_nodes = set(nodes).difference(set(a.nodes()))\n for node in unneeded_nodes:\n match.pop_b(node)\n name = \"permuted_b(%s, %s, %s)\" % (\n a.name if a.name else hash(a), b.name if b.name else hash(b), seed)\n return a, nx.Graph(permuted_edges, name=name), match", "def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n self.matching += get_matching(plaqs, d_plaqs)", "def random_select_revealed_node(self, alpha, index1, index2):\n same_nodes = set(self.graphs[index1].nodes()) & set(self.graphs[index2].nodes())\n s = int(alpha * len(same_nodes))\n logging.info(\"graph {}-{} random revealed nodes {}/{}.\".format\n (index1, index2, s, len(same_nodes)))\n revealed_nodes = random.sample(same_nodes, s)\n self.revealed_nodes[(index1, index2)] = set(revealed_nodes)\n self.same_nodes[(index1, index2)] = same_nodes\n return revealed_nodes", "def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n 
appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]", "def likely_pairs(self, k=2):\n for a in self.G.nodes():\n if not self.eligible_node(a):\n continue\n for b in neighbourhood(self.G, a, k):\n if not self.eligible_node(b):\n continue\n yield (a, b)", "def rand_neighbours(self):\n # Initialize neighbours sets as empty sets:\n nodes_nei = [set() for _ in range(self.num_nodes)]\n\n for i,nd in enumerate(self.nodes):\n # Sample a set of indices (Which represent a set of nodes).\n # Those nodes will be nd's neighbours:\n nodes_nei[i].update(\\\n random.sample(range(self.num_nodes),self.nei))\n\n # Remove myself:\n nodes_nei[i].discard(i)\n\n # To make the graph undirected, we add i to be neighbour of all\n # i's neighbours:\n for j in nodes_nei[i]:\n nodes_nei[j].add(i)\n\n for i,nd in enumerate(self.nodes):\n # Initialize a list of neighbours:\n nd.set_neighbours(map(self.make_knode,list(nodes_nei[i])))", "def find_pairs(corpus, a, b):\n pairs = []\n it1 = CorpusIterator(corpus, a)\n it2 = CorpusIterator(corpus, b)\n\n def _is_pair(x, y):\n return x.document_id == y.document_id and x.index == y.index - 1\n\n def _cmp(x, y):\n if x.document_id == y.document_id:\n if x.index == y.index:\n return 0\n elif x.index > y.index:\n return 1\n else:\n return -1\n elif x.document_id > y.document_id:\n return 1\n else:\n return -1\n\n w1 = it1.get_next()\n w2 = it2.get_next()\n while w1 is not None and w2 is not None:\n cmp_value = _cmp(w1, w2)\n if cmp_value == -1:\n # w1 behind w2\n if _is_pair(w1, w2):\n pairs.append(w1)\n w1 = it1.get_next()\n w2 = it2.get_next()\n else:\n if w2.index == 0:\n it1.skip(w2)\n else:\n it1.skip(CorpusPosition(w2.document_id, w2.index - 1))\n w1 = it1.get_next()\n elif cmp_value == 1:\n # w1 ahead of w2\n it2.skip(w1)\n w2 = it2.get_next()\n else:\n raise ValueError('Iterators are tracking same token')\n\n return pairs", "def scatterSearch(graph, alpha, b):\n\n\t#Generate P\n\tP = []\n\trefSet = []\n\trefSet_pos = []\n\tc = 0\n\tpowb2 = pow(b,2)\n\twhile c < powb2:\n\t\tx = diversification(graph, alpha)\n\t\tS,Sp,cut_V = improvement(graph, x)\n\t\tif (not (S,Sp,cut_V) in P) and (not (Sp,S,cut_V) in P):\n\t\t\tP.append((S,Sp,cut_V))\n\t\t\tc = c + 1\n\n\t\t\t#Build RefSet\n\t\t\tif len(refSet) < b/2:\n\t\t\t\trefSet.append((S,Sp,cut_V))\n\t\t\t\trefSet_pos.append(c)\n\t\t\t\tif len(refSet) == b/2:\n\t\t\t\t\t#Get new min\n\t\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b/2)\n\n\t\t\telif cut_V > minrefSet:\n\t\t\t\trefSet[minrefSet_pos] = (S,Sp,cut_V)\n\t\t\t\trefSet_pos[minrefSet_pos] = c\n\n\t\t\t\t#Get new min\n\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b/2)\n\n\t#Build Diverse RefSet\n\tpos = randint(0,(c-1))\n\tfor i in range(0,(b/2)):\n\t\twhile (pos in refSet_pos):\n\t\t\t pos =randint(0,(c-1))\n\n\t\trefSet.append(P[pos])\n\t\trefSet_pos.append(pos)\n\n\t#Get new min\n\tminrefSet, minrefSet_pos = getMinRS(refSet, b)\t\t\n\n\n\tnewSolutions = True\n\twhile newSolutions:\n\t\tnewSolutions = False\n\t\t#Generate subset\n\t\tnewSubSet = generateSubSet(refSet,b)\n\t\tfor subSet in newSubSet:\n\t\t\ttrial_sol = combination(graph, subSet)\n\t\t\t#Improvement\n\t\t\tfor sol in trial_sol:\n\t\t\t\tI,Ip,cut_I = improvement(graph, sol)\n\n\t\t\t\t#Update RefSet\n\t\t\t\tif cut_I > minrefSet:\n\t\t\t\t\tif (not (I,Ip,cut_I) 
in refSet) and (not (Ip,I,cut_I) in refSet):\n\t\t\t\t\t\trefSet[minrefSet_pos] = (I,Ip,cut_I)\n\t\t\t\t\t\tnewSolutions = True\n\t\t\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b)\n\n\treturn getMax(refSet,b)", "def crossover(self, a, b):\n start = random.randint(0, len(a)-1)\n end = random.randint(start+1, len(a))\n new_order = self.blank_order.copy()\n new_order[start:end] = a[start:end]\n for i in range(len(b)):\n city = b[i]\n if not new_order.__contains__(city):\n for j in range(len(new_order)):\n if new_order[j] == -1:\n new_order[j] = city\n break\n return new_order", "def splitNodes(matching):\n outer = set(range(self.n))\n inner = set([])\n for (u, v) in matching:\n if u in outer:\n outer.remove(u)\n if v in outer:\n outer.remove(v)\n inner.add(u)\n inner.add(v)\n return list(inner), list(outer)", "def prepare(self):\n A = np.append(self.a1, self.a2, axis=0)\n b = np.append(self.b1, self.b2, axis=0)\n A, b = shuffle(A, b, random_state=0)\n return A, b", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def get_all_pairs(G):\n # list all (start,dest) pairs between which the route must be computed\n pairs_list = [(start, dest) for dest in G.nodes for start in G.nodes]\n\n # shuffle all elements in-place\n random.shuffle(pairs_list)\n\n # generate a set from the list\n pairs_set = set(pairs_list)\n\n return pairs_list, pairs_set", "def crossover(self, candidates):\n xver = []\n for par1, par2 in candidates:\n n = min(par1.enc_path.shape[0], par2.enc_path.shape[0])\n x_point = np.random.randint(0, n - 1)\n child = Path()\n child.enc_path = np.vstack((par1.enc_path[0:x_point], par2.enc_path[x_point:]))\n xver.append(child)\n return xver", "def find_edges_seed(rna_seq_objs, xgmml_obj, args, stats):\n nodes_copy = rna_seq_objs\n while len(nodes_copy) > 2:\n nodes_copy[0].use_for_comparison = False\n seq_pairs = []\n # go through and find all the matches\n # then remove matches and start again till gone\n for x in range(1, len(nodes_copy) - 1):\n # new nodes_copy[0] each time a node is deleted\n pair = RNASequencePair(\n nodes_copy[0], nodes_copy[x], xgmml_obj\n )\n pair.energy_delta = abs(\n pair.sequence1.free_energy - pair.sequence2.free_energy\n )\n pair.edit_distance = Levenshtein.distance(\n pair.sequence1.sequence, pair.sequence2.sequence\n )\n seq_pairs.append(pair)\n process_seq_pairs(seq_pairs, args, stats)\n for x in seq_pairs:\n if x.is_valid_edge:\n x.sequence2.use_for_comparison = False\n # delete nodes which already belong to an edge\n new_nodes_copy = [\n z for z in nodes_copy if z.use_for_comparison\n ]\n print 'Number of RNA sequences reduced from %d to %d ' % (\n len(nodes_copy), len(new_nodes_copy)\n )\n nodes_copy = new_nodes_copy", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF 
SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def _pair_intersection(\n cls,\n availabilities_a: List[\"Availability\"],\n availabilities_b: List[\"Availability\"],\n ) -> List[\"Availability\"]:\n result = []\n\n # yay for O(b*a) time! I am sure there is some fancy trick to make this faster,\n # but we're dealing with less than 100 items in total, sooo.. ¯\\_(ツ)_/¯\n for a in availabilities_a:\n for b in availabilities_b:\n if a.overlaps(b, True):\n result.append(a.intersect_with(b))\n\n return result", "def BFTM_(adj_list,labels):\n G_prime = nx.Graph()\n num_clusters = list(np.unique(labels))\n clusters = {i:[] for i in num_clusters}\n hood = {n.id:[i for i in num_clusters if i != labels[n.id]] for n in adj_list}\n \n #Add nodes to clusters\n for idx,n in enumerate(adj_list):\n clusters[labels[idx]].append(n.id)\n \n root_cluster = random.choice(num_clusters)\n root_id = random.choice(list(clusters[root_cluster]))\n queue = [adj_list[root_id]]\n clusters[labels[root_id]].remove(root_id)\n \n \n #BFTM\n while len(queue) > 0:\n node = queue.pop(0)\n for c_id in hood[node.id]:\n if len(clusters[c_id]) > 0:\n sample_id = random.choice(clusters[c_id])\n clusters[labels[sample_id]].remove(sample_id)\n queue.append(adj_list[sample_id])\n hood[sample_id].remove(labels[node.id])\n G_prime.add_edge(node,adj_list[sample_id])\n hood[node.id] = None\n #Handle leftover nodes\n if len(queue) == 0:\n remaining = [c for i,c in clusters.items() if len(c) > 0]\n for rem_cluster in remaining:\n for n in rem_cluster:\n added = False\n while not added:\n rand_n = random.choice(list(G_prime.nodes))\n if labels[rand_n.id] != labels[n.id]:\n G_prime.add_edge(n,rand_n)\n added = True\n \n \n #Cliqify\n for node in list(G_prime.nodes):\n if G_prime.degree(node) < len(num_clusters) - 1:\n for _1_hop in list(G_prime.neighbors(node)):\n for _2_hop in list(G_prime.neighbors(_1_hop)):\n if _2_hop != node and G_prime.degree(_2_hop) < len(num_clusters) - 1:\n G_prime.add_edge(node,_2_hop)\n \n return G_prime", "def anchor_pairs(self):\n # TODO unit test for this method\n def _anchors(given_anchor):\n if given_anchor is not None:\n yield given_anchor\n else:\n yield from anchors.Anchor\n for src_anch in _anchors(self.orig_anchor):\n for dest_anch in _anchors(self.dest_anchor):\n yield (src_anch, dest_anch)", "def create_pairs(listA, listB):\n\n pairs = []\n labels = []\n for i in range(0,len(listA)):\n pairs += 
[[listA[i],listB[i]]] # same\n\n compare_to = i\n while compare_to == i: #not comparing to itself\n compare_to = random.randint(0,len(listA)-1)\n\n pairs += [[listA[i], listB[compare_to]]] # different\n\n labels += [1, 0]\n return np.array(pairs), np.array(labels)", "def build_matches(self, noise=0):\n for player1_index in range(len(self.players)):\n for player2_index in range(player1_index, len(self.players)):\n pair = (\n self.players[player1_index], self.opponents[player2_index])\n match = self.build_single_match(pair, noise)\n yield (player1_index, player2_index), match", "def neighbors(node, test_set):\r\n result = set()\r\n for neighbor in node.neighbors:\r\n if neighbor in test_set:\r\n result.add(neighbor)\r\n return result", "def find_clusters_of_candidates(candidates):\n\tprint \"no candidates:\", len(candidates)\n\tclusters = []\n\tfor candidate in candidates:\n\t\tcluster_found = False\n\t\tfor cluster in clusters:\n\t\t\tif len([x for x in cluster if x[0]<=candidate[0] <= max(x[0]+3,x[-1])]):\n\t\t\t\tcluster.append(candidate)\n\t\t\t\tcluster_found = True\n\t\t\t\tbreak\n\t\tif not cluster_found: # start new cluster - cluster is a list of candidates\n\t\t\tclusters.append([candidate])\n\n\t# lets look at isolated cases too - gnomad might already\n\t# have some periodic expansions there\n\treasonable_clusters = [c for c in clusters if len(c)>=1]\n\treturn reasonable_clusters" ]
[ "0.6388406", "0.59702134", "0.5779511", "0.57395196", "0.56252724", "0.5541956", "0.55121106", "0.543829", "0.5397814", "0.5316234", "0.5306503", "0.52857965", "0.52767396", "0.5274424", "0.5258052", "0.52311355", "0.5156326", "0.51288533", "0.51273835", "0.51248735", "0.5106845", "0.50996685", "0.5081751", "0.5081371", "0.50749433", "0.50733757", "0.5056769", "0.50565577", "0.5050043", "0.50441414" ]
0.8069033
0
Normalize the data from a series of Azure cloud API calls into a Python dict object containing very specific portions of the original data.
def normalize_data(vms, vm_statuses, nics, public_ips): normalized_data = {} for vm_id in vms: vm_data = vms[vm_id] name = vm_data['name'] nic_id = vm_data['nic_id'] nic_data = nics[nic_id] public_ip_id = nic_data['public_ip_id'] public_ip_data = public_ips[public_ip_id] public_ip = public_ip_data['address'] public_dns_name = public_ip_data['fqdn'] status = vm_statuses[vm_id] source = "Azure" instance_data = { 'public_ip': public_ip, 'public_dns_name': public_dns_name, 'status': status, 'source': source } normalized_data[name] = instance_data return normalized_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_process_raw(raw: dict) -> dict:\n api_data = raw.get(\"data\", {}).get(\"apiList\", [])\n return {api[\"id\"]: api for api in api_data}", "def parse_api_call(api_resp):\n\n data = {}\n if 'resultSets' in api_resp:\n dictionary_key = 'resultSets'\n elif 'resultSet' in api_resp:\n dictionary_key = 'resultSet'\n\n if isinstance(api_resp[dictionary_key], list):\n for result_set in api_resp[dictionary_key]:\n headers = result_set['headers']\n if len(headers) > 0:\n if isinstance(headers[0], dict):\n add_on = headers[0]['columnNames']\n keep_header = headers[1]['columnNames']\n col_ind = 0\n col_count = 0\n add_count = 0\n for col_name in keep_header:\n col_count += 1\n if col_count <= 5:\n continue\n else:\n keep_header[col_count] += '_' + add_on[col_ind]\n add_count += 1\n if add_count == 2:\n add_count = 0\n col_ind = 1\n headers = keep_header\n values = result_set['rowSet']\n name = result_set['name']\n data[name] = [dict(zip(headers, value)) \n for value in values]\n else:\n result_set = api_resp[dictionary_key]\n headers = result_set['headers']\n if isinstance(headers[0], dict):\n add_on = headers[0]['columnNames']\n keep_header = headers[1]['columnNames']\n col_ind = 0\n col_count = -1\n add_count = 0\n for col_name in keep_header:\n col_count += 1\n if col_count <= 4:\n continue\n else:\n keep_header[col_count] += '_' + add_on[col_ind].replace(' ', '_')\n add_count += 1\n if add_count == 3:\n add_count = 0\n col_ind += 1\n headers = keep_header\n \n values = result_set['rowSet']\n name = result_set['name']\n data[name] = [dict(zip(headers, value)) \n for value in values]\n\n return data", "def _parse_api_base_data (self, netflix_page_data):\n api_data = {};\n important_fields = [\n 'API_BASE_URL',\n 'API_ROOT',\n 'BUILD_IDENTIFIER',\n 'ICHNAEA_ROOT'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n api_data.update({important_field: netflix_page_data.get(important_field, '')})\n return api_data\n\n for item in netflix_page_data:\n if 'serverDefs' in dict(item).keys():\n for important_field in important_fields:\n api_data.update({important_field: item['serverDefs']['data'][important_field]})\n return api_data", "def massage_api_response(api_data):\n return_dict = defaultdict(list)\n legs = api_data['legs'][0]\n\n return_dict['start_address'].append(legs['start_address'])\n return_dict['end_address'].append(legs['end_address'])\n return_dict['distance'].append(legs['distance']['text'])\n return_dict['duration'].append(legs['duration']['text'])\n if 'duration_in_traffic' in legs:\n (return_dict['duration_in_traffic']\n .append(legs['duration_in_traffic']['text']))\n return_dict['travel_mode'].append(legs['steps'][0]['travel_mode'])\n\n for instruction in legs['steps']:\n (return_dict['instructions']\n .append(BeautifulSoup(instruction['html_instructions'],\n 'html.parser').get_text()))\n return_dict['step_distance'].append(instruction['distance'])\n return return_dict", "def normalize(self, response):\n normalize_data = { 'source': self.provider_name, 'photos': [] }\n raw_data = response.json()\n \n for photo in raw_data['photos']['photo']:\n current_photo = self._build_photos_url(photo)\n normalize_data['photos'].append({\n 'name': photo['title'],\n \"thumbnail\": current_photo['thumbnail'],\n \"original\": current_photo['original']\n })\n\n return normalize_data", "def _transform(self, resource_from_api):\n for (project_id, backend_services) in resource_from_api.iteritems():\n 
for backend_service in backend_services:\n yield {'project_id': project_id,\n 'id': backend_service.get('id'),\n 'creation_timestamp': parser.format_timestamp(\n backend_service.get('creationTimestamp'),\n self.MYSQL_DATETIME_FORMAT),\n 'name': backend_service.get('name'),\n 'description': backend_service.get('description'),\n 'affinity_cookie_ttl_sec': self._to_int(\n backend_service.get('affinityCookieTtlSec')),\n 'backends': parser.json_stringify(\n backend_service.get('backends', [])),\n 'cdn_policy': parser.json_stringify(\n backend_service.get('cdnPolicy', {})),\n 'connection_draining': parser.json_stringify(\n backend_service.get('connectionDraining', {})),\n 'enable_cdn': self._to_bool(\n backend_service.get('enableCDN')),\n 'health_checks': parser.json_stringify(\n backend_service.get('healthChecks', [])),\n 'iap': parser.json_stringify(\n backend_service.get('iap', {})),\n 'load_balancing_scheme': backend_service.get(\n 'loadBalancingScheme'),\n 'port': self._to_int(backend_service.get('port')),\n 'port_name': backend_service.get('portName'),\n 'protocol': backend_service.get('protocol'),\n 'region': backend_service.get('region'),\n 'session_affinity': backend_service.get(\n 'sessionAffinity'),\n 'timeout_sec': backend_service.get('timeoutSec'),\n 'raw_backend_service':\n parser.json_stringify(backend_service)}", "def consolidate_data(data):\n out = list()\n ## API data is organized in {category (tv,tvshort,movie,etc.):[list of show dicts]}\n for cat,shows in data.items():\n for show in shows:\n show['category'] = cat.title()\n out.append(show)\n return out", "def _prepare_data(\n self,\n request_data: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Any]:\n if request_data is None:\n request_data = {}\n request_data['page.rows'] = self._rows_in_page\n if self._current_row:\n request_data['page.number'] = \\\n self._current_row // self._rows_in_page + 1\n else:\n # Page number starts from 0\n page_number = self._min_row // self._rows_in_page\n # But for request page number starts from 1\n request_data['page.number'] = page_number + 1\n self._current_row = self._rows_in_page * page_number\n return request_data", "def process_data(data):\n info = {\n 'cities': [],\n 'temperatures': [],\n 'humidities': [],\n }\n cities = data['list']\n for city in cities:\n main_data = city['main']\n info['cities'].append(city['name'])\n info['temperatures'].append(main_data['temp'])\n info['humidities'].append(main_data['humidity'])\n\n return info", "def getInitialData(nsmapi):\r\n # Done 6-1-2020\r\n # TODO extract ids not using the regex?\r\n initData = {}\r\n\r\n url = f\"/healthcheck\"\r\n print(\"Running basic healthcheck\")\r\n healthcheckData = nsmapi.call(url, method=\"PUT\", message='{\"id\":[\"default\"]}')\r\n initData[\"healthcheck\"] = healthcheckData\r\n\r\n for i in range(20):\r\n print(f\".\", end=\"\", flush=True)\r\n time.sleep(.5)\r\n print(\"\")\r\n\r\n print(\"Getting initial sensor data\")\r\n url = \"/sensors\"\r\n basicData = json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"sensorId\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n print(\"Getting initial domain data\")\r\n url = \"/domain\"\r\n basicData = 
json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"id\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n policyURLs = [\r\n \"/domain/{domainId}/ipspolicies\",\r\n \"/domain/{domainId}/firewallpolicy\",\r\n \"/domain/{domainId}/connectionlimitingpolicies\",\r\n \"/domain/{domainId}/qospolicy\",\r\n \"/protectionoptionspolicy\",\r\n \"/domain/{domainId}/malwarepolicy\",\r\n \"/domain/{domainId}/policygroups\"\r\n ]\r\n\r\n print(\"Getting initial policy data\")\r\n initData[\"policy\"] = {}\r\n for domain in initData[\"domain\"]:\r\n domainId, domainName = domain.split(\",\")\r\n initData[\"policy\"][domainId] = {}\r\n for url in policyURLs:\r\n url = url.replace(\"{domainId}\", domainId)\r\n policyData = nsmapi.call(url)\r\n key = list(policyData.keys())[0]\r\n policyType = url.split(\"/\")[-1].replace(\"policy\", \"\").replace(\"policies\", \"\")\r\n initData[\"policy\"][domainId][policyType] = []\r\n for policy in policyData[key]:\r\n policy = json.dumps(policy)\r\n # pattern = \"\\\"([^\\\"]*?)(id|ID|iD|Id){0,1}(name){0,1}\\\": (.*?),\" - don't seem to work\r\n # extracted = re.findall(pattern, policy) - don'tens seem to works\r\n # initData[\"policy\"][domainId][policyType][\"full\"] = policy\r\n for polK, polV in json.loads(policy).items():\r\n if \"omain\" not in polK.lower():\r\n if \"name\" in polK.lower():\r\n name = polV\r\n elif \"id\" in polK.lower():\r\n id = polV\r\n initData[\"policy\"][domainId][policyType].append((id,name))\r\n\r\n print(\"Got Initial Data\")\r\n\r\n return initData", "def normalize_server_list_json(server_list):\n myservers = dict()\n global most_fields\n #most_fields = dict()\n #most_fields = {'none': 0} # too lazy to make complex condition\n\n for server in server_list:\n \"\"\"\n Iterate over servers and cherry pick wanted variables/data\n \"\"\"\n myservers[server['name']] = {\n \"name\": server['name'],\n \"flavor_id\": server['flavor']['id'],\n \"flavor_name\": str(server['flavor']['name']),\n \"image_id\": server['image']['id'],\n \"region_name\": server['location']['region_name'],\n \"project_id\": server['location']['project']['id'],\n \"access_ip4\": server['accessIPv4'],\n \"access_ip6\": server['accessIPv6'],\n \"interface_ip4\": server['interface_ip'],\n \"created_at\": server['created_at'],\n \"updated_at\": server['updated'],\n \"terminated_at\": server['terminated_at'],\n \"status\": server['status'],\n \"power_state\": server['power_state'],\n \"provider_ip_zone\": server['RAX-PUBLIC-IP-ZONE-ID:publicIPZoneId'],\n \"host_id\": server['host_id'],\n \"id\": server['id'],\n \"tenant_id\": server['tenant_id']\n }\n\n # @TODO: move this to function add checks when some fields are missing\n if len(server['volumes']) > 0:\n i = 0\n for vol in server['volumes']:\n myservers[server['name']].update({\n \"vol\" + str(i) + '_id': vol['id'],\n \"vol\" + str(i) + '_name': vol['name'],\n \"vol\" + str(i) + '_status': vol['status'],\n \"vol\" + str(i) + '_size': vol['size'],\n \"vol\" + str(i) + '_created_at': vol['created_at'],\n \"vol\" + str(i) + '_updated_at': vol['updated_at'],\n \"vol\" + str(i) + '_type': vol['volume_type'],\n \"vol\" + str(i) + '_device': vol['device'],\n \"vol\" + str(i) + 
'_storage_node': vol['metadata']['storage-node'],\n #\"vol\" + str(i) + '_storage_mode': vol['metadata']['attached_mode'],\n \"vol\" + str(i) + '_server_id': vol['attachments'][0]['server_id'],\n \"vol\" + str(i) + '_attachment_id': vol['attachments'][0]['attachment_id'],\n \"vol\" + str(i) + '_host_name': vol['attachments'][0]['host_name'],\n \"vol\" + str(i) + '_volume_id': vol['attachments'][0]['volume_id'],\n \"vol\" + str(i) + '_az': vol['availability_zone']\n })\n i = i + 1\n\n else:\n myservers[server['name']].update({\n \"additional_storage\": 0\n })\n\n if int(len(myservers[server['name']])) > int(list(most_fields.values())[-1]):\n most_fields = dict()\n most_fields[server['name']] = int(len(myservers[server['name']]))\n\n # @TODO: add iteration via server['metadata'] when len > 0\n # @TODO: add iteration via server['properties'] when len > 0\n # @TODO: add iteration via server['addresses'] and dynamically add 'networks - Galaxy, public, private ..'\n\n return myservers", "def _normalize_data_to_send(info):\n # Remove the parts of the data that are unbounded in size.\n info = copy.deepcopy(info)\n for key in ['model_config', 'epoch_history']:\n if key in info:\n del info[key]\n return info", "def _process_subtask_rets(subtask_rets):\n ret = {}\n for key, val in subtask_rets.items():\n label, uuid = key.rsplit('_', 1)\n if label != 'wrapper':\n ret[uuid] = dict(val,\n wrapper=subtask_rets['wrapper_%s' % uuid])\n return ret", "def post_process(cls, data):\n request_string = data['request']\n request_pattern = r'(?P<http_method>GET|HEAD|POST) (?P<url>\\S+)'\n m = re.search(request_pattern, request_string)\n if m:\n newdata = m.groupdict()\n data.update(newdata)\n\n # If the upstream response was '-' then Nginx bailed out and didn't wait\n # Assume it's some high value\n if data['upstream_response_time'] == '-':\n data['upstream_response_time'] = '90'\n\n # Convert the times to floats\n for time_label in ['request_time', 'upstream_response_time']:\n data[time_label] = float(data[time_label])\n\n return data", "def post_process(data):\n for record in data[\"Records\"]:\n for name, value in record.items():\n if type(value) == list:\n newlist = []\n for entry in value:\n newlist.append(post_process_pair(name, entry))\n record[name] = newlist\n else:\n record[name] = post_process_pair(name, value)", "def map_hits_to_api_spec(es_res):\n hits = es_res['hits']['hits']\n clips = []\n for hit in hits:\n clip = copy(hit['_source'])\n clip['uri'] = hit['_id']\n clip['masterBrand'] = clip['masterBrand']['mid'] if clip['masterBrand'] is not None else None\n genres = {'topLevel': [], 'secondLevel': [], 'thirdLevel': []}\n genre_mapping = {\n 0: 'topLevel',\n 1: 'secondLevel',\n 2: 'thirdLevel'\n }\n for genre in clip['genres']:\n genres[genre_mapping[genre['level']]].append({\n 'uri': genre['uri'],\n 'label': genre['label'],\n 'key': genre['key']\n })\n clip['genres'] = genres\n clip['publicationDate'] = clip['releaseDate']\n clips.append(clip)\n\n fields_to_keep = ['pid', 'uri', 'mediaType', 'duration', 'masterBrand', 'genres', 'image', 'title',\n 'publicationDate', 'version']\n hits = [{k: v for k, v in clip.items() if k in fields_to_keep} for clip in clips]\n return hits", "def _postprocess(self, responses):\n for idx, response in enumerate(responses):\n responses[idx] = {'id': response[0],\n 'text': self.target_test[response[0]]}\n\n for jdx, score in enumerate(response[1:]):\n responses[idx]['score_' + str(jdx)] = response[1:][jdx]\n\n return responses", "def 
data_record_to_api_status(data):\r\n\tdata_web_view = data_record_to_web_view(data)\r\n\tdata = {\r\n\t\t\"request_id\": data[\"request_id\"],\r\n\t\t\"url\": data[\"url\"],\r\n\t\t\"status\": data_web_view[\"status\"],\r\n\t\t\"screenshot_available\": data_web_view[\"screenshot_available\"],\r\n\t\t\"proof_available\": data_web_view[\"proof_available\"],\r\n\t\t\"timestamp\": data[\"timestamp\"],\r\n\t\t\"prune_timestamp\": data[\"prune_timestamp\"]\r\n\t}\r\n\treturn data", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def parse_data( self ):\n self.parsed_data = dict( self.results )", "def read_data(raw_data: list):\r\n cleaned_data = {}\r\n for data_item in raw_data:\r\n clean_data_item = pre_process_string_data(data_item)\r\n if clean_data_item is not None:\r\n cleaned_data[clean_data_item['_id']] = clean_data_item\r\n return cleaned_data", "def _separate_raw_data(self, raw_data):\n for key, value in raw_data.items():\n if type(value) == dict:\n self.data_dict[key] = value\n elif type(value) == list:\n self.data_list[key] = value", "def get_json(self):\n data = {}\n data['ip'] = self.ip\n\n try:\n data['country'] = self.processedvtdata[\"country\"]\n except KeyError:\n data['country'] = 'None'\n try:\n data['as'] = self.processedvtdata[\"as_owner\"]\n except KeyError:\n data['as'] = 'None'\n try:\n data['rdns'] = self.processedvtdata[\"self.reversedns\"]\n except KeyError:\n data['rdns'] = 'None'\n try:\n data['label'] = self.expertlabel\n except AttributeError:\n data['label'] = ''\n\n # geodata\n #{\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n if self.geodata:\n data['geodata'] = self.geodata\n \n # vt resolutions. Is a list\n data['vt'] = {}\n try:\n if self.processedvtdata['resolutions'] != 'None':\n data['vt']['resolutions'] = []\n for count, resolution_tuple in enumerate(self.processedvtdata['resolutions']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = resolution_tuple[0]\n temp['domain'] = resolution_tuple[1]\n data['vt']['resolutions'].append(temp)\n except KeyError:\n pass\n\n # vt urls. 
Is a list\n try:\n if self.processedvtdata['detected_urls'] != 'None':\n data['vt']['detected_urls'] = []\n for count, url_tuple in enumerate(self.processedvtdata['detected_urls']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = url_tuple[0]\n temp['url'] = url_tuple[1][0]\n temp['detections'] = str(url_tuple[1][1]) + '/' + str(url_tuple[1][2])\n data['vt']['detected_urls'].append(temp)\n except KeyError:\n pass\n\n\n # vt detected communicating samples. Is a list\n try:\n if self.processedvtdata['detected_communicating_samples'] != 'None':\n data['vt']['detected_communicating_samples'] = []\n for count, communcating_tuple in enumerate(self.processedvtdata['detected_communicating_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = communcating_tuple[0]\n temp['detections'] = str(communcating_tuple[1][0]) + '/' + str(communcating_tuple[1][1])\n temp['sha256'] = communcating_tuple[1][2]\n data['vt']['detected_communicating_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt detected downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_downloaded_samples'] != 'None':\n data['vt']['detected_downloaded_samples'] = []\n for count, detected_tuple in enumerate(self.processedvtdata['detected_downloaded_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = detected_tuple[0]\n temp['detections'] = str(detected_tuple[1][0]) + '/' + str(detected_tuple[1][1])\n temp['sha256'] = detected_tuple[1][2]\n data['vt']['detected_downloaded_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt referrer downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_referrer_samples'] != 'None':\n data['vt']['detected_referrer_samples'] = []\n for count, referrer_tuple in enumerate(self.processedvtdata['detected_referrer_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['sha256'] = referrer_tuple[0]\n temp['detections'] = str(referrer_tuple[1][0]) + '/' + str(referrer_tuple[1][1])\n data['vt']['detected_referrer_samples'].append(temp)\n except AttributeError:\n pass\n\n # pt data\n data['pt'] = {}\n if self.processedptdata:\n count = 0\n data['pt']['passive_dns'] = []\n for result in self.processedptdata_results:\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['lastseen'] = result[0]\n temp['firstseen'] = result[1][0]\n temp['hostname'] = result[1][1]\n data['pt']['passive_dns'].append(temp)\n count += 1\n\n # shodan data\n try:\n if self.shodandata:\n data['shodan'] = self.shodandata\n except AttributeError:\n pass\n\n data = json.dumps(data)\n return data", "def postprocess(self, inference_output):\n ret = []\n quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # for each request\n for inference_output_request in inference_output:\n ret_request = []\n # for each time series\n for i in inference_output_request:\n l = {}\n l[\"item_id\"] = i.item_id\n l[\"quantiles\"] = {}\n for q in quantiles:\n l[\"quantiles\"][str(q)] = i.quantile(q).tolist()\n l[\"mean\"] = i.mean.tolist()\n ret_request.append(json.dumps(l))\n ret.append('\\n'.join(ret_request) + '\\n')\n return ret", "def get_master_data():\n data = RAW_RESPONSE.json()\n STATES = set()\n CITIES = set()\n DISTRICTS = set()\n for each in data[\"raw_data\"]:\n\n if each[\"detectedstate\"]!='':\n\n RAW_STATES.add(each[\"detectedstate\"])\n STATES.add(each[\"detectedstate\"].lower().strip().replace(\" \",\"\"))\n if each[\"detecteddistrict\"]!='':\n 
RAW_DISTRICTS.add(each[\"detecteddistrict\"])\n DISTRICTS.add(each[\"detecteddistrict\"].lower().strip().replace(\" \",\"\"))\n if each[\"detectedcity\"]!='':\n RAW_CITIES.add(each[\"detectedcity\"])\n CITIES.add(each[\"detectedcity\"].lower().strip().replace(\" \",\"\"))\n STATES = list(filter(None, STATES))\n DISTRICTS = list(filter(None, DISTRICTS))\n CITIES = list(filter(None, CITIES))\n\n return STATES, DISTRICTS, CITIES", "def process_data(data):\n # set up an OrderedDict to hold the data with initial data set to 0\n output = OrderedDict((s, {'calls': [], 'out_count': 0, 'out_dur':\n datetime.timedelta(seconds=0), 'in_count': 0, 'in_dur':\n datetime.timedelta(seconds=0), 'total_dur':\n datetime.timedelta(seconds=0)}) for s in config.SALESPEOPLE)\n for d in data:\n # assume it's an outgoing call.\n call_type = 'outgoing'\n key = d['From']\n tofrom = '{0} {1}'.format(d['To CID Name'], d['To CID Number'])\n if not tofrom.strip():\n tofrom = d['Digits'].strip() or d['To']\n out_count, in_count = 1, 0\n dur_h, dur_m, dur_s = map(int, d['Duration'].split(':'))\n duration = datetime.timedelta(hours=dur_h, minutes=dur_m,\n seconds=dur_s)\n out_dur = duration\n in_dur = datetime.timedelta(seconds=0)\n if key not in config.SALESPEOPLE:\n # it's an incoming call if the From name isn't one of the\n # config.SALESPEOPLE. Adjust the data accordingly\n call_type = 'incoming'\n key = d['To']\n tofrom = '{0} {1}'.format(d['From CID Name'], d['From CID Number'])\n if not tofrom.strip():\n tofrom = d['To']\n out_count, in_count = 0, 1\n out_dur = datetime.timedelta(seconds=0)\n in_dur = duration\n\n # format the phone numbers\n tofrom = re.sub(r'1?(\\d{3})(\\d{3})(\\d{4})$', r'\\1-\\2-\\3', tofrom)\n\n output[key]['calls'].append({'time': d['Start Time'], 'type':\n call_type, 'duration': d['Duration'], 'tofrom': tofrom})\n output[key]['out_count'] = output[key]['out_count'] + out_count\n output[key]['out_dur'] = output[key]['out_dur'] + out_dur\n output[key]['in_count'] = output[key]['in_count'] + in_count\n output[key]['in_dur'] = output[key]['in_dur'] + in_dur\n output[key]['total_dur'] = output[key]['total_dur'] + duration\n\n return output", "def _data_normalization(data: list) -> list:\n\n return [ \n [\n d[0], # IP\n d[1], # Port\n Froxy._split_proxy_info(\n d[2].strip(' ') # Proxy Info\n ) \n ]\n for d in data\n ]", "def _process_dict(data):\n new_dict = {}\n for key in data.keys():\n\tnew_dict['name'] = data['printerName']\n #new_dict[key] = data[key]\n\n #FIGURE OUT AND UPDATE PRINTER STATUS\n status = BUSY_STATUS\n error_msg = \"\"\n if \"FrontPanelMessage\" in data:\n if data[\"FrontPanelMessage\"].lower() in READY_MESSAGES:\n status = READY_STATUS\n elif \"error\" in data[\"FrontPanelMessage\"].lower():\n status = ERROR_STATUS\n error_msg = \"general error\"\n \n if \"TonerStatus\" in data:\n if data[\"TonerStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Toner Error\"\n #if len(new_dict[\"TonerStatus\"]) > 4:\n #new_dict[\"TonerStatus\"] = new_dict[\"TonerStatus\"][4:]\n\n if \"PaperStatus\" in data:\n if data[\"PaperStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Status Error\"\n elif data[\"PaperStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Out of Paper\"\n #if len(new_dict[\"PaperStatus\"]) > 4:\n #new_dict[\"PaperStatus\"] = new_dict[\"PaperStatus\"][4:]\n\n if \"PaperJamStatus\" in data:\n if data[\"PaperJamStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Jam\"\n #if 
len(new_dict[\"PaperJamStatus\"]) > 4:\n #new_dict[\"PaperJamStatus\"] = new_dict[\"PaperJamStatus\"][4:]\n\n new_dict[\"status\"] = status\n new_dict[\"error_msg\"] = error_msg\n new_dict[\"location\"] = PRINTERS[new_dict[\"name\"]][0]\n new_dict[\"building_name\"] = PRINTERS[new_dict[\"name\"]][1]\n new_dict[\"latitude\"] = PRINTERS[new_dict[\"name\"]][2]\n new_dict[\"longitude\"] = PRINTERS[new_dict[\"name\"]][3]\n new_dict[\"atResidence\"] = PRINTERS[new_dict[\"name\"]][4]\n return new_dict", "def post_process(self, res):\n # some lists are better converted to numpy arrays\n convert_to_arr = (\n 'prediction_rank',\n 'cumulative_area',\n 'prediction_values',\n 'cumulative_crime',\n 'cumulative_crime_count',\n 'cumulative_crime_max',\n 'pai'\n )\n for k in convert_to_arr:\n if k in res:\n # this allows for optional components such as prediction values\n res[k] = np.array(res[k])", "def collect_response(response_lines):\n response_dict = {}\n for entry in filter(None, response_lines):\n prefix = entry[0]\n if prefix in response_dict:\n response_dict[prefix] += [entry]\n else:\n response_dict[prefix] = [entry]\n return response_dict" ]
[ "0.65975904", "0.6409738", "0.6102891", "0.60573673", "0.59634995", "0.566204", "0.54953855", "0.5486656", "0.54791516", "0.54694664", "0.54640585", "0.5420058", "0.5407419", "0.5402164", "0.53785086", "0.53610575", "0.53552586", "0.5355134", "0.5346608", "0.533809", "0.53360784", "0.5335487", "0.53354204", "0.5334851", "0.5325961", "0.5315716", "0.52924776", "0.52770966", "0.5276585", "0.5260157" ]
0.6871206
0
Create user with {APR1} password and check that user can login.
def test_auth_with_apr1_stored_password(self):
    # Create test user
    name = u'Test User'
    # generated with "htpasswd -nbm blaze 12345"
    password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345
    self.createUser(name, password, True)

    # Try to "login"
    theuser = user.User(self.request, name=name, password='12345')
    assert theuser.valid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(email, password, f_name, l_name):\n pass", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(username, password):\n if not validate_username(username):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(username):\n return \"käyttäjänimi on jo käytössä\"\n if not validate_password(password):\n return \"salasana on väärää muotoa\"\n sql = \"INSERT INTO users (username, password, user_group, is_active) \" \\\n \"VALUES (:username, :password, 'basic', TRUE)\"\n password_hash = generate_password_hash(password)\n db.session.execute(sql, {\"username\": username, \"password\": password_hash})\n db.session.commit()\n return \"ok\"", "def create_user(username,password):\n return User.objects.create_user(username=username,password=password)", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_user():\n body = request.json\n username = body.get('username')\n password = body.get('password')\n validation = validate_user(username, password)\n password = md5(password.encode('utf-8')).hexdigest()\n if validation != \"OK\":\n return HTTPResponse(status=500, body={\"message\":validation})\n try:\n with db.atomic():\n user = User.create(username=username, password=password)\n user.save()\n ret = json.dumps({'message':'user created'})\n return HTTPResponse(status=200, body=ret)\n except IntegrityError:\n ret = json.dumps({'message':'user already exists'})\n return HTTPResponse(status=500, body=ret)", "def create(self, validated_data):\n username = validated_data['username']\n password = validated_data['password']\n\n if len(username) > 5 and len(password) > 5:\n newUser = User.objects.create_user(**validated_data) # username=username,password=password\n return newUser\n else:\n return 'error' # not a valid error will need changing ", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def create_user():\n print('Creating user ...')\n client = MongoClient(db_uri)\n try:\n client[database_].command(\"createUser\", os.environ['MONGODB_USERNAME'], pwd=os.environ['MONGODB_PASSWORD'],\n roles=[{'role': 'readWrite', 'db': database_}])\n except OperationFailure:\n print('User already exists')\n return\n print('User created')", "def create_test_user():\n return 
User.objects.create(username='test_username', password='test_password')", "def create_and_login(self):\n with self.context():\n user = self.factory(meido.factories.UserFactory)\n self.client.post('/management/login', data={\n 'username': 'admin', 'password': 'pretender'\n })", "async def create_user(self, **kwargs):\n\n username = kwargs[\"Login\"]\n password = hash_password(kwargs[\"Password\"])\n\n if await self.request.app.manager.count(\n User.select().where(User.username == username)):\n\n return {\"Type\": \"registration\", \"Status\": \"user exist\"}\n\n user = await self.request.app.manager.create(User,\n username=username,\n password=password)\n self.request.user = user\n await self._login_user(user)\n await add_active_sockets(self.request)\n await create_instance(self.request)\n\n return {\"Type\": \"registration\", \"Status\": \"success\"}", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. \", problem1)", "def sample_user(username='arturbartecki', password='testpassword'):\n return get_user_model().objects.create_user(username, password)", "def test_create_valid_user_success(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_201_CREATED\n user = get_user_model().objects.get(**res.data)\n assert user.check_password(payload['password'])\n assert 'password' not in res.data", "def create_user(ctx, db_username, db_password, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n user = cmd.ensure_admin_user(\n client=ctx.obj, project_id=project.id, username=db_username,\n password=db_password)\n pprint(user)", "def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200", "def new_user():\n\n username = request.json['username']\n if len(username) < 4:\n return '1'\n ds = \"'\\\\\\\"%}{\"\n for i in ds:\n if i in username:\n return '1'\n rem = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n email = request.json['email']\n if re.fullmatch(rem, email) == None:\n return '1'\n password = request.json['password']\n if len(password) != 64:\n return '1'\n _ = db.register(username, email, generate_password_hash(password))\n if _ == 0:\n return '0'\n else:\n return '2'", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'name': 'TestName'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n \n user = get_user_model().objects.get(**res.data)\n \n self.assertTrue(user.check_password(payload['[email protected]', \n 'testpass']))\n self.assertNotIn('testpass', res.data)", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def setUp(self):\n 
account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')", "def test_create_user_whit_email_successfull(self):\n email = '[email protected]'\n password = 'pass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)" ]
[ "0.73004067", "0.7224032", "0.71962816", "0.70947254", "0.70152885", "0.69930893", "0.6973203", "0.69717765", "0.69700277", "0.69580203", "0.69015574", "0.6899521", "0.6899521", "0.6899521", "0.6867215", "0.68664825", "0.6848039", "0.68402165", "0.6837405", "0.68344754", "0.68216133", "0.6784681", "0.67828804", "0.6773983", "0.67737174", "0.67602605", "0.6759999", "0.6756348", "0.6748405", "0.6737872" ]
0.7594792
0
Create user with {MD5} password and check that user can login.
def test_auth_with_md5_stored_password(self):
    # Create test user
    name = u'Test User'
    password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345
    self.createUser(name, password, True)

    # Try to "login"
    theuser = user.User(self.request, name=name, password='12345')
    assert theuser.valid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def test_create_user_with_successful_email(self):\n email = '[email protected]'\n password = 'userpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"Testpass123\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_whit_email_successfull(self):\n email = '[email protected]'\n password = 'pass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def create_user(email, password, f_name, l_name):\n pass", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"Testpass123\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_succesful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_on_email(self):\n email = \"[email protected]\"\n password = \"Test12345\"\n\n user = get_user_model().objects.create_user(\n email=email, password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_and_password(self):\n\n email = '[email protected]'\n password = 'otuonye'\n\n user = get_user_model().objects.create_user(\n email=email, password=password\n )\n\n self.assertEqual(user.email, 
email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Test1234'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successfull(self):\n email = '[email protected]'\n password = 'fghdjdkri'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"test_password_123\"\n\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def test_create_user_with_email(self):\n email = '[email protected]'\n password = 'TestPass0244'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"loremIpsumDolor\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password,\n first_name=\"Hakan\",\n last_name=\"Yalcinkaya\",\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n\n email = '[email protected]'\n password = 'Password123'\n user = get_user_model().objects.create_user(email=email,\n password=password)\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'test123456'\n user = get_user_model().objects.create_user(email=email, password=password)\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_success(self):\n email = '[email protected]'\n password = 'azerty'\n user = get_user_model().objects.create_user(\n email = email,\n password = password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_succesful(self):\n email = '[email protected]'\n password = 'Testnew@11'\n user = get_user_model().objects.create_user(\n email=email, password=password)\n\n self.assertEqual(email, user.email)\n self.assertTrue(user.check_password(password))", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n 
self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def create(self, validated_data):\n username = validated_data['username']\n password = validated_data['password']\n\n if len(username) > 5 and len(password) > 5:\n newUser = User.objects.create_user(**validated_data) # username=username,password=password\n return newUser\n else:\n return 'error' # not a valid error will need changing ", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_create_valid_user(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check that the object has actually been created properly.\n user = get_user_model().objects.get(**response.data)\n self.assertTrue(user.check_password(credentials['password']))\n\n # Check that the HTTP response does not include the password.\n self.assertNotIn('password', response.data)" ]
[ "0.7318656", "0.73062855", "0.7287217", "0.72614634", "0.7258231", "0.72305536", "0.7217494", "0.7212634", "0.7208353", "0.7205905", "0.720523", "0.7198507", "0.7187735", "0.7183065", "0.7181787", "0.71778566", "0.71710557", "0.7155194", "0.7145005", "0.7124427", "0.71208876", "0.7119079", "0.7108233", "0.70916355", "0.7058565", "0.70358485", "0.7010442", "0.6952956", "0.6946913", "0.6935385" ]
0.8022129
0
Create user with {DES} password and check that user can login.
def test_auth_with_des_stored_password(self):
    # Create test user
    name = u'Test User'
    # generated with "htpasswd -nbd blaze 12345"
    password = '{DES}gArsfn7O5Yqfo' # 12345
    self.createUser(name, password, True)

    try:
        import crypt
        # Try to "login"
        theuser = user.User(self.request, name=name, password='12345')
        assert theuser.valid
    except ImportError:
        py.test.skip("Platform does not provide crypt module!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(email, password, f_name, l_name):\n pass", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def create_user(username,password):\n return User.objects.create_user(username=username,password=password)", "def test_create_user_whit_email_successfull(self):\n email = '[email protected]'\n password = 'pass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. 
\", problem1)", "def test_create_user_with_successful_email(self):\n email = '[email protected]'\n password = 'userpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_and_password(self):\n\n email = '[email protected]'\n password = 'otuonye'\n\n user = get_user_model().objects.create_user(\n email=email, password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"Testpass123\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_valid_user(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check that the object has actually been created properly.\n user = get_user_model().objects.get(**response.data)\n self.assertTrue(user.check_password(credentials['password']))\n\n # Check that the HTTP response does not include the password.\n self.assertNotIn('password', response.data)", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_user_with_email_succesful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def create_user(username, password):\n if not validate_username(username):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(username):\n return \"käyttäjänimi on jo käytössä\"\n if not validate_password(password):\n return \"salasana on väärää muotoa\"\n sql = \"INSERT INTO users (username, password, user_group, is_active) \" \\\n \"VALUES (:username, :password, 'basic', TRUE)\"\n password_hash = generate_password_hash(password)\n db.session.execute(sql, {\"username\": username, \"password\": password_hash})\n db.session.commit()\n return \"ok\"", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'name': 'TestName'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n \n user = get_user_model().objects.get(**res.data)\n \n self.assertTrue(user.check_password(payload['[email protected]', \n 'testpass']))\n 
self.assertNotIn('testpass', res.data)", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"Testpass123\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_with_email_successfull(self):\n email = '[email protected]'\n password = 'fghdjdkri'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def sample_user(username='arturbartecki', password='testpassword'):\n return get_user_model().objects.create_user(username, password)", "def test_create_user_with_email_successful(self):\n email = '[email protected]'\n password = 'Testpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))" ]
[ "0.7343586", "0.73068523", "0.72953594", "0.7279533", "0.7268812", "0.7237736", "0.7235069", "0.72157264", "0.72143716", "0.7140119", "0.7119481", "0.71022165", "0.707469", "0.7071333", "0.7054914", "0.70465577", "0.7039468", "0.70377856", "0.70342153", "0.7029694", "0.70166934", "0.7011971", "0.69970894", "0.69929796", "0.698996", "0.69898367", "0.6986681", "0.69861066", "0.6984078", "0.6981751" ]
0.746672
0
Create a user, then rename the user and check that the old username is removed from the name2id cache.
def testRenameUser(self):
    # Create test user
    name = u'__Some Name__'
    password = name
    self.createUser(name, password)

    # Login - this should replace the old password in the user file
    theUser = user.User(self.request, name=name)

    # Rename user
    theUser.name = u'__SomeName__'
    theUser.save()

    theUser = user.User(self.request, name=name, password=password)
    assert not theUser.exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __check_if_username_has_changed(self, input_username):\n\n user_id = get_data.get_id_by_username_from_db(input_username)\n if user_id:\n username = get_data.get_username_by_id(user_id)\n if username:\n if input_username != username:\n update_data.rename_user(user_id, username)\n helper.rename_user_dir(input_username, username)\n return username\n return input_username", "def set_username(old_name, new_name):\n if not validate_username(new_name):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(new_name):\n return \"käyttäjänimi on jo käytössä\"\n sql = \"UPDATE users \" \\\n \"SET username=:new \" \\\n \"WHERE username=:old\"\n db.session.execute(sql, {\"new\": new_name, \"old\": old_name})\n db.session.commit()\n return \"ok\"", "def update_user(username, data):\n\n hashed_username = base64.b64encode(Cryptography.hash(username).digest()).decode()\n\n with open(getcwd() + Database.__DB_FILENAME) as file_input,\\\n open(getcwd() + Database.__DB_FILENAME + '.temp', 'w') as file_output:\n for entry in file_input:\n new_entry = entry\n if entry.split(':')[0] == hashed_username:\n iv, ciphered_data = Cryptography.cipher(Cryptography.get_passphrase(), data)\n new_entry = hashed_username + ':' + ciphered_data.hex() + '.' + iv.hex() + '\\n'\n file_output.write(new_entry)\n if os.path.exists(getcwd() + Database.__DB_FILENAME):\n os.remove(getcwd() + Database.__DB_FILENAME)\n os.rename(getcwd() + Database.__DB_FILENAME + '.temp', getcwd() + Database.__DB_FILENAME)", "def create_user(change):\n return change()", "def update_user():", "def rename(self,newName):\n self.userName = newName", "def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))", "def rename(ctx, query, name, force, password, remember):\n\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = session.list_credentials()\n hits = _search(creds, query, True)\n if len(hits) == 0:\n click.echo(\"No matches, nothing to be done.\")\n elif len(hits) == 1:\n cred = hits[0]\n if \":\" in name:\n issuer, name = name.split(\":\", 1)\n else:\n issuer = None\n\n new_id = _format_cred_id(issuer, name, cred.oath_type, cred.period)\n if any(cred.id == new_id for cred in creds):\n raise CliFail(\n f\"Another account with ID {new_id.decode()} \"\n \"already exists on this YubiKey.\"\n )\n if force or (\n click.confirm(\n f\"Rename account: {_string_id(cred)} ?\",\n default=False,\n err=True,\n )\n ):\n session.rename_credential(cred.id, name, issuer)\n click.echo(f\"Renamed {_string_id(cred)} to {new_id.decode()}.\")\n else:\n click.echo(\"Rename aborted by user.\")\n\n else:\n _error_multiple_hits(ctx, hits)", "def updateProfile( token, user=False, userinfo={'nickname':'newUser','first_name':'newUser'}):\n \n if not user:\n l= list(validName)\n sysrand.shuffle(l)\n l= \"\".join(l)\n print \"Attempting to create a user with the name \"+l\n user=User.objects.create_user(l,'')\n user.save()\n sid = transaction.savepoint()\n updateName( user, str(userinfo['nickname']).replace(' ',''), userinfo['first_name'], sid )\n transaction.savepoint_commit(sid)\n\n try: \n userprofile = user.get_profile()\n userprofile.uid = cPickle.dumps(token) #ensures the token parameter is retreivable and unique\n userprofile.user_id = user.id\n userprofile.save()\n transaction.commit()\n except:\n 
transaction.rollback()\n return user", "def test_replace_user(self):\n pass", "def test_username_taken(self):\n self.datautils.create_user({'username': 'newuser'})\n self.assertEqual(1, self.session.query(User).count())\n self.request.json_body = deepcopy(self.new_account)\n result = users_post_view(self.request)['d']\n self.assertEqual(result, error_dict('verification_error',\n 'username already in use: %s' % self.new_account['username']))", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def make_unique_username(username):\n if User.query.filter_by(username = username).first() is None:\n return username\n version = 2\n new_username = None\n while True:\n new_username = username + str(version)\n if User.query.filter_by(username = new_username).first() is None:\n break\n version += 1\n return new_username", "def updateName( user, login, name, sid, postfix=0 ):\n try:\n print \"Trying to update name with login_name=\", login\n user.first_name = name\n newlogin = login\n #strip the username of any special characters, including spaces\n \n if postfix:\n newlogin=\"%s%03d\" % ( login, postfix )\n user.username = newlogin\n user.save()\n except Exception, e:\n print \"Couldn't update name, rolling back\", e\n transaction.savepoint_rollback(sid)\n updateName( user, login, name, sid, postfix+1 )", "def userRenamed(self, old, new):\n sessions = self.findSessions(old)\n for ss in sessions:\n old = old.decode(ss.encoding)\n new = new.decode(ss.encoding)\n self.sendResponse(ss.rename(old, new))", "def update_username(self, old_username, new_username):\n raise NotImplementedError()", "def create_instance_user(problem_name, instance_number):\n\n converted_name = sanitize_name(problem_name)\n username = get_username(converted_name, instance_number)\n\n try:\n # Check if the user already exists.\n user = getpwnam(username)\n new = False\n except KeyError:\n create_user(username)\n new = True\n\n return username, new", "def testUsernameAlreadyThere(self):\r\n email = '[email protected]'\r\n new_user = UserMgr.signup_user(email, u'invite')\r\n DBSession.add(new_user)\r\n\r\n transaction.commit()\r\n\r\n user = DBSession.query(User).filter(User.username == email).one()\r\n\r\n url = quote('/{0}/reset/{1}'.format(\r\n user.email,\r\n user.activation.code\r\n ))\r\n\r\n res = self.app.post(\r\n url,\r\n params={\r\n 'password': u'testing',\r\n 'username': user.username,\r\n 'code': user.activation.code,\r\n 'new_username': u'admin',\r\n })\r\n self.assertIn('Username already', res.body)", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def edit_user_name(self, dto):\n user_id = dto[\"user_id\"]\n user_pin = dto[\"pin\"]\n new_user_name = dto[\"new_user_name\"]\n user = self._find_user_by_id_and_pin(user_id, user_pin)\n self.validate_user_name(new_user_name)\n user[\"user_name\"] = new_user_name\n self._user_dao.save_user(user)", "def formalize_user():\n print(request.get_json())\n username = request.get_json()['username']\n passwd = username = request.get_json()['passwd']\n # Check if the user 
exists by comparing the username\n # this contains the registered email\n existing_user = storage.filter_by(User, 'username', username)\n if not existing_user:\n user = storage.get(User, request.user)\n user.username = username\n user.passwd = passwd\n user.save()\n return jsonify(message='Success')\n return jsonify(message='Error creating user'), 309", "def create_user(self, username, password, firstname, lastname): # create gameuser, tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n c.execute('SELECT COUNT(*) from gameuser WHERE username=%s',(username,))\r\n n = int(c.fetchone()[0])\r\n # print 'num of rfdickersons is ' + str(n)\r\n if n == 0:\r\n hashedpass = md5.new(password).hexdigest()\r\n c.execute('INSERT INTO gameuser (username, password, firstname, lastname) VALUES (%s,%s,%s,%s)', \r\n (username, hashedpass, firstname, lastname))\r\n conn.commit()\r\n # return True\r\n else:\r\n # return False\r\n raise UserAlreadyExistsException('{} user already exists'.format((username)) )", "def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)", "def change_username(self, name):\n self.username = name", "def generate_username():\n while True:\n new_user_id = \"user-\" + str(random.randrange(10000000, 99999999))\n try:\n user = DjangoDev.objects.get(username=new_user_id)\n except DjangoDev.DoesNotExist:\n return new_user_id", "def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)", "def change_username():\n if request.method == 'POST':\n username = get_username()\n new_username = request.form['change_username']\n user_id = get_id_from_username(username)\n #TODO: Error handling on database writes lol\n change_username_from_id(user_id, new_username )\n return redirect(url_for('users.account_page', username=new_username))", "def test_username_in_use(self):\n self.request.json_body = {'username': 'testuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n 
self.assertIsInstance(result, dict)\n self.assertEqual(result, error_dict('verification_error', 'username already in use: testuser'))", "def test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), '[email protected]')\n\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')" ]
[ "0.6498411", "0.6445537", "0.64272594", "0.63668567", "0.62813914", "0.6276819", "0.62515754", "0.62395614", "0.6230753", "0.61900216", "0.6189337", "0.6094322", "0.6059636", "0.604635", "0.6021056", "0.5997202", "0.5993181", "0.59726524", "0.597109", "0.5961523", "0.59597486", "0.59297496", "0.592642", "0.5924449", "0.59237134", "0.59142554", "0.59077424", "0.59040195", "0.58884937", "0.5872745" ]
0.7157386
0
Create user with {SHA} password and check that logging in upgrades to {SSHA}.
def test_upgrade_password_from_sha_to_ssha(self):
    name = u'/no such user/'
    password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345
    self.createUser(name, password, True)

    # User is not required to be valid
    theuser = user.User(self.request, name=name, password='12345')
    assert theuser.enc_password[:6] == '{SSHA}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_password_from_apr1_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbm blaze 12345\"\n password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_md5_to_ssha(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_des_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. 
\", problem1)", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(email, password, f_name, l_name):\n pass", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def add_check(self, username, password):\n can_login, msg = self.db.add_user(username, password)\n self.send_message(str(can_login) + DOLLAR + msg)", "def check_db(args):\n # Open connection to the registered users db\n base_path = \"pypackage\"\n users_db = \"openaq_users.db\"\n conn = sqlite3.connect(os.path.join(base_path, users_db))\n cursor = conn.cursor()\n\n # Check for username\n row = cursor.execute(\"SELECT * FROM user_database WHERE username = ?\",\n (args.username,))\n results = row.fetchall()\n conn.commit()\n\n if results:\n # Add salt\n salt = str(results[0][2])\n digest = salt + args.password\n\n # Compute the hash\n for i in range(1000):\n digest = hashlib.sha256(digest.encode('utf-8')).hexdigest()\n\n # Check for password\n if digest == results[0][1]:\n print('Successful log-in. Welcome {}!'.format(args.username))\n return True\n\n else:\n print(\n \"Password is invalid for user {}.\".format(args.username)\n )\n return False\n else:\n print(\"Username not present.\")\n return False", "def new_user():\n\n username = request.json['username']\n if len(username) < 4:\n return '1'\n ds = \"'\\\\\\\"%}{\"\n for i in ds:\n if i in username:\n return '1'\n rem = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n email = request.json['email']\n if re.fullmatch(rem, email) == None:\n return '1'\n password = request.json['password']\n if len(password) != 64:\n return '1'\n _ = db.register(username, email, generate_password_hash(password))\n if _ == 0:\n return '0'\n else:\n return '2'", "def create_user(username, password):\n if not validate_username(username):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(username):\n return \"käyttäjänimi on jo käytössä\"\n if not validate_password(password):\n return \"salasana on väärää muotoa\"\n sql = \"INSERT INTO users (username, password, user_group, is_active) \" \\\n \"VALUES (:username, :password, 'basic', TRUE)\"\n password_hash = generate_password_hash(password)\n db.session.execute(sql, {\"username\": username, \"password\": password_hash})\n db.session.commit()\n return \"ok\"", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def 
create_user(username, password):\n\n user = User(username=username, password_hash=generate_password_hash(password))\n \n db.session.add(user)\n db.session.commit()\n return user", "def test_auth_with_md5_stored_password(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def create_db_user(db_username, db_password):\n print system(\"su -c \\\"echo \\\\\\\"create user \" + db_username +\n \" with createdb encrypted password '\" + db_password + \"';\\\\\\\" | psql \\\" postgres\")", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_user():\n body = request.json\n username = body.get('username')\n password = body.get('password')\n validation = validate_user(username, password)\n password = md5(password.encode('utf-8')).hexdigest()\n if validation != \"OK\":\n return HTTPResponse(status=500, body={\"message\":validation})\n try:\n with db.atomic():\n user = User.create(username=username, password=password)\n user.save()\n ret = json.dumps({'message':'user created'})\n return HTTPResponse(status=200, body=ret)\n except IntegrityError:\n ret = json.dumps({'message':'user already exists'})\n return HTTPResponse(status=500, body=ret)", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def create_user(self, username, password):\n\t\tpassword = password.encode('utf-8')\n\t\thash_pass = bcrypt.hashpw(password, bcrypt.gensalt(12))\n\n\t\tself.users.insert_one({\"username\":username, \"password\":hash_pass})\n\t\t\n\t\treturn username", "def add_user(username, password):\n return create_user(username, password)", "def add_user(username, password):\n return create_user(username, password)", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def add_user(username, password):\n\n db_client_maker = core_db.get_nest_users_sqla_maker()\n md = nest_db.get_global_sqlalchemy_metadata()\n engine = nest_db.get_global_sqlalchemy_engine()\n #note this is a tablelike client, not a NestUser client\n db_client = db_client_maker.get_db_client(engine, md)\n\n system_user = core_db.get_system_user()\n db_client.set_requesting_user(system_user)\n\n schema = nest_users.generate_schema()\n passlib_hash = password_hash.compute_passlib_hash(password)\n\n nu = NestUser(None, username, None, None, \n is_superuser=False, passlib_hash=passlib_hash,\n origin='nest')\n\n tle = nu.to_tablelike_entry()\n tle = db_client.create_entry(tle)\n if tle is None:\n print('FAILURE ensuring user: ' + str(username))\n success = False\n else:\n print('ensured user: ' + str(username))\n success = True\n ensure_default_project(NestUser.from_tablelike_entry(tle))\n return success" ]
[ "0.73439944", "0.7333939", "0.7085419", "0.7017456", "0.7014548", "0.69558007", "0.67983985", "0.6787436", "0.661877", "0.6514608", "0.6433441", "0.6432057", "0.6385014", "0.63845205", "0.63744473", "0.63245636", "0.6289351", "0.62403136", "0.6240243", "0.6236771", "0.62236786", "0.6202787", "0.61979395", "0.61894596", "0.6187547", "0.6186556", "0.6178708", "0.6178708", "0.61593175", "0.61513203" ]
0.808385
0
Create user with {APR1} password and check that logging in upgrades to {SSHA}.
def test_upgrade_password_from_apr1_to_ssha(self): # Create test user name = u'Test User' # generated with "htpasswd -nbm blaze 12345" password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345 self.createUser(name, password, True) # User is not required to be valid theuser = user.User(self.request, name=name, password='12345') assert theuser.enc_password[:6] == '{SSHA}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createABPPMGRUSER():\n password = globs.props['ABPP_Password'] \n sqlcommand = bytes('@'+globs.props['JDA_HOME']+'\\\\config\\\\database\\\\setup\\\\cr_abpp_user '+password, 'utf-8')\n stdout, stdin = runSQLQuery(sqlcommand, globs.props['System_Username'])\n print(stdout.decode('ascii'))", "def test_auth_with_apr1_stored_password(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbm blaze 12345\"\n password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345\n self.createUser(name, password, True)\n\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid", "def test_upgrade_password_from_sha_to_ssha(self):\n name = u'/no such user/'\n password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def create_user(email, password, f_name, l_name):\n pass", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. \", problem1)", "def setupUser(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', create user '%s'\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"create user %s with password '%s'\" % (userName, userPassword))", "def create_dbuser(default_db_name, superuser_name, superuser_password, db_host, db_port, poi_user_name, poi_user_password):\n try:\n logging.info(\"Connecting to database postgres as user postgres at host %s port %s\", db_host, db_port)\n connection = psycopg2.connect(dbname=default_db_name, user=superuser_name, host=db_host, password=superuser_password, port=db_port)\n\n connection.autocommit=True\n cursor = connection.cursor()\n\n logging.info(\"Creating role %s\", poi_user_name)\n query = '''CREATE ROLE %s WITH \n NOSUPERUSER\n NOCREATEDB\n NOCREATEROLE\n NOINHERIT\n LOGIN\n CONNECTION LIMIT -1\n ENCRYPTED PASSWORD %s'''\n params = (AsIs(poi_user_name), poi_user_password)\n cursor.execute(query, params)\n\n logging.info(\"Successfully created user %s\", poi_user_name)\n except psycopg2.ProgrammingError as e:\n if e.pgcode == '42710': #duplicate_object error code\n logging.warning(\"Role %s already exists. 
Make sure it has the necessary privileges or delete it and run the setup script again\", poi_user_name)\n else:\n raise Exception(\"Exception creating user\" + poi_user_name + \": \" + str(e))\n except Exception as e:\n raise Exception(\"Exception creating user:\" + str(e))", "def CreateUser(self, row):\n if 'quota_limit' in row.keys() and row['quota_limit']:\n quota = row['quota_limit']\n else:\n quota = 25000\n if 'pw_hash_function' in row.keys() and row['pw_hash_function']:\n pw_hash_function = row['pw_hash_function']\n else:\n pw_hash_function = None\n if 'suspended' in row.keys() and row['suspended']:\n suspended_flag = row['suspended']\n else:\n suspended_flag = 'FALSE'\n try:\n self.gd_client.CreateUser(\n row['user_name'], row['family_name'], row['given_name'],\n row['password'], suspended=suspended_flag,\n password_hash_function=pw_hash_function, quota_limit=quota)\n row['status'] = 'success'\n except gdata.apps.service.AppsForYourDomainException, e:\n row['status'] = 'fail gdata error code:%s %s'% (\n e.error_code, ERROR_DICT[str(e.error_code)])\n except KeyError:\n print ('user_name, given_name, family_name, password are required\\n'\n 'headers when action is create')\n sys.exit()\n # if user is admin, IP_whistelisted, or change password required, \n # we need to do the following \n if ('admin' not in row.keys() and 'change_pw' not in row.keys()\n and 'ip_whitelisted' not in row.keys()):\n return\n try:\n user_feed = self.gd_client.RetrieveUser(row['user_name'])\n if 'admin' in row.keys() and row['admin']:\n user_feed.login.admin = row['admin']\n else:\n user_feed.login.admin = 'FALSE'\n if 'change_pw' in row.keys() and row['change_pw']:\n user_feed.login.change_password = row['change_pw']\n else:\n user_feed.login.change_password = 'FALSE'\n if 'ip_whitelisted' in row.keys() and row['ip_whitelisted']:\n user_feed.login.ip_whitelisted = row['ip_whitelisted']\n else:\n user_feed.login.ip_whitelisted = 'FALSE'\n self.gd_client.UpdateUser(row['user_name'], user_feed)\n except gdata.apps.service.AppsForYourDomainException, e:\n row['status'] = (\n 'fail: gdata error code:%s %s'%\n (e.error_code, ERROR_DICT[str(e.error_code)]))", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_db_user(db_username, db_password):\n print system(\"su -c \\\"echo \\\\\\\"create user \" + db_username +\n \" with createdb encrypted password '\" + db_password + \"';\\\\\\\" | psql \\\" postgres\")", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_mqtt_user():\n print('CREATE ADMIN PASSWORD IN MOSQUITTO DB')\n cmd = [\n '/usr/bin/mosquitto_passwd',\n '-b',\n '/etc/mosquitto/password',\n CONFIG['mqtt_user'],\n CONFIG['mqtt_password']]\n call(cmd)\n marker_replace_template(\n \"/etc/mosquitto/acl-template\",\n \"/etc/mosquitto/acl\",\n 'HERMOD_ROOT_USER',\n CONFIG['mqtt_user'])", "def create_user():\n print('Creating user ...')\n client = MongoClient(db_uri)\n try:\n client[database_].command(\"createUser\", os.environ['MONGODB_USERNAME'], pwd=os.environ['MONGODB_PASSWORD'],\n roles=[{'role': 'readWrite', 'db': database_}])\n except OperationFailure:\n print('User already exists')\n return\n print('User created')", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def do_createuser(self, 
*args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')", "def test_upgrade_password_from_des_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_md5_to_ssha(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def onUserCreation(event):\n request = getRequest()\n if not IProductLayer.providedBy(request):\n return\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n principal = event.principal\n principal_id = principal.getUserId()\n principal_jid = xmpp_users.getUserJID(principal_id)\n pass_storage = getUtility(IXMPPPasswordStorage)\n principal_pass = pass_storage.set(principal_id)\n users.setupPrincipal(client, principal_jid, principal_pass)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def create_db_user():\n\n require('environment', provided_by=env.environments)\n _load_passwords(['database_password'], generate=True)\n postgres.create_db_user(env.database_user, password=env.database_password)", "def create_user(username, password):\n if not validate_username(username):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(username):\n return \"käyttäjänimi on jo käytössä\"\n if not validate_password(password):\n return \"salasana on väärää muotoa\"\n sql = \"INSERT INTO users (username, password, user_group, is_active) \" \\\n \"VALUES (:username, :password, 'basic', TRUE)\"\n password_hash = generate_password_hash(password)\n db.session.execute(sql, {\"username\": username, \"password\": password_hash})\n db.session.commit()\n return \"ok\"", "def setup(self,context,result):\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-add\", self.user_name,\n \"-pw\", self.user_password],[])\n except:\n result.note_exception(cause=\"Resource setup: Can't add user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result,Result.ERROR,'GSEC','Add new user',\n stdout,stderr)\n return\n else:\n self.do_cleanup = True", "def new_user():\n pass", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)" ]
[ "0.6923646", "0.6910008", "0.6903554", "0.69030684", "0.67261803", "0.6656542", "0.66533566", "0.663863", "0.65611124", "0.65610635", "0.6528321", "0.6524464", "0.6468667", "0.64476997", "0.6443924", "0.643508", "0.6432209", "0.6421695", "0.6406809", "0.64033985", "0.6395312", "0.63642806", "0.6350251", "0.6350251", "0.6350251", "0.6347815", "0.6343913", "0.6343612", "0.63376915", "0.6322249" ]
0.74522865
0
Create user with {MD5} password and check that logging in upgrades to {SSHA}.
def test_upgrade_password_from_md5_to_ssha(self): # Create test user name = u'Test User' password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345 self.createUser(name, password, True) # User is not required to be valid theuser = user.User(self.request, name=name, password='12345') assert theuser.enc_password[:6] == '{SSHA}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_password_from_sha_to_ssha(self):\n name = u'/no such user/'\n password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_apr1_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbm blaze 12345\"\n password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_des_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def create_user(email, password, f_name, l_name):\n pass", "def test_auth_with_md5_stored_password(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. 
\", problem1)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def new_user():\n\n username = request.json['username']\n if len(username) < 4:\n return '1'\n ds = \"'\\\\\\\"%}{\"\n for i in ds:\n if i in username:\n return '1'\n rem = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n email = request.json['email']\n if re.fullmatch(rem, email) == None:\n return '1'\n password = request.json['password']\n if len(password) != 64:\n return '1'\n _ = db.register(username, email, generate_password_hash(password))\n if _ == 0:\n return '0'\n else:\n return '2'", "def step_impl(context):\n\n from django.contrib.auth.models import User\n u = User(username='test_user', email='[email protected]')\n u.set_password('admin')", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()", "def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def sample_user_fifth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name5\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def new_user():\n pass", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def check_db(args):\n # Open connection to the registered users db\n base_path = \"pypackage\"\n users_db = \"openaq_users.db\"\n conn = sqlite3.connect(os.path.join(base_path, users_db))\n cursor = conn.cursor()\n\n # Check for username\n row = cursor.execute(\"SELECT * FROM user_database WHERE username = ?\",\n (args.username,))\n results = row.fetchall()\n conn.commit()\n\n if results:\n # Add salt\n salt = str(results[0][2])\n digest = salt + args.password\n\n # Compute the hash\n for i in range(1000):\n digest = hashlib.sha256(digest.encode('utf-8')).hexdigest()\n\n # 
Check for password\n if digest == results[0][1]:\n print('Successful log-in. Welcome {}!'.format(args.username))\n return True\n\n else:\n print(\n \"Password is invalid for user {}.\".format(args.username)\n )\n return False\n else:\n print(\"Username not present.\")\n return False", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user", "def createsuperuser():\n\n email = prompt('User E-Mail')\n email_confirm = prompt('Confirm E-Mail')\n\n if not email == email_confirm:\n sys.exit('\\nCould not create user: E-Mail did not match')\n\n if not EMAIL_REGEX.match(email):\n sys.exit('\\nCould not create user: Invalid E-Mail addresss')\n\n password = prompt_pass('User password')\n password_confirm = prompt_pass('Confirmed password')\n\n if not password == password_confirm:\n sys.exit('\\nCould not create user: Passwords did not match')\n\n datastore = SQLAlchemyUserDatastore(db, User, Role)\n datastore.create_user(\n email=email,\n password=encrypt_password(password),\n active=True,\n super_user=True)\n\n db.session.commit()", "def create_admin():\n admin = models.User(username= 'gallery_admin', email='[email protected]', address='#0000' , password =bcrypt.generate_password_hash('toledano',\n current_app.config.get('BCRYPT_LOG_ROUNDS')).decode('utf-8'), admin=True)\n admin.save()", "def test_create_user_with_successful_email(self):\n email = '[email protected]'\n password = 'userpass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))" ]
[ "0.77516174", "0.73371005", "0.7249351", "0.70577127", "0.7020497", "0.68880355", "0.6835192", "0.6757816", "0.6754576", "0.66576886", "0.6590541", "0.6556658", "0.65215313", "0.6450802", "0.64421785", "0.6416691", "0.64149505", "0.6387276", "0.63868517", "0.63638514", "0.63396645", "0.63165045", "0.6307047", "0.6294208", "0.62908155", "0.62816364", "0.6266188", "0.62560266", "0.62551844", "0.62440234" ]
0.77814764
0
Create user with {DES} password and check that logging in upgrades to {SSHA}.
def test_upgrade_password_from_des_to_ssha(self): # Create test user name = u'Test User' # generated with "htpasswd -nbd blaze 12345" password = '{DES}gArsfn7O5Yqfo' # 12345 self.createUser(name, password, True) # User is not required to be valid theuser = user.User(self.request, name=name, password='12345') assert theuser.enc_password[:6] == '{SSHA}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_password_from_sha_to_ssha(self):\n name = u'/no such user/'\n password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_md5_to_ssha(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def test_upgrade_password_from_apr1_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbm blaze 12345\"\n password = '{APR1}$apr1$NG3VoiU5$PSpHT6tV0ZMKkSZ71E3qg.' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def create_new_user(username, hash_password):\n try:\n conn = sqlite3.connect(DATABASE)\n cursor = conn.cursor()\n\n query = '''\n INSERT INTO user\n (Username, Password)\n values\n (?, ?)'''\n cursor.execute(query, (username, hash_password))\n conn.commit()\n cursor.close()\n return True\n except:\n return False", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def test_auth_with_des_stored_password(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n try:\n import crypt\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid\n except ImportError:\n py.test.skip(\"Platform does not provide crypt module!\")", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_user(item, username, passw):\n if len(passw) < 8:\n print(\"To short. Password should have minimum 8 characters.\")\n else:\n try:\n user = User(username=username, password=passw)\n user.save_to_db(item)\n print(\"User created\")\n except UniqueViolation as problem1:\n print(\"User already exist. Pick other username. 
\", problem1)", "def create_user(email, password, f_name, l_name):\n pass", "def create_user(username, password):\n result = create_hash(password)\n modify_query('INSERT INTO Users (username,password_hash,salt) VALUES ((%s), (%s), (%s))',\n username, result['hash'], result['salt'])\n\n return verify_login(username, password)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def create_db_user(db_username, db_password):\n print system(\"su -c \\\"echo \\\\\\\"create user \" + db_username +\n \" with createdb encrypted password '\" + db_password + \"';\\\\\\\" | psql \\\" postgres\")", "def createsuperuser():\n\n email = prompt('User E-Mail')\n email_confirm = prompt('Confirm E-Mail')\n\n if not email == email_confirm:\n sys.exit('\\nCould not create user: E-Mail did not match')\n\n if not EMAIL_REGEX.match(email):\n sys.exit('\\nCould not create user: Invalid E-Mail addresss')\n\n password = prompt_pass('User password')\n password_confirm = prompt_pass('Confirmed password')\n\n if not password == password_confirm:\n sys.exit('\\nCould not create user: Passwords did not match')\n\n datastore = SQLAlchemyUserDatastore(db, User, Role)\n datastore.create_user(\n email=email,\n password=encrypt_password(password),\n active=True,\n super_user=True)\n\n db.session.commit()", "def setupUser(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', create user '%s'\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"create user %s with password '%s'\" % (userName, userPassword))", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def create_user(username, password):\n if not validate_username(username):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(username):\n return \"käyttäjänimi on jo käytössä\"\n if not validate_password(password):\n return \"salasana on väärää muotoa\"\n sql = \"INSERT INTO users (username, password, user_group, is_active) \" \\\n \"VALUES (:username, :password, 'basic', TRUE)\"\n password_hash = generate_password_hash(password)\n db.session.execute(sql, {\"username\": username, \"password\": password_hash})\n db.session.commit()\n return \"ok\"", "def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()", "def 
test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def new_user():\n\n username = request.json['username']\n if len(username) < 4:\n return '1'\n ds = \"'\\\\\\\"%}{\"\n for i in ds:\n if i in username:\n return '1'\n rem = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n email = request.json['email']\n if re.fullmatch(rem, email) == None:\n return '1'\n password = request.json['password']\n if len(password) != 64:\n return '1'\n _ = db.register(username, email, generate_password_hash(password))\n if _ == 0:\n return '0'\n else:\n return '2'", "def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def step_impl(context):\n\n from django.contrib.auth.models import User\n u = User(username='test_user', email='[email protected]')\n u.set_password('admin')", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def execute_create_user(arg):\n blockchain = Blockchain()\n blockchain.read_blockchain()\n\n username = arg['username']\n\n if username is None:\n print('You have to provide an username!!!')\n return\n\n wallet = blockchain.create_user(username)\n print(f'User wallet address is: {wallet.address}')" ]
[ "0.7500007", "0.7187628", "0.7142682", "0.6940297", "0.69316804", "0.69210327", "0.68815446", "0.6822661", "0.6731875", "0.6687178", "0.64787775", "0.64628786", "0.64233464", "0.64151156", "0.6408571", "0.6375208", "0.6362141", "0.6337913", "0.63170415", "0.63036406", "0.627818", "0.6273597", "0.62709373", "0.62709373", "0.62709373", "0.62588", "0.6244611", "0.6239423", "0.6218997", "0.6210387" ]
0.77103466
0
checks for no access to the email attribute by getting the user object from name
def test_for_email_attribute_by_name(self): name = u"__TestUser__" password = u"ekfdweurwerh" email = "__TestUser__@moinhost" self.createUser(name, password, email=email) theuser = user.User(self.request, name=name) assert theuser.email == ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email(obj):\r\n return obj.user.email", "def test_get_user_by_emailuser_email_get(self):\n pass", "def test_resource_user_resource_get_user_by_email_address_get(self):\n pass", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, data, field_name):\n existing = User.objects.filter(email__iexact=data['email'])\n if existing.exists():\n raise fields.ValidationError(\"A user with that email already exists.\")\n else:\n return data", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')\n )", "def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.'))", "def test_email_attr(self):\n user = User()\n self.assertTrue(hasattr(user, \"email\"))\n self.assertEqual(user.email, \"\")", "def clean_user_account_not_in_use(field_name):\n\n @check_field_is_empty(field_name)\n def wrapped(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n email_adress = self.cleaned_data.get(field_name).lower()\n\n # get the user account for this email and check if it's in use\n user_account = users.User(email_adress)\n\n fields = {'account': user_account}\n user_entity = user_logic.getForFields(fields, unique=True)\n\n if user_entity or user_logic.isFormerAccount(user_account):\n raise forms.ValidationError(\"There is already a user \"\n \"with this email address.\")\n\n return user_account\n return wrapped", "def clean_email(self):\r\n email = self.cleaned_data.get(\"email\")\r\n\r\n if not email: \r\n return email\r\n\r\n if User.objects.filter(email__iexact=email).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That e-mail is already used.\")\r\n else:\r\n return email", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user", "def testGetWithNoEmail(self):\n userdict = {'id': 1,\n 'objectID': u'04585bec-28cf-4a21-bc3e-081f3ed62680',\n 'username': u'testuser',\n 'passwordHash': 'hash',\n 'email': None,\n 'fullname': u'Test User',\n 'role': Role.ANONYMOUS.id}\n self.cache.set('user:testuser', json.dumps(userdict))\n result = self.userCache.get(u'testuser')\n user = result.results\n self.assertEqual(1, user.id)\n self.assertEqual('04585bec-28cf-4a21-bc3e-081f3ed62680',\n str(user.objectID))\n self.assertEqual(u'testuser', user.username)\n self.assertEqual('hash', user.passwordHash)\n self.assertEqual(u'Test User', user.fullname)\n self.assertEqual(None, user.email)\n self.assertEqual(Role.ANONYMOUS, user.role)", "def validate_email(self, data):\n users = User.objects.filter(email=data)\n if not self.instance and len(users) != 0:\n raise serializers.ValidationError(data+\" ya esta registrado\")\n\n elif self.instance and self.instance.username != data and len(users) != 0:\n raise serializers.ValidationError(data+\" ya esta registrado\")\n\n else:\n return data", "def test_email(self):\r\n \r\n self.assertEqual('[email protected]', self.user.email)", "def user(email):\r\n return User.objects.get(email=email)", "def test_for_email_attribut_by_uid(self):\n name = u\"__TestUser2__\"\n password = u\"ekERErwerwerh\"\n email = \"__TestUser2__@moinhost\"\n self.createUser(name, password, email=email)\n uid = user.getUserId(self.request, name)\n theuser = user.User(self.request, uid)\n assert theuser.email == email", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. 
Please supply a different email address.\"))\n return self.cleaned_data['email']", "def email_exist(email):\n return User.objects.filter(email=email).first()", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def clean_username_or_email(self):\n username_or_email = self.cleaned_data['username_or_email']\n try:\n user = User.objects.get(username__iexact=username_or_email)\n self.profile = user.profile\n except User.DoesNotExist:\n try:\n self.profile = Profile.objects.get(\n settings__email__iexact=username_or_email)\n except Profile.DoesNotExist:\n raise forms.ValidationError(ERRORS['invalid_username'])\n return username_or_email", "def clean_email(self):\r\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\r\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))\r\n return self.cleaned_data['email']", "def test_bad_request_anon_user_no_email(self, zendesk_mock_class, datadog_mock):\r\n self._test_bad_request_omit_field(self._anon_user, self._anon_fields, \"email\", zendesk_mock_class, datadog_mock)\r\n self._test_bad_request_empty_field(self._anon_user, self._anon_fields, \"email\", zendesk_mock_class, datadog_mock)", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError('This email address is already in use. Please supply a different email address.')\n return self.cleaned_data['email']", "def _user_from_name_or_email(username_or_email):\r\n username_or_email = strip_if_string(username_or_email)\r\n\r\n if '@' in username_or_email:\r\n return User.objects.get(email=username_or_email)\r\n else:\r\n return User.objects.get(username=username_or_email)", "def email(self, instance):\r\n return instance.user.email" ]
[ "0.69927853", "0.69904214", "0.6800825", "0.6738111", "0.6738111", "0.6700649", "0.6663769", "0.6641182", "0.661915", "0.66068196", "0.6578889", "0.652461", "0.65243393", "0.6438929", "0.6421183", "0.64171404", "0.641056", "0.6377576", "0.6363164", "0.63273644", "0.631347", "0.631347", "0.62939984", "0.628835", "0.62818563", "0.6278981", "0.62789726", "0.62708527", "0.6268468", "0.62376463" ]
0.71719223
0
checks access to the email attribute by getting the user object from the uid
def test_for_email_attribut_by_uid(self): name = u"__TestUser2__" password = u"ekERErwerwerh" email = "__TestUser2__@moinhost" self.createUser(name, password, email=email) uid = user.getUserId(self.request, name) theuser = user.User(self.request, uid) assert theuser.email == email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email(obj):\r\n return obj.user.email", "def test_get_user_by_emailuser_email_get(self):\n pass", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def email(self, instance):\r\n return instance.user.email", "def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")", "def test_resource_user_resource_get_user_by_email_address_get(self):\n pass", "def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user", "def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')\n )", "def check_user_email(self, email):\n useremails = []\n for user in self.__users:\n if user['email'] == email:\n useremails.append(user)\n return useremails", "def has_validated_email(self):\n return self.user.email_user is not None", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.'))", "def user(self, uid):", "def validate_email(self, data, field_name):\n existing = User.objects.filter(email__iexact=data['email'])\n if existing.exists():\n raise fields.ValidationError(\"A user with that email already exists.\")\n else:\n return data", "def get_user_by_email(self, strategy, email):\r\n return strategy.storage.user.user_model().objects.get(email=email)", "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def test_email(self):\r\n \r\n self.assertEqual('[email protected]', self.user.email)", "def get_user(self, email):\n try:\n return RegisterUser.objects.get(email=email)\n except:\n return None", "def clean_email(self):\r\n email = self.cleaned_data.get(\"email\")\r\n\r\n if not email: \r\n return email\r\n\r\n if User.objects.filter(email__iexact=email).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That e-mail is already used.\")\r\n else:\r\n return email", "def email_exist(email):\n return User.objects.filter(email=email).first()", "def clean_email(self):\n e = self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n UserModel = get_user_model()\n email = self.cleaned_data[\"email\"]\n self.users_cache = UserModel._default_manager.filter(email__iexact=email)\n if not len(self.users_cache):\n raise forms.ValidationError(self.error_messages['unknown'])\n if not any(user.is_active for user in self.users_cache):\n # none of the filtered users are active\n raise forms.ValidationError(self.error_messages['unknown'])\n return email", "def user(email):\r\n return User.objects.get(email=email)", "def clean_email(self):\r\n email = self.cleaned_data[\"email\"]\r\n #The line below contains the only change, removing is_active=True\r\n self.users_cache = User.objects.filter(email__iexact=email)\r\n if not len(self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unknown'])\r\n if any((user.password == UNUSABLE_PASSWORD)\r\n for user in self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unusable'])\r\n return email", "def get_reference_user(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return ref\n except ObjectDoesNotExist:\n return None", "def clean_uid_field(self):\n if CONFIG.y(\"passbook.uid_fields\") == [\"email\"]:\n validate_email(self.cleaned_data.get(\"uid_field\"))\n return self.cleaned_data.get(\"uid_field\")" ]
[ "0.71520734", "0.69720227", "0.67924476", "0.67924476", "0.6726474", "0.6698958", "0.66802007", "0.66745335", "0.6655605", "0.6627979", "0.6603454", "0.6574884", "0.655539", "0.652709", "0.6515871", "0.64560187", "0.6446277", "0.6443656", "0.6422562", "0.63196146", "0.6314266", "0.62907124", "0.62872785", "0.6282589", "0.6282589", "0.62789077", "0.6272999", "0.625724", "0.62370676", "0.6231479" ]
0.73147726
0
Ask the user to choose one running instance
def _choose_among_running_instances(self): instances = self.compute.get_running_instances_ids() # No instances if not instances: print 'You do not have any running instances!' return None # List the name of the instances print 'Choose an instance:' for i, instance in enumerate(instances): print '%d) %s' % ((i + 1), instance) print # Choose an instance instance_id = '' while True: choice = raw_input("Instance target number or ID (empty to cancel): ") # Cancel if not choice: return None # Valid choice if choice in instances: instance_id = choice break choice = int(choice) if 1 <= choice <= len(instances): instance_id = instances[choice - 1] break # Invalid option print 'Incorrect option!' continue print return instance_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_among_stopped_instances(self):\n\n instances = self.compute.get_not_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance", "def create_instance_by_os(self):\n print '# Start a new instance based on the OS'\n\n # Choose between linux or windows\n is_linux = True\n while True:\n\n os = raw_input('Enter the OS (windows/linux or empty to cancel): ')\n\n # Cancel\n if not os:\n print 'Operation cancelled'\n return\n\n # Check if linux\n if os.lower() == 'linux':\n is_linux = True\n break\n\n # Check windows\n if os.lower() == 'windows':\n is_linux = False\n break\n\n # Error\n print 'Invalid input!'\n\n # Create the instance\n if self.compute.create_instance_by_os(is_linux):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given OS'", "def _choose_among_available_volumes(self):\n\n volumes = self.compute.get_available_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any available volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def _choose_among_used_volumes(self):\n\n volumes = self.compute.get_used_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any used volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n 
choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def SelectInstanceInteractive(all_instances, service=None, version=None):\n if properties.VALUES.core.disable_prompts.GetBool():\n raise SelectInstanceError(\n 'Cannot interactively select instances with prompts disabled.')\n\n # Defined here to close over all_instances for the error message\n def _PromptOptions(options, type_):\n \"\"\"Given an iterable options of type type_, prompt and return one.\"\"\"\n options = sorted(set(options))\n if len(options) > 1:\n idx = console_io.PromptChoice(options, message='Which {0}?'.format(type_))\n elif len(options) == 1:\n idx = 0\n log.status.Print('Choosing [{0}] for {1}.\\n'.format(options[0], type_))\n else:\n if all_instances:\n msg = ('No instances could be found matching the given criteria.\\n\\n'\n 'All instances:\\n' +\n '\\n'.join(map('* [{0}]'.format, sorted(all_instances))))\n else:\n msg = 'No instances were found for the current project [{0}].'.format(\n properties.VALUES.core.project.Get(required=True))\n raise SelectInstanceError(msg)\n return options[idx]\n\n matching_instances = FilterInstances(all_instances, service, version)\n\n service = _PromptOptions((i.service for i in matching_instances), 'service')\n matching_instances = FilterInstances(matching_instances, service=service)\n\n version = _PromptOptions((i.version for i in matching_instances), 'version')\n matching_instances = FilterInstances(matching_instances, version=version)\n\n return _PromptOptions(matching_instances, 'instance')", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _PromptOptions(options, type_):\n options = sorted(set(options))\n if len(options) > 1:\n idx = console_io.PromptChoice(options, message='Which {0}?'.format(type_))\n elif len(options) == 1:\n idx = 0\n log.status.Print('Choosing [{0}] for {1}.\\n'.format(options[0], type_))\n else:\n if all_instances:\n msg = ('No instances could be found matching the given criteria.\\n\\n'\n 'All instances:\\n' +\n '\\n'.join(map('* [{0}]'.format, sorted(all_instances))))\n else:\n msg = 'No instances were found for the current project [{0}].'.format(\n properties.VALUES.core.project.Get(required=True))\n raise SelectInstanceError(msg)\n return options[idx]", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. 
Would you like to check its status?\"\n return question(msg)", "def show(name):\n try:os.system(f'python -m pip show {name}') \n except Exception:\n user_choice=input(f\"Seems like {name} not found\")", "def create_instance_by_image(self):\n print '# Start a new instance based on an existing AMI'\n ami = raw_input('Enter AMI (empty to cancel): ')\n\n # Cancel\n if not ami:\n print 'Operation cancelled'\n return\n\n # Start the instance\n if self.compute.create_instance_by_image(ami):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given AMI'", "def start_instance(InstanceId=None):\n pass", "def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")", "def start():\r\n print(\"Please select an option:\")\r\n print(\"1) Query by movies\")\r\n print(\"2) Query by actor\")\r\n print(\"3) Insert a new movie\")\r\n print(\"4) Save and Exit\")\r\n print(\"5) Exit\")\r\n option = input()\r\n return option", "def host():\n\n print(\"\"\" Bienvenue sur l'application Pur Beurre\n --------------------------------------------\n 1: Quel aliment souhaitez-vous remplacer ?\n 2: Retrouver mes aliments substitués\n 3: Quitter\"\"\")\n\n while True:\n try:\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in range(1, 4):\n break\n except ValueError:\n continue\n\n return choice", "def instance(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance\")", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def do_show(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n print(storage.all()[key])", "def select_server(self):\n pass", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)", "def host_instance_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"host_instance_type\")", "def run_current_selection(self):\n import subprocess\n name, data, _ = self.matches_copy[self.selected_item]\n try:\n needs_term = data[\"Terminal\"].lower() == \"true\"\n except KeyError:\n needs_term = False\n if needs_term:\n with open(os.devnull, \"w\") as devnull:\n subprocess.call([\"nohup\", \"gnome-terminal\", \"-e\",\n data[\"command\"]],\n stdout=devnull,\n stderr=devnull)\n else:\n with open(os.devnull, \"w\") as devnull:\n cmdlist = [\"nohup\"]\n cmdlist.extend(data[\"Exec\"].split())\n subprocess.Popen(cmdlist,\n #stdout=devnull,\n #stderr=devnull\n )\n quit()", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance" ]
[ "0.73312354", "0.73238367", "0.62579465", "0.61847156", "0.6181139", "0.6167246", "0.5979625", "0.59451413", "0.58857286", "0.5849709", "0.5801913", "0.57791424", "0.5761157", "0.57333684", "0.5713015", "0.5706526", "0.5663359", "0.5656504", "0.55967057", "0.5560405", "0.5547022", "0.5400968", "0.5376453", "0.5355361", "0.53511566", "0.53511155", "0.5342984", "0.5331275", "0.532688", "0.5284561" ]
0.7763613
0
Ask the user to choose one stopped instance
def _choose_among_stopped_instances(self): instances = self.compute.get_not_running_instances_ids() # No instances if not instances: print 'You do not have any instances!' return None # List the name of the instances print 'Choose an instance:' for i, instance in enumerate(instances): print '%d) %s' % ((i + 1), instance) print # Choose an instance instance_id = '' while True: choice = raw_input("Instance target number or ID (empty to cancel): ") # Cancel if not choice: return None # Valid choice if choice in instances: instance_id = choice break choice = int(choice) if 1 <= choice <= len(instances): instance_id = instances[choice - 1] break # Invalid option print 'Incorrect option!' continue print return instance_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance", "def _choose_among_used_volumes(self):\n\n volumes = self.compute.get_used_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any used volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def _choose_among_available_volumes(self):\n\n volumes = self.compute.get_available_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any available volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def 
start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _PromptOptions(options, type_):\n options = sorted(set(options))\n if len(options) > 1:\n idx = console_io.PromptChoice(options, message='Which {0}?'.format(type_))\n elif len(options) == 1:\n idx = 0\n log.status.Print('Choosing [{0}] for {1}.\\n'.format(options[0], type_))\n else:\n if all_instances:\n msg = ('No instances could be found matching the given criteria.\\n\\n'\n 'All instances:\\n' +\n '\\n'.join(map('* [{0}]'.format, sorted(all_instances))))\n else:\n msg = 'No instances were found for the current project [{0}].'.format(\n properties.VALUES.core.project.Get(required=True))\n raise SelectInstanceError(msg)\n return options[idx]", "def ask_stop_game(self):\n return self.stop_game", "def stop_instance(InstanceId=None, Force=None):\n pass", "def SelectInstanceInteractive(all_instances, service=None, version=None):\n if properties.VALUES.core.disable_prompts.GetBool():\n raise SelectInstanceError(\n 'Cannot interactively select instances with prompts disabled.')\n\n # Defined here to close over all_instances for the error message\n def _PromptOptions(options, type_):\n \"\"\"Given an iterable options of type type_, prompt and return one.\"\"\"\n options = sorted(set(options))\n if len(options) > 1:\n idx = console_io.PromptChoice(options, message='Which {0}?'.format(type_))\n elif len(options) == 1:\n idx = 0\n log.status.Print('Choosing [{0}] for {1}.\\n'.format(options[0], type_))\n else:\n if all_instances:\n msg = ('No instances could be found matching the given criteria.\\n\\n'\n 'All instances:\\n' +\n '\\n'.join(map('* [{0}]'.format, sorted(all_instances))))\n else:\n msg = 'No instances were found for the current project [{0}].'.format(\n properties.VALUES.core.project.Get(required=True))\n raise SelectInstanceError(msg)\n return options[idx]\n\n matching_instances = FilterInstances(all_instances, service, version)\n\n service = _PromptOptions((i.service for i in matching_instances), 'service')\n matching_instances = FilterInstances(matching_instances, service=service)\n\n version = _PromptOptions((i.version for i in matching_instances), 'version')\n matching_instances = FilterInstances(matching_instances, version=version)\n\n return _PromptOptions(matching_instances, 'instance')", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def start():\r\n print(\"Please select an option:\")\r\n print(\"1) Query by movies\")\r\n print(\"2) Query by actor\")\r\n print(\"3) Insert a new movie\")\r\n print(\"4) Save and Exit\")\r\n print(\"5) Exit\")\r\n option = input()\r\n return option", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not 
name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def resume(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n\n # Try to unpause\n if vmrun.unpause(quiet=True) is not None:\n time.sleep(1)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n if ip:\n puts_err(colored.green(\"VM resumed on {}\".format(ip)))\n else:\n puts_err(colored.green(\"VM resumed on an unknown IP address\"))\n\n # Otherwise try starting\n else:\n started = vmrun.start()\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM already was started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM already was started on an unknown IP address\"))", "def terminate(filter=\".*\"):\n list_instances,list_headers = ec2list(filter=filter)\n if not list_instances:\n print(\"No instance matched the filter\")\n sys.exit(1)\n title = \"Pick the instances to terminate\"\n options = [ '{} ---- {} ---- {} ---- {}'.format(\n x[\"name\"],\n x[\"privateip\"],\n x[\"id\"],\n x[\"launchtime\"],\n x[\"state\"]) for x in list_instances ]\n\n list_selected = pick(options, title, multiselect=True, default_index=len(options)-1)\n del(options[:-1])\n list_ips = []\n if not list_selected:\n print(\"No host selected, exiting\")\n return\n list_ids = []\n for option,index in list_selected:\n list_ids.append(list_instances[index]['id'])\n print(\"Terminating instances {}\".format(list_ids))\n boto3.client(\"ec2\").terminate_instances(InstanceIds=list_ids)", "def start_instance(InstanceId=None):\n pass", "def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))", "def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in 
(\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")", "def stop_fleet(Name=None):\n pass", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def start_stop(now, start, stop, temporary_user, config, tz):\n if now.time() >= start and now.time() < stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'stopped', now, tz)\n action_on_instances(temporary_user.start_instances, action_required_ids, 'Start')\n elif now.time() >= stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'running', now, tz)\n action_on_instances(temporary_user.stop_instances, action_required_ids, 'Stop')", "def host():\n\n print(\"\"\" Bienvenue sur l'application Pur Beurre\n --------------------------------------------\n 1: Quel aliment souhaitez-vous remplacer ?\n 2: Retrouver mes aliments substitués\n 3: Quitter\"\"\")\n\n while True:\n try:\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in range(1, 4):\n break\n except ValueError:\n continue\n\n return choice", "def kill_specific_instance(self, which_instances):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n instance.terminate_instance()", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def stop_current_episode(self):\n raise NotImplementedError", "def state_choose_exit(cfg, app, win):" ]
[ "0.7413727", "0.7075978", "0.6265056", "0.62350225", "0.6169538", "0.5851495", "0.58469385", "0.5749903", "0.5716171", "0.5652272", "0.5642341", "0.5590312", "0.55365413", "0.5526076", "0.5525531", "0.5506383", "0.5448957", "0.5398896", "0.5356763", "0.52930033", "0.5262115", "0.52310014", "0.5197096", "0.51889694", "0.5187088", "0.51804537", "0.5172186", "0.51641977", "0.5162644", "0.51445854" ]
0.8161445
0
Ask the user to choose an available volume
def _choose_among_available_volumes(self): volumes = self.compute.get_available_volumes_ids() # No instances if not volumes: print 'You do not have any available volumes!' return None # List the name of the instances print 'Choose a volume:' for i, v in enumerate(volumes): print '%d) %s' % ((i + 1), v) print # Choose an instance volume_id = '' while True: choice = raw_input("Volume target number or ID (empty to cancel): ") # Cancel if not choice: return None # Valid choice if choice in volumes: volume_id = choice break choice = int(choice) if 1 <= choice <= len(volumes): volume_id = volumes[choice - 1] break # Invalid option print 'Incorrect option!' continue print return volume_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_among_used_volumes(self):\n\n volumes = self.compute.get_used_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any used volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def _volume_command(ramp, volume):\n if volume is not None:\n ramp.set_volume(float(volume))\n else:\n print ramp.volume", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n # if not ctx.voice_state.is_playing:\n # return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100.')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send(f\"Volume of the player set to {volume}%\\nThe volume will be applied in the next song.\")", "def _handler_open_volume(self, event):\n if not self.volume_data.IsEmpty():\n self.volume_data.Remove(0,100)\n filters = 'Volume files (*.vti)|*.vti;'\n dlg = wx.FileDialog(self, \"Please choose a data volume file\", \"\", \"\", filters, wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n self.volume_path = dlg.GetPath()\n filename=dlg.GetFilename()\n self.volume_data.AppendText(filename)\n dlg.Destroy()", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n await ctx.message.add_reaction('✅')", "def set_volume(self):\n import fcntl\n import struct\n try:\n knob = struct.pack(\"III\", 0, 0, self.volume) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, volume_level\n fcntl.ioctl(self.mixer_fd, 3, knob)\n except:\n pass", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "def choose(self, choice):\n if self.available(choice):\n self.select(choice)", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.current.source.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"", "def display_menu():\n print(\"Press 1 to purchase stocks\\n\")\n print(\"\\nPress 2 to visualize the total prices of selected stocks over the period of time imported from a json file\\n\")\n print(\"\\nPress 0 to quit\\n\")\n try:\n response = 
int(input(\"\\nwaiting for Input: \"))\n if response < 0 or response > 2:\n return \"Please input a value between 0 and 2\"\n except:\n print(\"Please enter the numeric values specified in the menu\")\n else:\n return response", "async def volume(self, ctx, volume: int):\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "def volume_down():\n sonos.set_relative_volume(-10)\n return \"Ok\"", "def start_adaptVolume(hermes, intent_message): \n #log the input and the received intent\n start_logging()\n log_asr_input(intent_message.input)\n log_received_intent(intent_message)\n \n #extract the desired volume value from the intent message and adapt the volume\n volume_value = get_required_volume(intent_message.slots)\n \n if volume_value is None:\n #volume value not valid\n #get a randomly chosen answer from a list of possible answers\n message_to_tts = get_random_answer('adaptVolume', 'error')\n\n else:\n #volume value is valid\n change_volume(volume_value)\n #log the event of changing the volume\n log_msg = ' changed output volume to: {} '.format(str(volume_value))\n log_event(log_msg)\n \n #get a randomly chosen answer from a list of possible answers\n message_to_tts = get_random_answer('adaptVolume', 'adapt')\n \n #end by sending a message to the tts and logging\n log_tts_output(message_to_tts)\n hermes.publish_end_session(intent_message.session_id, message_to_tts)", "def run(self,inputVolume,outputVolume):\n return True", "def on_set_volume(self, event):\n self.currentVolume = self.volumeCtrl.GetValue()\n self.mplayer.SetProperty(\"volume\", self.currentVolume)", "def assign_volume_letters():\n remove_volume_letters()\n\n # Write script\n script = []\n for vol in get_volumes():\n script.append('select volume {}'.format(vol['Number']))\n script.append('assign')\n\n # Run\n run_diskpart(script)", "def volume():\n vol = sonos.volume\n return vol", "async def volume(self, msg, vol: int):\n\n if vol > 200:\n vol = 200\n vol = vol/100\n if msg.author.voice is not None:\n if msg.voice_client is not None:\n if msg.voice_client.channel == msg.author.voice.channel and msg.voice_client.is_playing() is True:\n msg.voice_client.source.volume = vol\n self.player[msg.guild.id]['volume'] = vol\n # if (msg.guild.id) in self.music:\n # self.music[str(msg.guild.id)]['vol']=vol\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"**Please join the same voice channel as the bot to use the command**\".title(), delete_after=30)", "def _wait_until_volume_available(self, volume, maybe_in_use=False):\n ok_states = [\"creating\"] # it's ok to wait if the volume is in this\n tries = 0\n if maybe_in_use:\n ok_states.append(\"in_use\")\n logger.info(\"Volume: %s is in state: %s\", volume.name, volume.state)\n while volume.state in ok_states:\n sleep(3)\n volume = self.get_volume(volume.name)\n tries = tries + 1\n if tries > 10:\n logger.info(\"Maximum amount of tries reached..\")\n break\n if volume.state == \"notfound\":\n logger.error(\"no volume was found for: %s\", volume.name)\n break\n logger.info(\" ... 
%s\", volume.state)\n if volume.state != \"available\":\n # OVH uses a non-standard state of 3 to indicate an available\n # volume\n logger.info(\"Volume %s is %s (not available)\", volume.name, volume.state)\n logger.info(\n \"The volume %s is not available, but will continue anyway...\",\n volume.name,\n )\n return True", "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume),delete_after=15)", "def volume(self, v: int) -> None:\n # changed so it returns to the default volume\n if v > VOLUME_MAX:\n self._volume = VOLUME_DEFAULT\n elif v < VOLUME_MIN:\n self._volume = VOLUME_MIN\n else:\n self._volume = v", "def configurar_volume(self):\n\n print(\"Volume configurado\")", "def volume(self, value):\n self._volume = value\n self._sendCommand('%03dVL' % value)", "async def volume(self, ctx, value: int):\n\n if value > 100:\n await self.bot.say('select a value between 0-100 pls')\n return\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.volume = state.volume = value / 100\n await self.bot.say('Set the volume to {:.0%}'.format(player.volume))", "def __call__(self, level):\r\n import time\r\n\r\n currentVolume = self.__findCurrentVolumeLevel()[0]\r\n\r\n assert isinstance(level, int), \"Given volume level is not integer (instead %s)\" % type(level)\r\n if not (level <= self.currentMaximumVolume and level >= 0):\r\n self.phone.fail(\"adjustVolume: given level is not valid. Valid ones for this specific volume bar are 0 - %s)\" % self.currentMaximumVolume)\r\n\r\n self.phone.comment(\"adjustVolume(%s)\" % level)\r\n\r\n if level < currentVolume:\r\n while level < currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_DOWN')\r\n currentVolume -= 1\r\n\r\n elif level > currentVolume:\r\n while level > currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_UP')\r\n currentVolume += 1\r\n\r\n else: # volume level is now ok, pass\r\n pass\r\n\r\n return True\r\n\r\n ## TODO: current volume level should be asked from yapas\r\n\r\n #doCheck = False\r\n\r\n #if doCheck:\r\n #\r\n # currentVolume = self.getCurrentVolumeLevel()\r\n # assert currentVolume == level, \"Adjusted volume, but the volume level is %s when it should be %s\" % (currentVolume, level)\r\n\r\n # debug.brf(\"Selected volume level %s and verified from UI\" % level)\r\n # return True\r\n #else:\r\n # debug.brf(\"Selected volume level %s\" % level)\r\n # return True\r", "def OnSetVolume(self):\r\n volume = self.volume_var.get()\r\n # vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\r\n if volume > 100:\r\n volume = 100\r\n if self.player.audio_set_volume(volume) == -1:\r\n self.errorDialog(\"Failed to set volume\")", "def _selectInput(self):\n\n (my_file, my_path) = misc.get_file(FilterSpec='*.wav', \n DialogTitle='Select sound-input:', \n DefaultName='')\n if my_path == 0:\n print('No file selected')\n return 0\n else:\n full_in_file = os.path.join(my_path, my_file)\n print('Selection: ' + full_in_file)\n return full_in_file" ]
[ "0.6655224", "0.65141517", "0.63322014", "0.61468875", "0.61278677", "0.6078939", "0.6045884", "0.6009745", "0.59924775", "0.594599", "0.59088606", "0.59042", "0.5901506", "0.58802724", "0.5874152", "0.5873146", "0.58494675", "0.5837534", "0.5836003", "0.58232427", "0.58229053", "0.57979584", "0.5790901", "0.5782556", "0.57810456", "0.57796973", "0.57774407", "0.5775018", "0.5758578", "0.5734556" ]
0.6961525
0
Ask the user to choose a used volume
def _choose_among_used_volumes(self): volumes = self.compute.get_used_volumes_ids() # No instances if not volumes: print 'You do not have any used volumes!' return None # List the name of the instances print 'Choose a volume:' for i, v in enumerate(volumes): print '%d) %s' % ((i + 1), v) print # Choose an instance volume_id = '' while True: choice = raw_input("Volume target number or ID (empty to cancel): ") # Cancel if not choice: return None # Valid choice if choice in volumes: volume_id = choice break choice = int(choice) if 1 <= choice <= len(volumes): volume_id = volumes[choice - 1] break # Invalid option print 'Incorrect option!' continue print return volume_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_among_available_volumes(self):\n\n volumes = self.compute.get_available_volumes_ids()\n\n # No instances\n if not volumes:\n print 'You do not have any available volumes!'\n return None\n\n # List the name of the instances\n print 'Choose a volume:'\n for i, v in enumerate(volumes):\n print '%d) %s' % ((i + 1), v)\n print\n\n # Choose an instance\n volume_id = ''\n while True:\n\n choice = raw_input(\"Volume target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in volumes:\n volume_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(volumes):\n volume_id = volumes[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return volume_id", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def _volume_command(ramp, volume):\n if volume is not None:\n ramp.set_volume(float(volume))\n else:\n print ramp.volume", "def set_volume(self):\n import fcntl\n import struct\n try:\n knob = struct.pack(\"III\", 0, 0, self.volume) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, volume_level\n fcntl.ioctl(self.mixer_fd, 3, knob)\n except:\n pass", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n # if not ctx.voice_state.is_playing:\n # return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100.')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send(f\"Volume of the player set to {volume}%\\nThe volume will be applied in the next song.\")", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n await ctx.message.add_reaction('✅')", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "def _handler_open_volume(self, event):\n if not self.volume_data.IsEmpty():\n self.volume_data.Remove(0,100)\n filters = 'Volume files (*.vti)|*.vti;'\n dlg = wx.FileDialog(self, \"Please choose a data volume file\", \"\", \"\", filters, wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n self.volume_path = dlg.GetPath()\n filename=dlg.GetFilename()\n self.volume_data.AppendText(filename)\n dlg.Destroy()", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "async def volume(self, ctx, volume: int):\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "def volume():\n vol = sonos.volume\n return vol", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.current.source.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))", "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n 
return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume),delete_after=15)", "def on_set_volume(self, event):\n self.currentVolume = self.volumeCtrl.GetValue()\n self.mplayer.SetProperty(\"volume\", self.currentVolume)", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def volume(self, value):\n self._volume = value\n self._sendCommand('%03dVL' % value)", "def volume(self, v: int) -> None:\n # changed so it returns to the default volume\n if v > VOLUME_MAX:\n self._volume = VOLUME_DEFAULT\n elif v < VOLUME_MIN:\n self._volume = VOLUME_MIN\n else:\n self._volume = v", "async def volume(self, msg, vol: int):\n\n if vol > 200:\n vol = 200\n vol = vol/100\n if msg.author.voice is not None:\n if msg.voice_client is not None:\n if msg.voice_client.channel == msg.author.voice.channel and msg.voice_client.is_playing() is True:\n msg.voice_client.source.volume = vol\n self.player[msg.guild.id]['volume'] = vol\n # if (msg.guild.id) in self.music:\n # self.music[str(msg.guild.id)]['vol']=vol\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"**Please join the same voice channel as the bot to use the command**\".title(), delete_after=30)", "def change_volume(value):\n\t\n\tprint('received val:', value)\n\t\n\tcommand = ['amixer', '--card', '1', 'set', 'Speaker', value]\t\n\tsubprocess.Popen(command)", "def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"", "async def volume(self, ctx, value: int):\n\n if value > 100:\n await self.bot.say('select a value between 0-100 pls')\n return\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.volume = state.volume = value / 100\n await self.bot.say('Set the volume to {:.0%}'.format(player.volume))", "def run(self,inputVolume,outputVolume):\n return True", "def set_volume(self, emitter_name, value):\n\t\tif self._setting.get(FIFE_MODULE, \"PlaySounds\"):\n\t\t\tself.emitter[emitter_name].setGain(value)", "def _update_total_ask(self, volume):\r\n self.total_ask += self.gox.base2float(volume)", "def configurar_volume(self):\n\n print(\"Volume configurado\")", "def set_volume(self, volume):\n self.get(COMMAND_UIC, 'SetVolume', [('volume', int(volume))])", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 
1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def volume_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"volume_type\")" ]
[ "0.6707448", "0.6390336", "0.6348588", "0.6309887", "0.62393486", "0.61483634", "0.61318976", "0.61249876", "0.6119035", "0.60851866", "0.60461193", "0.6021429", "0.59837437", "0.59562814", "0.5935449", "0.5935449", "0.5935449", "0.59306705", "0.5927265", "0.5906751", "0.58967286", "0.58683133", "0.585331", "0.5843131", "0.5841254", "0.58385026", "0.58322376", "0.5823454", "0.58176476", "0.581759" ]
0.6921604
0
List (print) the EC2 instances
def list_instances(self): print '# AWS EC2 instances' self.compute.list_instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + 
\n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def ec2_list(ctx):\n\n from opstools.aws import ec2_list as this_ec2_list\n this_ec2_list.main()", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_instances(self):\n # list instances\n self._list_instances()", "def test_list_ec2_instances(self):\n instances = [e for e in list_ec2_instances()]\n self.assertEqual([], instances)", "def get_ec2(self, name: str) -> list:\n filters = [\n {\n 'Name': 'tag:Name',\n 'Values': [name]\n },\n {\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }\n ]\n\n return list(self.ec2.instances.filter(Filters=filters).all())", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if 
occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def show_instances():\n return get_instances()", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def ec2(filter=\".*\",interval=0):\n interval = int(interval)\n def printlist():\n (list_instances,list_headers) = ec2list(filter=filter)\n x = PrettyTable()\n x.field_names = list_headers\n for instance in list_instances:\n x.add_row([ instance[y] for y in list_headers ])\n print(x)\n if interval:\n while True:\n os.system(\"clear\")\n printlist()\n time.sleep(interval)\n else:\n printlist()", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return 
instance_names", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list" ]
[ "0.8685158", "0.8134876", "0.7839257", "0.7732988", "0.768987", "0.765721", "0.7422266", "0.73837346", "0.738019", "0.7235557", "0.7216362", "0.7206594", "0.7197218", "0.71892095", "0.71880347", "0.7167213", "0.71258837", "0.7125404", "0.7103951", "0.7100956", "0.708605", "0.7083838", "0.7048549", "0.70398587", "0.6989116", "0.69419855", "0.6893415", "0.6837203", "0.6797046", "0.67846215" ]
0.9023498
0
List (print) the running EC2 instances
def list_running_instances(self): print '# Running AWS EC2 instances' self.compute.list_running_instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features 
resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def get_ec2(self, name: str) -> list:\n filters = [\n {\n 'Name': 'tag:Name',\n 'Values': [name]\n },\n {\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }\n ]\n\n return list(self.ec2.instances.filter(Filters=filters).all())", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def ec2_list(ctx):\n\n from opstools.aws import ec2_list as this_ec2_list\n this_ec2_list.main()", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_instances(self):\n # list instances\n self._list_instances()", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", 
\"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def show_instances():\n return get_instances()", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def machine_lookup_all(session, hostname, public_ip = True):\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n addresses = []\n items = response['Reservations']\n if len(items) > 0:\n for i in items:\n item = i['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n addresses.append(item['PublicIpAddress'])\n elif 'PrivateIpAddress' in item and not public_ip:\n addresses.append(item['PrivateIpAddress'])\n return addresses", "def test_list_ec2_instances(self):\n instances = [e for e in list_ec2_instances()]\n self.assertEqual([], instances)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", 
None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())" ]
[ "0.8954593", "0.8475898", "0.78454655", "0.7655982", "0.7580032", "0.75376797", "0.741361", "0.7245549", "0.7218848", "0.71418124", "0.7124448", "0.7121767", "0.7078368", "0.7067451", "0.7066132", "0.70264196", "0.7016414", "0.69694686", "0.6958187", "0.69378376", "0.69358754", "0.6891142", "0.6875601", "0.6871909", "0.6871361", "0.68260366", "0.67970324", "0.67453784", "0.6736904", "0.6727955" ]
0.8686558
1
Detail a given EC2 running instance. The user is asked for the instance id.
def detail_running_instance(self): instance_id = self._choose_among_running_instances() # Exit option if not instance_id: print 'Operation cancelled' return # Print the details print '# Details of the "%s" instance' % instance_id self.compute.detail_running_instance(instance_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': '[email protected]'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='[email protected]',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def find_instance_by_id ( ec2_conn, instance_id ) :\n instance_results = ec2_conn.get_only_instances( instance_ids = [ instance_id ] )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def update_instance_description(self, instance_id=None):\n\n # TODO(dittrich): Make this capable of handling multi-instance stacks\n # Return a list or dictionary of multiple public_ip/public_dns sets.\n if self.client is None:\n try:\n # Lazy import boto3, because this:\n # botocore X.X.X has requirement docutils<Y.Y,>=Z.ZZ,\n # but you'll have docutils N.N which is incompatible.\n import boto3\n except ModuleNotFoundError:\n raise RuntimeError(\"[-] ensure the 'boto3' \"\n \"package is installed properly\")\n self.client = boto3.client('ec2')\n stack_list = self.client.describe_instances().get('Reservations')\n if len(stack_list) == 0:\n raise RuntimeError(\"[-] no running instances found\")\n if instance_id is None:\n for stack in stack_list:\n for instance in stack['Instances']:\n state = instance['State']['Name']\n if state != 'running':\n self.logger.debug(\n 'Ignoring %s instance %s',\n state,\n instance['InstanceId']\n )\n else:\n self.logger.debug(\n 'Found running instance %s',\n instance['InstanceId'])\n self.public_ip = instance.get(\n 'PublicIpAddress', None)\n self.public_dns = instance.get(\n 'PublicDnsName', None)\n break\n else:\n for stack in stack_list:\n for instance in stack['Instances']:\n if instance['InstanceId'] == instance_id:\n self.public_ip = 
instance.get('PublicIpAddress', None)\n self.public_dns = instance.get('PublicDnsName', None)\n return {'public_ip': self.public_ip,\n 'public_dns': self.public_dns}", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def view_instance(name):\n if request.method == \"GET\":\n instance_details = get_instance_details(name)\n\n if instance_details == 504:\n flash(\n \"The connection to {} has timed out. Please try again later.\".format(\n name\n ),\n \"warning\",\n )\n return redirect(url_for(\"list_instances\"))\n\n # try:\n # if instance_details['details']['pods'][0]['kind'] == 'Error':\n # print(\"No pod exist, so emptying logs and skipping lookup\")\n # instance_log = {'logs': ''}\n # except:\n # instance_log = get_instance_logs(name)\n\n # if instance_log == 500:\n # return render_template('500.html')\n\n instance_status = True\n\n # pretty_print = json.dumps(instance_details, sort_keys = True, indent = 2)\n return render_template(\n \"instance_profile.html\",\n name=name,\n instance_details=instance_details,\n instance_status=instance_status,\n )\n # instance_log=instance_log)", "def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)", "def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def get_self_instance_id():\n\n logging.debug('get_self_instance_id()')\n response = urllib2.urlopen('http://169.254.169.254/1.0/meta-data/instance-id')\n instance_id = response.read()\n return instance_id", "def ec2console(instanceid, interface=\"public\"):\n # First we get instance info\n os.system(\"clear\")\n date = os.popen(\"date +'%d-%m-%y %H:%M:%S'\").readline()\n client = boto3.client(\"ec2\")\n ready = False\n old = \"\"\n firsttime = True\n while not ready:\n client = boto3.client(\"ec2\")\n data = client.get_console_output(InstanceId=instanceid)\n if \"Output\" not in data:\n if firsttime:\n os.system(\"clear\")\n os.system(\"cat \"+os.path.dirname(os.path.realpath(__file__))+\"/waitforit\")\n firsttime = False\n else:\n current = data[\"Output\"]\n if not old:\n print(current)\n old = current\n else:\n if old != current:\n diff = re.sub(old, \"\", current)\n print(diff)\n list_lines = current.split(\"\\n\")\n for line in list_lines:\n if re.match(\".*loud-init.*finished.*\", line):\n print(\"\\n\\n=========\\nTime for SSH\\n\")\n ready = True\n if ready:\n break\n else:\n print(\"Sleeping 20seconds...\")\n time.sleep(20)\n data = client.describe_instances(InstanceIds=[instanceid])[\"Reservations\"][0][\"Instances\"][0]\n if interface == \"public\":\n ip = data[\"NetworkInterfaces\"][0][\"Association\"][\"PublicIp\"]\n else:\n ip = data[\"PrivateIpAddress\"]\n os.system(\"ssh \"+ip)", "def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance", "async def start_ec2_instance(self, env):\n instanceDef= {\n 'AWS_AMI_ID': os.getenv(\"AWS_AMI_ID\"),\n 'AWS_KEYNAME': os.getenv(\"AWS_KEYNAME\"),\n 'AWS_SECURITY_GROUP': os.getenv('AWS_SECURITY_GROUP'),\n 'AWS_SUBNET': os.getenv(\"AWS_SUBNET\"),\n 'DryRun':False,\n 'AWS_INSTANCE_NAME': 'Jupyter',\n 'AWS_IAM_ARN': os.getenv('AWS_IAM_ARN')\n }\n \n self.log.debug('building instance')\n ip = await self.buildInstance(instanceDef, env)\n return ip", "def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find 
instance\"\n return None", "def get_status(ec2,spot_request_id):\n current = ec2.describe_spot_instance_requests(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = current[u'SpotInstanceRequests'][0][u'InstanceId'] if u'InstanceId' in current[u'SpotInstanceRequests'][0] else None\n return instance_id", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def stop_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Stop an instance\n response = ec2_resource.Instance(instance_id).stop(DryRun=False)\n print(response)\n print(\"\\nSuccessfully stopping instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def terminate_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Terminate an instance\n response = ec2_resource.Instance(instance_id).terminate(DryRun=False)\n print(response)\n print(\"\\nSuccessfully terminating instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n 
instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_instance_info(self, kc, port_id):\n\n neutron_endpoint = kc.service_catalog.url_for(service_type='network',\n endpoint_type='internalURL')\n nc = neutron_c.Client(token=kc.auth_token,\n tenant_id=kc.auth_tenant_id,\n endpoint_url=neutron_endpoint)\n port_details = nc.show_port(port_id)\n instance_id = port_details['port']['device_id']\n instance_info = {'id': instance_id}\n LOG.debug('Instance id for port id %s is %s' % (port_id, instance_id))\n\n nova_endpoint = kc.service_catalog.url_for(service_type='compute',\n endpoint_type='internalURL')\n nvc = nova_c.Client(auth_token=kc.auth_token,\n tenant_id=kc.auth_tenant_id,\n bypass_url=nova_endpoint)\n server_info = nvc.servers.get(instance_id)\n LOG.debug('Instance name for id %s is %s' % (instance_id, server_info.name))\n instance_info['original_name'] = server_info.name\n instance_info['scrubbed_name'] = _scrub_instance_name(server_info.name)\n if instance_info['original_name'] != instance_info['scrubbed_name']:\n LOG.warn('Instance name for id %s contains characters that cannot be used'\n ' for a valid DNS record. It was scrubbed from %s to %s'\n % (instance_id, instance_info['original_name'], instance_info['scrubbed_name']))\n instance_info['name'] = instance_info['scrubbed_name']\n else:\n instance_info['name'] = instance_info['original_name']\n\n return instance_info", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = power_state.RUNNING\n elif status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n 
print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def associate_eip_with_instance(ec2, instance_id, allocation_id):\n\taddress = ec2.VpcAddress(allocation_id)\n\taddress.associate(InstanceId=instance_id)\n\treturn address.public_ip", "def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")" ]
[ "0.78466356", "0.73414147", "0.7093406", "0.7090001", "0.68222576", "0.6783114", "0.66420054", "0.6511005", "0.649048", "0.64746976", "0.64403975", "0.6409867", "0.63910794", "0.63681906", "0.6364402", "0.6344418", "0.6313384", "0.6279237", "0.6267937", "0.62498564", "0.6210147", "0.6193204", "0.61735505", "0.61585164", "0.615072", "0.6149753", "0.6146784", "0.6140472", "0.61310554", "0.6118492" ]
0.7481309
1
Create an EC2 instance given the OS. The user will be asked to choose between Windows and Linux.
def create_instance_by_os(self): print '# Start a new instance based on the OS' # Choose between linux or windows is_linux = True while True: os = raw_input('Enter the OS (windows/linux or empty to cancel): ') # Cancel if not os: print 'Operation cancelled' return # Check if linux if os.lower() == 'linux': is_linux = True break # Check windows if os.lower() == 'windows': is_linux = False break # Error print 'Invalid input!' # Create the instance if self.compute.create_instance_by_os(is_linux): print 'Instance started!' else: print 'It was not possible to create an instance with the given OS'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance", "def create_instance(self, image='ami-660c3023', key_name='linuxonEC2', instance_type='t1.micro', security_groups=['default']):\n return self.conn.run_instances(image,\n key_name=key_name,\n instance_type=instance_type,\n security_groups=security_groups).instances[0]", "def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def create_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Creating an instance of tcServer in %s\" % tcserver_dir)\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-instance.sh\", \"create\", instance_name])\n popdir()", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. 
Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching 
instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': '[email protected]'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='[email protected]',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def create_instance_by_image(self):\n print '# Start a new instance based on an existing AMI'\n ami = raw_input('Enter AMI (empty to cancel): ')\n\n # Cancel\n if not ami:\n print 'Operation cancelled'\n return\n\n # Start the instance\n if self.compute.create_instance_by_image(ami):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given AMI'", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "async def buildInstance(self, instanceDef, env):\n # Create tag specifications which we use to pass variables to the instance\n tags = self.env_to_tags(env)\n \n import EC2Spawner as ec2spawnerModule\n bootstrapPath = os.path.dirname(ec2spawnerModule.__file__) + '/data/bootstrap.sh'\n\n with open(bootstrapPath, 'r') as myfile:\n UserData = myfile.read()\n \n instance = self.ec2.create_instances(\n ImageId=instanceDef['AWS_AMI_ID'],\n KeyName=instanceDef['AWS_KEYNAME'], \n InstanceType='t2.medium',\n MinCount=1, MaxCount=1,\n DryRun=instanceDef['DryRun'],\n SubnetId=instanceDef['AWS_SUBNET'], \n SecurityGroupIds=[instanceDef['AWS_SECURITY_GROUP']],\n TagSpecifications=tags,\n IamInstanceProfile={'Arn': instanceDef['AWS_IAM_ARN']},\n 
UserData=UserData\n )\n\n # InstanceType='t1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'t3.nano'|'t3.micro'|'t3.small'|'t3.medium'|'t3.large'|'t3.xlarge'|'t3.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'r5.large'|'r5.xlarge'|'r5.2xlarge'|'r5.4xlarge'|'r5.8xlarge'|'r5.12xlarge'|'r5.16xlarge'|'r5.24xlarge'|'r5.metal'|'r5a.large'|'r5a.xlarge'|'r5a.2xlarge'|'r5a.4xlarge'|'r5a.12xlarge'|'r5a.24xlarge'|'r5d.large'|'r5d.xlarge'|'r5d.2xlarge'|'r5d.4xlarge'|'r5d.8xlarge'|'r5d.12xlarge'|'r5d.16xlarge'|'r5d.24xlarge'|'r5d.metal'|'x1.16xlarge'|'x1.32xlarge'|'x1e.xlarge'|'x1e.2xlarge'|'x1e.4xlarge'|'x1e.8xlarge'|'x1e.16xlarge'|'x1e.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'i3.metal'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'c5.large'|'c5.xlarge'|'c5.2xlarge'|'c5.4xlarge'|'c5.9xlarge'|'c5.18xlarge'|'c5d.large'|'c5d.xlarge'|'c5d.2xlarge'|'c5d.4xlarge'|'c5d.9xlarge'|'c5d.18xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'g3.4xlarge'|'g3.8xlarge'|'g3.16xlarge'|'g3s.xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'p3.2xlarge'|'p3.8xlarge'|'p3.16xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.4xlarge'|'f1.16xlarge'|'m5.large'|'m5.xlarge'|'m5.2xlarge'|'m5.4xlarge'|'m5.12xlarge'|'m5.24xlarge'|'m5a.large'|'m5a.xlarge'|'m5a.2xlarge'|'m5a.4xlarge'|'m5a.12xlarge'|'m5a.24xlarge'|'m5d.large'|'m5d.xlarge'|'m5d.2xlarge'|'m5d.4xlarge'|'m5d.12xlarge'|'m5d.24xlarge'|'h1.2xlarge'|'h1.4xlarge'|'h1.8xlarge'|'h1.16xlarge'|'z1d.large'|'z1d.xlarge'|'z1d.2xlarge'|'z1d.3xlarge'|'z1d.6xlarge'|'z1d.12xlarge'|'u-6tb1.metal'|'u-9tb1.metal'|'u-12tb1.metal'\n \n self.log.debug(\"AWS Instance ID: {}\".format(instance[0].id))\n waiter = self.client.get_waiter('instance_running')\n \n self.log.debug('Waiting...')\n await waiter.wait(InstanceIds=[instance[0].id])\n description = self.client.describe_instances(InstanceIds=[instance[0].id])\n instanceIP = description['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']\n\n self.ec2_instance_ip = instanceIP\n self.log.debug(\"AWS Instance IP: {}\".format(self.ec2_instance_ip))\n self.ec2_instance_id = instance[0].id\n return instanceIP", "def create_instance(ami, sg_name):\n instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def create_machine(request):\n\n params = params_from_request(request)\n cloud_id = request.matchdict['cloud']\n\n for 
key in ('name', 'size'):\n if key not in params:\n raise RequiredParameterMissingError(key)\n\n key_id = params.get('key')\n machine_name = params['name']\n location_id = params.get('location', None)\n image_id = params.get('image')\n if not image_id:\n raise RequiredParameterMissingError(\"image\")\n # this is used in libvirt\n disk_size = int(params.get('libvirt_disk_size', 4))\n disk_path = params.get('libvirt_disk_path', '')\n size_id = params['size']\n # deploy_script received as unicode, but ScriptDeployment wants str\n script = str(params.get('script', ''))\n # these are required only for Linode/GCE, passing them anyway\n image_extra = params.get('image_extra', None)\n disk = params.get('disk', None)\n image_name = params.get('image_name', None)\n size_name = params.get('size_name', None)\n location_name = params.get('location_name', None)\n ips = params.get('ips', None)\n monitoring = params.get('monitoring', False)\n networks = params.get('networks', [])\n docker_env = params.get('docker_env', [])\n docker_command = params.get('docker_command', None)\n script_id = params.get('script_id', '')\n script_params = params.get('script_params', '')\n post_script_id = params.get('post_script_id', '')\n post_script_params = params.get('post_script_params', '')\n async = params.get('async', False)\n quantity = params.get('quantity', 1)\n persist = params.get('persist', False)\n docker_port_bindings = params.get('docker_port_bindings', {})\n docker_exposed_ports = params.get('docker_exposed_ports', {})\n azure_port_bindings = params.get('azure_port_bindings', '')\n # hostname: if provided it will be attempted to assign a DNS name\n hostname = params.get('hostname', '')\n plugins = params.get('plugins')\n cloud_init = params.get('cloud_init', '')\n associate_floating_ip = params.get('associate_floating_ip', False)\n associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',\n None)\n project_id = params.get('project', None)\n bare_metal = params.get('bare_metal', False)\n # bare_metal True creates a hardware server in SoftLayer,\n # whule bare_metal False creates a virtual cloud server\n # hourly True is the default setting for SoftLayer hardware\n # servers, while False means the server has montly pricing\n softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)\n hourly = params.get('billing', True)\n job_id = params.get('job_id')\n job_id = params.get('job_id')\n # The `job` variable points to the event that started the job. If a job_id\n # is not provided, then it means that this is the beginning of a new story\n # that starts with a `create_machine` event. If a job_id is provided that\n # means that the current event will be part of already existing, unknown\n # story. 
TODO: Provide the `job` in the request's params or query it.\n if not job_id:\n job = 'create_machine'\n job_id = uuid.uuid4().hex\n else:\n job = None\n\n # these are needed for OnApp\n size_ram = params.get('size_ram', 256)\n size_cpu = params.get('size_cpu', 1)\n size_disk_primary = params.get('size_disk_primary', 5)\n size_disk_swap = params.get('size_disk_swap', 1)\n boot = params.get('boot', True)\n build = params.get('build', True)\n cpu_priority = params.get('cpu_priority', 1)\n cpu_sockets = params.get('cpu_sockets', 1)\n cpu_threads = params.get('cpu_threads', 1)\n port_speed = params.get('port_speed', 0)\n hypervisor_group_id = params.get('hypervisor_group_id')\n\n auth_context = auth_context_from_request(request)\n\n try:\n Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n # compose schedule as a dict from relative parameters\n if not params.get('schedule_type'):\n schedule = {}\n else:\n if params.get('schedule_type') not in ['crontab',\n 'interval', 'one_off']:\n raise BadRequestError('schedule type must be one of '\n 'these (crontab, interval, one_off)]'\n )\n if params.get('schedule_entry') == {}:\n raise RequiredParameterMissingError('schedule_entry')\n\n schedule = {\n 'name': params.get('name'),\n 'description': params.get('description', ''),\n 'action': params.get('action', ''),\n 'script_id': params.get('schedule_script_id', ''),\n 'schedule_type': params.get('schedule_type'),\n 'schedule_entry': params.get('schedule_entry'),\n 'expires': params.get('expires', ''),\n 'start_after': params.get('start_after', ''),\n 'max_run_count': params.get('max_run_count'),\n 'task_enabled': bool(params.get('task_enabled', True)),\n 'auth_context': auth_context.serialize(),\n }\n\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n auth_context.check_perm(\"cloud\", \"create_resources\", cloud_id)\n tags = auth_context.check_perm(\"machine\", \"create\", None) or {}\n if script_id:\n auth_context.check_perm(\"script\", \"run\", script_id)\n if key_id:\n auth_context.check_perm(\"key\", \"read\", key_id)\n\n # Parse tags.\n try:\n mtags = params.get('tags') or {}\n if not isinstance(mtags, dict):\n if not isinstance(mtags, list):\n raise ValueError()\n if not all((isinstance(t, dict) and len(t) is 1 for t in mtags)):\n raise ValueError()\n mtags = {key: val for item in mtags for key, val in item.items()}\n tags.update(mtags)\n except ValueError:\n raise BadRequestError('Invalid tags format. 
Expecting either a '\n 'dictionary of tags or a list of single-item '\n 'dictionaries')\n\n args = (cloud_id, key_id, machine_name,\n location_id, image_id, size_id,\n image_extra, disk, image_name, size_name,\n location_name, ips, monitoring, networks,\n docker_env, docker_command)\n kwargs = {'script_id': script_id,\n 'script_params': script_params, 'script': script, 'job': job,\n 'job_id': job_id, 'docker_port_bindings': docker_port_bindings,\n 'docker_exposed_ports': docker_exposed_ports,\n 'azure_port_bindings': azure_port_bindings,\n 'hostname': hostname, 'plugins': plugins,\n 'post_script_id': post_script_id,\n 'post_script_params': post_script_params,\n 'disk_size': disk_size,\n 'disk_path': disk_path,\n 'cloud_init': cloud_init,\n 'associate_floating_ip': associate_floating_ip,\n 'associate_floating_ip_subnet': associate_floating_ip_subnet,\n 'project_id': project_id,\n 'bare_metal': bare_metal,\n 'tags': tags,\n 'hourly': hourly,\n 'schedule': schedule,\n 'softlayer_backend_vlan_id': softlayer_backend_vlan_id,\n 'size_ram': size_ram,\n 'size_cpu': size_cpu,\n 'size_disk_primary': size_disk_primary,\n 'size_disk_swap': size_disk_swap,\n 'boot': boot,\n 'build': build,\n 'cpu_priority': cpu_priority,\n 'cpu_sockets': cpu_sockets,\n 'cpu_threads': cpu_threads,\n 'port_speed': port_speed,\n 'hypervisor_group_id': hypervisor_group_id}\n if not async:\n ret = methods.create_machine(auth_context.owner, *args, **kwargs)\n else:\n args = (auth_context.owner.id, ) + args\n kwargs.update({'quantity': quantity, 'persist': persist})\n tasks.create_machine_async.apply_async(args, kwargs, countdown=2)\n ret = {'job_id': job_id}\n ret.update({'job': job})\n return ret", "def _create_vz(self, instance, ostemplate='ubuntu'):\n\n # TODO(imsplitbit): This needs to set an os template for the image\n # as well as an actual OS template for OpenVZ to know what config\n # scripts to use. This can be problematic because there is no concept\n # of OS name, it is arbitrary so we will need to find a way to\n # correlate this to what type of disto the image actually is because\n # this is the clue for openvz's utility scripts. 
For now we will have\n # to set it to 'ubuntu'\n\n # This will actually drop the os from the local image cache\n try:\n utils.execute('sudo', 'vzctl', 'create', instance['id'],\n '--ostemplate', instance['image_id'])\n except exception.ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed creating VE %s from image cache' %\n instance['id'])\n return True", "async def start_ec2_instance(self, env):\n instanceDef= {\n 'AWS_AMI_ID': os.getenv(\"AWS_AMI_ID\"),\n 'AWS_KEYNAME': os.getenv(\"AWS_KEYNAME\"),\n 'AWS_SECURITY_GROUP': os.getenv('AWS_SECURITY_GROUP'),\n 'AWS_SUBNET': os.getenv(\"AWS_SUBNET\"),\n 'DryRun':False,\n 'AWS_INSTANCE_NAME': 'Jupyter',\n 'AWS_IAM_ARN': os.getenv('AWS_IAM_ARN')\n }\n \n self.log.debug('building instance')\n ip = await self.buildInstance(instanceDef, env)\n return ip", "def spawn(self, context, instance,\n network_info=None, block_device_info=None):\n LOG.debug(\"spawn\")\n\n instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance[\"availability_zone\"])\n\n # update instances table\n bmm, reuse = self._select_machine(context, instance)\n instance[\"display_name\"] = bmm[\"name\"]\n instance[\"availability_zone\"] = instance_zone\n db.instance_update(context, \n instance[\"id\"], \n {\"display_name\": bmm[\"name\"],\n \"availability_zone\": instance_zone})\n if vlan_id:\n db.bmm_update(context, bmm[\"id\"], {\"availability_zone\": cluster_name, \n \"vlan_id\": vlan_id,\n \"service_ip\": None})\n \n if instance_zone == \"resource_pool\":\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n else: \n self._update_ofc(bmm, cluster_name)\n if bmm[\"instance_id\"]:\n db.instance_destroy(context, bmm[\"instance_id\"])\n\n if reuse:\n db.bmm_update(context, bmm[\"id\"], {\"status\": \"used\", \n \"instance_id\": instance[\"id\"]}) \n else:\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n \n if instance[\"key_data\"]:\n self._inject_key(bmm[\"pxe_ip\"], str(instance[\"key_data\"]))", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def create_windows_instance(\n project_id: str,\n zone: str,\n instance_name: str,\n machine_type: str,\n source_image_family: str = \"windows-2022\",\n network_link: str = \"global/networks/default\",\n subnetwork_link: str | None = None,\n) -> compute_v1.Instance:\n if subnetwork_link is None:\n subnetwork_link = f\"regions/{zone}/subnetworks/default\"\n\n base_image = get_image_from_family(\n project=\"windows-cloud\", family=source_image_family\n )\n disk_type = f\"zones/{zone}/diskTypes/pd-standard\"\n disks = [disk_from_image(disk_type, 100, True, base_image.self_link, True)]\n\n # You must verify or configure routes and firewall rules in your VPC network\n # to allow access to kms.windows.googlecloud.com.\n # More information about access to kms.windows.googlecloud.com: https://cloud.google.com/compute/docs/instances/windows/creating-managing-windows-instances#kms-server\n\n # Additionally, you must enable Private Google Access for subnets in your VPC network\n # that contain Windows instances with only internal IP addresses.\n # More information about Private Google Access: https://cloud.google.com/vpc/docs/configure-private-google-access#enabling\n\n instance = create_instance(\n project_id,\n zone,\n instance_name,\n disks,\n machine_type=machine_type,\n network_link=network_link,\n subnetwork_link=subnetwork_link,\n external_access=True, # Set this to False to disable external IP for your instance\n )\n return instance", "def amazonEc2_create(amazonEc2):\n\treturn amazonEc2", "def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)", "def prepareInstance(image, instancetype, accesskey, secretkey, pkname,\n softwareList, pipelineUrl):\n # Start up the AMI\n dnsName = startami(image, instancetype, accesskey, secretkey, pkname)\n\n # SSH onto the machine and run the webserver\n\n # SSH onto the machine and run the chef-solo\n installSoftware(dnsName, softwareList)\n\n return ((get_image_username(image), dnsName))", "def create_machine(self, name, ami, is_windows, key_name, key_data, username, password,\n instance_type=Consts.FREE_INSTANCE_TYPE, tags=None, allowed_ip_prefixes=Consts.EVERYONE):\n res = self.conn.run_instances(ami, key_name=key_name, instance_type=instance_type, security_groups=[\"default\"])\n inst = res.instances[0]\n assert inst, \"Machine creation failed!\"\n inst.add_tag(\"Name\", name)\n #TODO tags, key, username/password, security groups, billing, info\n t = threading.Thread(target=self.__stop_new_machine, args=[inst])\n t.start()\n return MachineDetails(inst)", "def create_instance(driver,\n user_id, sig_server_addr, sig_server_port, zone='us-central1-b',\n tags=[], branch='aosp-master', target='aosp_cf_x86_phone-userdebug'):\n\n target = target.replace('_','-')\n instance_name = f'halyard-{user_id}'\n image_family = 
f'halyard-{branch}-{target}'\n\n try:\n driver.ex_get_image_from_family(image_family)\n except:\n utils.fatal_error(f'Image family {image_family} does not exist.\\n \\\n New base images can be created using the `create_base_image` endpoint.')\n\n # Stops execution if instance already exists\n instance = utils.find_instance(driver, instance_name, zone)\n if instance:\n utils.fatal_error(f'Instance {instance_name} already exists.')\n\n build_node = driver.create_node(\n instance_name,\n 'n1-standard-4',\n None,\n location=zone,\n ex_image_family=image_family,\n ex_service_accounts=[{'scopes': ['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n\n utils.wait_for_instance(instance_name, zone)\n\n print('successfully created new instance', instance_name)\n\n launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, False)\n\n return {\"name\": instance_name}", "def machine_new(node=\"dev\", driver='virtualbox'):\n machine = Dockerizing(driver)\n\n # Check that the requested node does not already exist\n if node in machine.list():\n print(colors.warn | \"Failed:\", colors.bold |\n \"Machine '%s' Already exists\" % node)\n return\n machine.create(node)\n\n # Create the machine\n _logger.info(\"Preparing machine\", node)\n print(machine.create(node))\n _logger.info(colors.green | \"Created!\\n\\n\")", "def create_disk_instance(device, disk_params):\n\n domain_name = device[\"name\"]\n disk_instance_path = \"\"\n\n if \"type\" in disk_params:\n if disk_params[\"type\"] == \"image\" and \"image_id\" in disk_params:\n logger.debug(\"Creating secondary/tertiary Disk information\")\n image_id = disk_params[\"image_id\"]\n disk_image = Image.objects.get(pk=image_id)\n disk_base_path = settings.MEDIA_ROOT + \"/\" + disk_image.filePath.url\n\n disk_instance_path = osUtils.get_instance_path_from_image(disk_base_path,\n domain_name + \"_secondary_image.img\"\n )\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_thin_provision_instance(disk_base_path,\n domain_name + \"_secondary_image.img\"\n ):\n raise Exception(\"Could not create image instance for image: \" + disk_base_path)\n\n elif disk_params[\"type\"] == \"blank\":\n disk_instance_path = settings.MEDIA_ROOT \\\n + \"/user_images/instances/\" + domain_name + \"_secondary_blank.img\"\n\n disk_size = \"16G\"\n if \"size\" in disk_params:\n disk_size = disk_params[\"size\"]\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_blank_image(disk_instance_path, disk_size):\n raise Exception(\"Could not create image instance for image: \" + disk_instance_path)\n\n elif disk_params[\"type\"] == \"config_drive\":\n # let's check if config_drive is supported for this vm_type!\n # this is usually used for vMX in openstack, however, we can also use it here for KVM deployments\n disk_instance_path = ''\n if \"configDriveSupport\" in device and device[\"configDriveSupport\"] is True:\n\n logger.debug(\"Lets create a config-drive!\")\n\n # keep a dict of files with format: filename: filecontents\n files = dict()\n params = device[\"configDriveParams\"]\n if \"configDriveParamsFile\" in device and device[\"configDriveParamsFile\"]:\n logger.debug(\"Using inline config_drive format\")\n # behavior change 12-28-2016 - allow passing a list of templates and destinations\n # instead of defining the params directly on the device object\n # if the configDriveParams is a dict, then this is an older topology, leave this code here\n # to still support them - otherwise fall through to the isinstance check for list type 
for\n # newer style configuration\n if isinstance(params, dict):\n name = device[\"configDriveParamsFile\"]\n file_data = \"\"\n # config drive params are usually a dict - to make json serialization easier\n # for our purposes here, let's just make a file with a single key: value per line\n # note, we can add a serialization format to the vm_type.js if needed here\n # only currently used for /boot/loader.conf in vmx and riot\n for k in params:\n file_data += '%s=\"%s\"\\n' % (k, params[k])\n\n files[name] = file_data\n\n # junos customization\n # let's also inject a default config here as well if possible!\n if \"junos\" in device[\"type\"]:\n logger.debug(\"Creating Junos configuration template\")\n junos_config = osUtils.get_junos_default_config_template(device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n if junos_config is not None:\n files[\"/juniper.conf\"] = junos_config\n\n # check for new (12-28-2016) style config drive params definition\n if isinstance(params, list):\n logger.debug(\"params is a list\")\n for p in params:\n if \"template\" in p and \"destination\" in p:\n file_data = None\n file_data = osUtils.compile_config_drive_params_template(\n p[\"template\"],\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"]\n )\n if file_data is not None:\n files[p[\"destination\"]] = file_data\n\n disk_instance_path = osUtils.create_config_drive(device[\"name\"], files)\n if disk_instance_path is None:\n disk_instance_path = ''\n\n logger.debug(\"Using %s\" % disk_instance_path)\n return disk_instance_path", "def launch_instance(tag, key_name, group_name, inst_type, ami_name, user_data,\n wait=True, returninfo=None):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n failures = 0\n max_failures = 10\n while True:\n try:\n reservation = ec2.run_instances(ami_name,\n key_name=key_name,\n security_groups=[group_name],\n instance_type=inst_type,\n user_data=None)\n break\n except Exception, err:\n # Failed to get instance; wait 15 seconds and then try again (up to\n # 10 total times)\n errortext = str(err)\n if errortext.find(\"Not authorized for images\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that the AMI code in\"\n print \"* CloudSetup.py is deprecated. Please go to\"\n print \"* https://aws.amazon.com/marketplace/ and search for\"\n print \"* \\\"Ubuntu server lts hvm\\\", selecting the most recent\"\n print \"* version. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and then copy the AMI ID for the US East region.\"\n print \"* Copy that to the AMI_NAME value in CloudSetup.py\"\n print \"* and re-run.\"\n print \"***************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"***************************************\"\n return None\n elif errortext.find(\"accept terms and subscribe\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that you have never used this\"\n print \"* AMI before and need to accept its terms and\"\n print \"* subscribe to it. Please follow the link in the below\"\n print \"* error text. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and \\\"Accept Terms\\\". 
After receiving email\"\n print \"* confirmation, you can re-run the code.\"\n print \"**************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n failures += 1\n if failures == max_failures:\n print \"**************************************\"\n print \"* Maximum number of instance launch failures reached.\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n print \" ** ec2.run_instances failed for tag\", tag, \"; waiting 15\"\n print \" ** seconds and then trying again...\"\n time.sleep(15)\n\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance = reservation.instances[0]\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance.add_tag(\"tag\", tag)\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n\n if wait:\n print \" Instance requested, waiting for 'running' for tag\", tag\n while instance.state != \"running\":\n print \" %s ...\" % tag\n time.sleep(5)\n try:\n instance.update()\n except boto.exception.EC2ResponseError as e:\n print \"******************\"\n print \"Error caught in instance.update():\"\n print e.strerror\n print \"******************\"\n print \" %s done!\" % tag\n if returninfo:\n returninfo.put(tag)\n return instance", "def launch_example_ec2_cmd(*args, **kwargs):\n return launch_example_ec2(*args, **kwargs)" ]
[ "0.7112136", "0.68605405", "0.65974665", "0.656183", "0.65232134", "0.65207905", "0.6295957", "0.6276715", "0.62366116", "0.62254965", "0.62169397", "0.6215981", "0.6160839", "0.61591357", "0.61518484", "0.6142052", "0.6102039", "0.6092225", "0.60879654", "0.60726446", "0.6047026", "0.6043326", "0.60426205", "0.60418004", "0.60167044", "0.5952063", "0.59420365", "0.5866985", "0.5862921", "0.5857408" ]
0.803839
0
Stop all the EC2 instances
def stop_all_instances(self): print '# Stopping all the instances' number = self.compute.stop_all_instances() print '%d instances were stopped' % number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def Stop_Instances(ids=Get_Running_Instances()):\n ec2 = boto3.client('ec2')\n #call the features client from the boto3 library\n if not ids:\n #if the list of Ec2 instances returned is empty.\n print(\"No Instance in the state Running or pending\")\n else:\n ec2.stop_instances(InstanceIds=ids)\n #stop the instances using their id\n ec2.get_waiter('instance_stopped').wait(InstanceIds=ids)\n #wait for the state of the instances to change to stopped.\n print('instance {} was shutdown'.format(ids))", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def stop(self):\n for c in self.openstack_endpoints.values():\n c.stop()\n #for c in self.openstack_endpoints.values():\n # if c.server_thread:\n # print(\"Waiting for WSGIServers to be stopped ...\")\n # c.server_thread.join()", "def stop(self):\n logging.debug(\"footprint/stop entered\")\n logging.info(\"Stopping cloud instances\")\n print \"Stopping machines\"\n for machine in self.machines:\n logging.debug(\"stopping %s\" % machine)\n server = self.machines[machine]\n server.stop()\n \n # monitor until all the machines are down\n active_machines = 1\n while active_machines:\n running = 0\n active_machines = 0\n for machine in self.machines:\n server = self.machines[machine]\n try:\n tmp = cs.servers.get(self.machines[machine].id)\n active_machines = 1\n running = running + 1 \n except novaclient.exceptions.NotFound:\n continue\n # if running == 0:\n # break\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n logging.info(\"Stopping Networks\")\n print\n print \"Stopping networks\"\n \n for network in self.networks:\n logging.debug(\"stopping %s\" % str(network))\n n = self.networks[network]\n n.stop()\n \n while True:\n running = 0\n # print self.networks\n for network in self.networks:\n n = self.networks[network]\n\n try:\n tmp = cn.find(id=n.id)\n running = running + 1\n except pyrax.exceptions.NotFound:\n continue\n if running == 0:\n break\n time.sleep(1)\n sys.stdout.write(\".\")\n sys.stdout.flush()", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def stop_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n ec2_client.stop_instances(InstanceIds=instances_ids)\n \n # wait till instance is stopped\n waiter = ec2_client.get_waiter(\"instance_stopped\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"\\n===EC2 instance has 
stopped!\")", "def stop(self, ids: list) -> str:\n # If no ids are passed raise Nothing to do\n if 'None' in ids:\n raise EC2Error('Nothing to do. Need IDS! Arrgh!!!')\n\n try:\n status = self.ec2.instances.filter(InstanceIds=ids).stop()\n return status\n except IOError as e:\n raise EC2Error('Error stopping EC2 Instances {}'.format(e))", "def stop_instances(self, instance_ids=None, force=False):\r\n params = {}\r\n if force:\r\n params['Force'] = 'true'\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('StopInstances', params,\r\n [('item', Instance)], verb='POST')", "def stop(self):\r\n for srv in self._servers:\r\n srv.stop()", "def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()", "def shutdown_vpc ( ec2_conn, vpc, exceptions = [] ) :\n instances = ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )\n exceptions = [ exception.upper( ) for exception in exceptions ]\n for instance in instances :\n if instance.state == 'running' :\n is_exception = False\n instance_name = instance.tags[ 'Name' ].upper( )\n for exception in exceptions :\n if instance_name.find( exception ) != -1 :\n is_exception = True\n break\n\n if not is_exception :\n print \" Stopping instance \" + instance_name\n instance.stop( )\n else :\n print \" Exception found, not stopping \" + instance_name\n\n else :\n print \"WARNING: found instance in non-running state.\"\n print \" name: \" + instance.tags[ 'Name' ]\n print \" id: \" + instance.id\n print \" state: \" + instance.state", "def terminate(filter=\".*\"):\n list_instances,list_headers = ec2list(filter=filter)\n if not list_instances:\n print(\"No instance matched the filter\")\n sys.exit(1)\n title = \"Pick the instances to terminate\"\n options = [ '{} ---- {} ---- {} ---- {}'.format(\n x[\"name\"],\n x[\"privateip\"],\n x[\"id\"],\n x[\"launchtime\"],\n x[\"state\"]) for x in list_instances ]\n\n list_selected = pick(options, title, multiselect=True, default_index=len(options)-1)\n del(options[:-1])\n list_ips = []\n if not list_selected:\n print(\"No host selected, exiting\")\n return\n list_ids = []\n for option,index in list_selected:\n list_ids.append(list_instances[index]['id'])\n print(\"Terminating instances {}\".format(list_ids))\n boto3.client(\"ec2\").terminate_instances(InstanceIds=list_ids)", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def stop(self):\n for process in self.process:\n process.stop()", "def stop_instance(InstanceId=None, Force=None):\n pass", "def terminate_all(self):\n self._stop_all('terminate')", "def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"", "def stop_instances(self, instance_ids):\n response = instance.stop_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = StopInstancesResponse.StopInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def stop_ec2_instance(client, instance_id, hibernate=False):\n\n response = client.stop_instances(\n 
InstanceIds=[instance_id],\n )\n return response", "def MultipleBFEBSInstances(self):\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.image = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n self.MultipleInstances()", "def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def terminate_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('TerminateInstances', params,\r\n [('item', Instance)], verb='POST')", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def test_ungraceful_shutdown_aws(self, resources, instances, aws_obj, force):\n aws_obj.stop_ec2_instances(instances=instances, wait=True, force=force)\n aws_obj.start_ec2_instances(instances=instances, wait=True)\n self.validate_cluster(resources, instances)", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def lambda_handler(event, context):\n Stop_Instances()" ]
[ "0.80661106", "0.78416044", "0.7770538", "0.76056653", "0.75015163", "0.7428186", "0.7423569", "0.7338515", "0.7267911", "0.7160578", "0.7159199", "0.7147509", "0.69454277", "0.6943702", "0.6860941", "0.68560904", "0.6803914", "0.67757475", "0.6771662", "0.67619", "0.6760722", "0.67322016", "0.67133784", "0.66832185", "0.6677803", "0.6654596", "0.6648252", "0.66282886", "0.6612227", "0.66117865" ]
0.8161892
0
Stop a certain instance The instance ID will be asked to the user
def stop_instance(self): instance_id = self._choose_among_running_instances() # Cancel if not instance_id: print 'Operation cancelled' return print '# Stopping the instance "%s"' % instance_id self.compute.stop_instance(instance_id) print 'The instance has been stopped'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Stop an instance\n response = ec2_resource.Instance(instance_id).stop(DryRun=False)\n print(response)\n print(\"\\nSuccessfully stopping instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def stop_instance(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n instanceid = args[\"Instance-ID\"].replace(\",\", \" \").split()\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n ec2.instances.filter(InstanceIds=instanceid).stop()\n\n message.message_text = \"Instance Stopped\"\n return message.to_json()", "def stop_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n ec2_client.stop_instances(InstanceIds=instances_ids)\n \n # wait till instance is stopped\n waiter = ec2_client.get_waiter(\"instance_stopped\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"\\n===EC2 instance has stopped!\")", "def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def stop_ec2_instance(client, instance_id, hibernate=False):\n\n response = client.stop_instances(\n InstanceIds=[instance_id],\n )\n return response", "def cancel_instance(self, instance_id):\r\n return self.guest.deleteObject(id=instance_id)", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", 
"def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue", "def terminate_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Terminate an instance\n response = ec2_resource.Instance(instance_id).terminate(DryRun=False)\n print(response)\n print(\"\\nSuccessfully terminating instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def stop(instance):\n if instance.state == STOPPED:\n return\n\n Queue.objects.add(function=\"terminate\", instance=instance)", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def stop(self, ids: list) -> str:\n # If no ids are passed raise Nothing to do\n if 'None' in ids:\n raise EC2Error('Nothing to do. Need IDS! 
Arrgh!!!')\n\n try:\n status = self.ec2.instances.filter(InstanceIds=ids).stop()\n return status\n except IOError as e:\n raise EC2Error('Error stopping EC2 Instances {}'.format(e))", "def stop(self, **kwargs):\n return self.client.api.stop(self.id, **kwargs)", "def lambda_handler(event, context):\n Stop_Instances()", "def stop(self, name=None, **kwargs):\n result = None\n compute_service = self._get_compute_service()\n _operation = None\n if name is None:\n return\n try:\n\n project_id = kwargs.pop('project_id', self.auth[\"project_id\"])\n zone = kwargs.pop('zone', self.default[\"zone\"])\n\n _operation = compute_service.instances().stop(\n project=project_id,\n zone=zone,\n instance=name).execute()\n\n self._wait_for_operation(compute_service,\n _operation,\n project_id,\n zone,\n name)\n\n # Get the instance details to update DB.\n result = self.__info(name, displayType=\"vm\")\n\n except Exception as se:\n print(se)\n if type(se) == HttpError:\n Console.error(\n f'Unable to stop instance {name}. Reason: {se._get_reason()}')\n else:\n Console.error(f'Unable to stop instance {name}.')\n\n return result", "def stop_instances(self, instance_ids):\n response = instance.stop_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = StopInstancesResponse.StopInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def terminateInstance(region,zone,instance_id):\n\ttry:\n\t\tec2 = boto.ec2.connect_to_region(region+'-'+zone)\n\t\tec2.terminate_instances(instance_ids=[instance_id])\n\t\treturn True\n\texcept Exception as e:\n\t\tlogError(e)\n\t\treturn False", "def Stop_Instances(ids=Get_Running_Instances()):\n ec2 = boto3.client('ec2')\n #call the features client from the boto3 library\n if not ids:\n #if the list of Ec2 instances returned is empty.\n print(\"No Instance in the state Running or pending\")\n else:\n ec2.stop_instances(InstanceIds=ids)\n #stop the instances using their id\n ec2.get_waiter('instance_stopped').wait(InstanceIds=ids)\n #wait for the state of the instances to change to stopped.\n print('instance {} was shutdown'.format(ids))", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def delete_instance(InstanceId=None, DeleteElasticIp=None, DeleteVolumes=None):\n pass", "def power_off(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.stop()\n self.instance_waiter.wait(instance, self.instance_waiter.STOPPED)\n return True", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def stop_instances(self, instance_ids=None, force=False):\r\n params = {}\r\n if force:\r\n params['Force'] = 'true'\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('StopInstances', params,\r\n [('item', Instance)], verb='POST')", "def stop_instance(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.stop_instance_with_http_info(id, **kwargs)\n else:\n (data) = self.stop_instance_with_http_info(id, **kwargs)\n return data", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def stop(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"stop\"\n run_command_with_services(context, user, remote, instance, stack, command, services)" ]
[ "0.83348054", "0.822144", "0.7949099", "0.77721786", "0.7509467", "0.7411322", "0.7342773", "0.73260367", "0.72915834", "0.7276886", "0.72549224", "0.72458875", "0.7214738", "0.7049063", "0.7026452", "0.69632554", "0.69612324", "0.68873054", "0.6869498", "0.6835195", "0.6815972", "0.6808564", "0.677136", "0.6752142", "0.67331713", "0.66904336", "0.6661543", "0.6640369", "0.6635591", "0.6609958" ]
0.83298826
1
Start an stopped instance The instance id wil be asked to the user
def start_instance(self): instance_id = self._choose_among_stopped_instances() # Cancel if not instance_id: print 'Operation cancelled' return print '# Starting the instance "%s"' % instance_id if self.compute.start_instance(instance_id): print 'The instance has been started' else: print 'The instance could not be started'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_instance(InstanceId=None):\n pass", "def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': '[email protected]'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='[email protected]',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def start_stop(now, start, stop, temporary_user, config, tz):\n if now.time() >= start and now.time() < stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'stopped', now, tz)\n action_on_instances(temporary_user.start_instances, action_required_ids, 'Start')\n elif now.time() >= stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'running', now, tz)\n action_on_instances(temporary_user.stop_instances, action_required_ids, 'Stop')", "def _choose_among_stopped_instances(self):\n\n instances = self.compute.get_not_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def start_instance(tcserver_dir, instance_name=\"instance1\"):\n 
print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop_instance(InstanceId=None, Force=None):\n pass", "def start_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n print(\"\\n===Creating EC2 instance.\")\n ec2_client.start_instances(InstanceIds=instances_ids)\n \n # wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"===EC2 instance is ready!\")", "def start_ec2_instance(client, instance_id, hibernate=False):\n\n response = client.start_instances(\n InstanceIds=[instance_id],\n )\n return response", "def stop_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Stop an instance\n response = ec2_resource.Instance(instance_id).stop(DryRun=False)\n print(response)\n print(\"\\nSuccessfully stopping instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def resume(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n\n # Try to unpause\n if vmrun.unpause(quiet=True) is not None:\n time.sleep(1)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n if ip:\n puts_err(colored.green(\"VM resumed on {}\".format(ip)))\n else:\n puts_err(colored.green(\"VM resumed on an unknown IP address\"))\n\n # Otherwise try starting\n else:\n started = vmrun.start()\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM already was started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM already was started on an unknown IP address\"))", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n 
str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def reboot_instance(InstanceId=None):\n pass", "def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True", "def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()", "def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n return instance", "def stop_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n ec2_client.stop_instances(InstanceIds=instances_ids)\n \n # wait till instance is stopped\n waiter = ec2_client.get_waiter(\"instance_stopped\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"\\n===EC2 instance has stopped!\")", "def test_launch_volume_as_instance(self, volume, instances_steps,\n volumes_steps):\n instance_name = next(generate_ids('instance'))\n volumes_steps.launch_volume_as_instance(\n volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)\n\n instances_steps.page_instances().table_instances.row(\n name=instance_name).wait_for_status('Active')\n instances_steps.delete_instance(instance_name)", "def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")", "def start_instances(self, ids):\n self.conn.start_instances(instance_ids=ids)", "def start_instance(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n instanceid = args[\"Instance-ID\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.start_instances(\n InstanceIds=[instanceid]\n )\n\n message.message_text = \"Instance Started\"\n return message.to_json()", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. 
Would you like to check its status?\"\n return question(msg)", "def _create_instance(cls, hook: EC2Hook):\n conn = hook.get_conn()\n try:\n ec2_client = conn.meta.client\n except AttributeError:\n ec2_client = conn\n\n # We need existed AMI Image ID otherwise `moto` will raise DeprecationWarning.\n images = ec2_client.describe_images()[\"Images\"]\n response = ec2_client.run_instances(MaxCount=1, MinCount=1, ImageId=images[0][\"ImageId\"])\n return response[\"Instances\"][0][\"InstanceId\"]", "def create_instance_by_image(self):\n print '# Start a new instance based on an existing AMI'\n ami = raw_input('Enter AMI (empty to cancel): ')\n\n # Cancel\n if not ami:\n print 'Operation cancelled'\n return\n\n # Start the instance\n if self.compute.create_instance_by_image(ami):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given AMI'", "def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True" ]
[ "0.77406687", "0.7394557", "0.69855464", "0.69245553", "0.6719281", "0.67054856", "0.67014045", "0.65921193", "0.6582727", "0.65723014", "0.6555857", "0.6545913", "0.651672", "0.6385068", "0.6359116", "0.63164145", "0.6295009", "0.6273814", "0.62709045", "0.61745346", "0.6171268", "0.61560476", "0.61342317", "0.6129772", "0.61264217", "0.611855", "0.6117587", "0.60961384", "0.60647196", "0.6058846" ]
0.7646898
1
List (print) all the volumes
def list_volumes(self): print '# Listing existing volumes' self.compute.list_volumes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def volumes(self):", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def get_basic_volume_info_all():\n vl = None\n try:\n d, err = xml_parse.run_gluster_command(\n '/usr/sbin/gluster volume info all --xml')\n if err:\n raise Exception(err)\n\n root = d[\"root\"]\n\n # Get the admin vol name so it can be excluded from the list\n admin_vol_name, err = config.get_admin_vol_name()\n if err:\n raise Exception(err)\n\n # Now get the all the volume info for user created volumes\n vl, err = xml_parse.get_volume_info(root, admin_vol_name)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting basic volume information for all volumes : %s' % str(e)\n else:\n return vl, None", "def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def volumes(self, details=True):\n if details:\n vol = _volume.Volume\n else:\n vol = _volume.VolumeDetail\n\n return list(self._list(vol, paginated=False))", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", 
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_complete_volume_info_all():\n\n return_list = []\n try:\n vl, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n # print 'vl is', vl\n\n if vl:\n for vol_info_dict in vl:\n\n rd, err = get_complete_volume_info(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n return_list.append(rd)\n\n except Exception, e:\n return None, 'Error getting complete volume information for all volumes: %s' % str(e)\n else:\n return return_list, None", "def volumes(self):\n return self._volumes", "def volumes(self) -> dict:\n return self.data[\"volumes\"]", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def listPVs(self):\n for pv in self._pvlist:\n print pv", "def ft_volumeslice( slice_name ):\n print \"slice: %s\" % slice_name\n \n volumes = get_volumeslice_volume_names( slice_name )\n \n print \"volumes mounted in slice %s:\" % slice_name\n for v in volumes:\n print \" %s:\" % v\n \n vs = get_volumeslice( v, slice_name )\n \n print \" %s\" % dir(vs)", "def show_volume(self, volume, check=True):\n cmd = 'cinder show ' + volume.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_SHOW_TIMEOUT, check=check)\n\n volume_table = output_parser.table(stdout)\n show_result = {key: value for key, value in volume_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(volume.id))\n if volume.name:\n assert_that(show_result['name'], is_(volume.name))\n if volume.description:\n assert_that(show_result['description'],\n is_(volume.description))", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def lsfbvol(self, args: str = \"\") -> List[str]:\n\n lsfbvol_cmd = f\"{self.base_cmd} lsfbvol {args}\"\n lsfbvol_out = runsub.cmd(lsfbvol_cmd)\n\n return lsfbvol_out", "def volume(self):\n return [node.volume for node in self]", "def get_volume_list(self, name_or_ip=\"\" , part=\"\", noresolve=False, _cfg=None) :\n command_list = [_cfg.binaries[\"vos\"],\"listvldb\", \"-cell\",\"%s\" % _cfg.cell ]\n if name_or_ip != \"\" :\n command_list += [ \"-server\", \"%s\" % name_or_ip ] \n if part != \"\" :\n command_list += [\"-part\", \"%s\" % part]\n if noresolve :\n command_list.append(\"-noresolve\")\n return command_list, PM.get_volume_list", "def volumes(self) -> Sequence['outputs.GetVolumeGroupSapHanaVolumeResult']:\n return pulumi.get(self, \"volumes\")", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def show_vdcs(self):\n for v in self.vdcs:\n print v", "def command_ls(self, list_what):\n if list_what in ('available', 'mounted', 'unmounted'):\n callback = getattr(self.environment, 'get_%s_ids' % list_what)\n lst = callback()\n else:\n lst = []\n if len(lst) != 0:\n 
print((\"\\n\".join(lst)))", "def volumes(self) -> Optional[Sequence['_core.v1.outputs.Volume']]:\n return pulumi.get(self, \"volumes\")", "def test_volumes_get(self):\n pass" ]
[ "0.80436873", "0.7764808", "0.7590659", "0.75419503", "0.7517373", "0.7301277", "0.72656804", "0.7239778", "0.722382", "0.72186816", "0.71733505", "0.7154594", "0.7012045", "0.6987291", "0.69027334", "0.68525237", "0.6819813", "0.6786619", "0.6753734", "0.6687461", "0.6674284", "0.66359806", "0.6627658", "0.6607365", "0.6603915", "0.6402433", "0.6394274", "0.6380179", "0.6358098", "0.633679" ]
0.8437514
0
Make a dictionary from the star file, key = 'class ', value = list[micrograph ]
def aclass_dict(micrograph_star): # get the class number as a list class_list = [] for line in micrograph_star: if len(line.split()) > 2: class_list.append(line.split()[6]) class_list = list(set(class_list)) # make the class_dict class_dict = {} for aclass in class_list: class_dict[aclass] = [] # fill each class with micrograph number for line in micrograph_star: if len(line.split()) > 2: class_number = line.split()[6] micrograph_number = str(int(line.split()[0][:6])) class_dict[class_number].append(micrograph_number) return class_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_classes_from_file(self, class_file):\n items = []\n with open(class_file) as f:\n for cl in f.readlines():\n # c:code, d:description\n item = [{'value': c, 'text': f'{c}: ' + d.replace('\\n','')} for c, d in [cl.split(',')]]\n items+=item\n \n return items", "def read_classes(file, class_list):\n\n if 'PSB' not in file.readline().strip():\n raise ('Not a valid PSB classification header', ImportError)\n\n _, num_models = file.readline().strip().split()\n modelcount = 0\n class_dict = {}\n\n while modelcount < int(num_models):\n line = file.readline().strip().split()\n if len(line) == 0:\n pass \n elif len(line) > 2 and line[2] == '0': # empty class label\n pass\n elif len(line) > 2:\n class_name = str(line[0])\n # if the class not in the class_list add it\n if class_name not in class_list:\n class_list.append(class_name)\n else: # add the class to the number of the model\n class_id = class_list.index(class_name) # give class id based on class_list index\n class_dict[line[0]] = (class_id, class_name)\n modelcount += 1\n\n return class_dict, class_list", "def import_graph(cls, filename, node_cls=GraphNode):\n with open(filename, 'r') as file:\n num_nodes = None\n graph = {}\n for line in file:\n if num_nodes is None:\n num_nodes = int(line)\n graph = {id_: node_cls(id_) for id_ in range(1, num_nodes + 1)}\n else:\n m, n, dist = line.split(' ')\n m = int(m)\n n = int(n)\n dist = float(dist)\n graph[m].neighbours[n] = graph[n]\n graph[n].neighbours[m] = graph[m]\n graph[m].distances[n] = dist\n graph[n].distances[m] = dist\n return graph", "def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes", "def __init__(self, path, type = 'mrk') :\n stim = np.loadtxt(path, skiprows = 1, usecols = (0,1), dtype = np.dtype(int))\n labels = np.loadtxt(path, skiprows = 1, usecols = 2, dtype = np.dtype(str))\n\n self.dic = dict.fromkeys(labels)\n for key, _ in self.dic.items() : self.dic[key] = []\n for k in range(len(stim)) :\n self.dic[labels[k]].append(stim[k, :])\n return None", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main", "def _identify_media(self):\n\n mediapaths = {k: v['medium'] for k, v in self.labels.items() if v.get('medium') is not None}\n\n media_dict = {}\n for label, path in mediapaths.items():\n if path.lower() == 'air':\n media_dict[label] = Air()\n else:\n media_dict[label] = from_yaml(path)\n return media_dict", "def _read_lick_list(cls, fname=__default__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in 
f:\n if line[0] != comment:\n l = line.split()\n attr = dict(\n band=(float(l[1]), float(l[2])),\n blue=(float(l[3]), float(l[4])),\n red=(float(l[5]), float(l[6])),\n unit='mag' if int(l[7]) > 0 else 'ew',\n )\n name = l[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr", "def parse_metadata(metadata):\n id_to_classes_recount = {}\n with open(metadata, \"r\") as file:\n header = next(file)\n for line in file:\n try:\n splitted_line = line.split(\"\\n\")[0].split(\"\\t\")\n file_id = splitted_line[22]\n project = splitted_line[77]\n sample_type = splitted_line[107]\n if project == \"TCGA-LIHC\":\n if sample_type == 'Primary Tumor':\n id_to_classes_recount[file_id] = 1\n elif sample_type == 'Solid Tissue Normal':\n id_to_classes_recount[file_id] = 0\n elif sample_type == 'Recurrent Tumor':\n id_to_classes_recount[file_id] = 1\n else:\n print(sample_type)\n except:\n pass\n return id_to_classes_recount", "def get_sketch_frame_mapping(frame_list, sketch_list, classes_list):\n dict_frame_sketches = {}\n # for every class\n for cl in tqdm(classes_list, desc=\"creating sketch-frame mapping\"):\n cl_frame_list = utils.filter_list_by_partial_word(cl, frame_list)\n cl_sketch_list = utils.filter_list_by_partial_word(cl, sketch_list)\n\n # 1. iterate over sketches (because they are fewer than frames)\n for s in cl_sketch_list:\n sketch_info = get_info(s)\n sketch_info[\"class\"] = cl\n # 2. look for corrsponding frame\n video_frames = utils.filter_list_by_partial_word(\n sketch_info[\"video_id\"], cl_frame_list\n )\n frame = utils.filter_list_by_partial_word(\n f\"frame_{sketch_info['frame']}\", video_frames\n )\n # sanity checks\n if len(frame) == 0:\n print(f\"{len(frame)} frames found for sketch {sketch_info['path']}\")\n continue # prevent crashing, if no frame is found\n # frame can infact be > 1 since extracted frames/segments can overlap\n # ignore this since frames extracted multiple times ARE still the same\n # elif len(frame) > 1:\n # pass\n\n frame = frame[0]\n sketch_info[\"frame_path\"] = frame\n # only using vid_fid as dict key ensures sketches on the same frame,\n # but of different classes to map to the same frame\n dict_key = f\"v{sketch_info['video_id']}_f{sketch_info['frame']}/\"\n # 3. 
Add sketch info to dict\n utils.add_to_dict_key(dict_frame_sketches, dict_key, sketch_info)\n\n return dict_frame_sketches", "def metadata_obj(filelike):\n\tif test_hachoir_extension(filelike.name):\n\t\tmetadata = metadata_for_filelike(filelike)\n\t\tif metadata:\n\t\t\tdata = dict([\n\t\t\t\t(data.key, data.values[0].value)\n\t\t\t\tfor data in metadata\n\t\t\t\tif data.values\n\t\t\t\t])\n\t\telse:\n\t\t\tdata=None\n\telif test_3D_extension(filelike.name):# 3D not in the extention \n\t\tdata = {'mime_type':'model'}\n\telse:\n\t\tdata = None\n\t\t\n\treturn data", "def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }", "def classify_by_weight(wts_file):\r\n\r\n nodes = {}\r\n data = []\r\n flag = 0\r\n with open(wts_file) as wf:\r\n for num, line in enumerate(wf, 0):\r\n if num == 0 or '#' in line or line == '\\n':\r\n continue\r\n else:\r\n data = line.split()\r\n if data[1] != '0':\r\n if data[1] not in nodes.keys():\r\n nodes[data[1]] = []\r\n nodes[data[1]].append(data[0])\r\n else:\r\n continue\r\n return nodes", "def _read_lick_list(cls, fname=__default_lick__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in f:\n if line[0] != comment:\n _line = line.split()\n attr = dict(\n band=(float(_line[1]), float(_line[2])),\n blue=(float(_line[3]), float(_line[4])),\n red=(float(_line[5]), float(_line[6])),\n unit='mag' if int(_line[7]) > 0 else 'ew',\n )\n name = _line[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr", "def parse_detection_file(detection_file):\n read_objects = {}\n with open(detection_file, 'r') as f:\n for line in f.readlines():\n line = line.rstrip() # remove newline character\n line = line.split(',')\n line[0] = int(line[0]) # timestamp_micro\n line[2: -1] = [float(x) for x in line[2: -1]] # from h to score\n\n o = WaymoObject(*line)\n try:\n read_objects[o.timestamp_micro].append(o)\n except KeyError:\n read_objects[o.timestamp_micro] = [o]\n\n return read_objects", "def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic", "def load_mpeg7():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'MPEG7')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of numpy array; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of 
image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def load_graph(file_name):\r\n citizens = []\r\n f = open(file_name, 'r')\r\n number_citizens = int(f.readline())\r\n \r\n # creates the citizen's list.\r\n for i in range(number_citizens):\r\n # creates citizen object\r\n citizen = Citizen(i)\r\n citizens.append(citizen)\r\n\r\n # we need this second loop because we cannot create the list of friends \r\n # if we don't have the whole list of citizens in memory.\r\n for citizen in citizens:\r\n # loads basic infor\r\n inf_list = f.readline().split(';')\r\n citizen.location = int(inf_list[1])\r\n citizen.influence_level = int(inf_list[2])\r\n citizen.proactivity_level = inf_list[3]\r\n \r\n # loads opinions\r\n opinions_list = f.readline().split(';')\r\n opinions = {}\r\n \r\n for op in opinions_list[:-1]:\r\n cat_weight = op.split(':')\r\n cat = int(cat_weight[0])\r\n weight = float(cat_weight[1])\r\n idea = Idea(1,'',cat, weight)\r\n opinions[cat] = idea\r\n\r\n citizen.opinions = opinions\r\n \r\n # loads friends \r\n friends_ids_list = f.readline().split(';')\r\n friends = []\r\n for friend_id in friends_ids_list[:-1]:\r\n # note that we match the position of the citizen in the citizens list with its id.\r\n friends.append(citizens[int(friend_id)])\r\n \r\n citizen.friends = friends\r\n \r\n f.close()\r\n \r\n return citizens", "def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict", "def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass", "def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) 
== 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! 
----------')\n pprint(result)\n print('---------------------------------------')\n\n return result", "def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list", "def load_air_sar2_instances(dirname: str, split: str):\n with PathManager.open(os.path.join(dirname, \"ImageSets\", \"Main\", split + \".txt\")) as f:\n fileids = np.loadtxt(f, dtype=np.str)\n\n dicts = []\n for fileid in fileids:\n anno_file = os.path.join(dirname, \"AIR-SARShip-2.0-xml\", fileid + \".xml\")\n jpeg_file = os.path.join(dirname, \"AIR-SARShip-2.0-data\", fileid + \".tiff\")\n\n tree = ET.parse(anno_file)\n\n r = {\n \"file_name\": jpeg_file,\n \"image_id\": fileid,\n \"height\": 1000,\n \"width\": 1000,\n }\n instances = []\n\n for obj in tree.find('objects').findall(\"object\"):\n cls = obj.find('possibleresult').find('name').text\n bbox = obj.find(\"points\")\n xmin = ymin = float(10000)\n xmax = ymax = 0. 
\n for bbox_node in bbox.findall('point'):\n coor = bbox_node.text.split(',')\n x, y = map(float, coor)\n xmin = min(xmin, x)\n ymin = min(ymin, y)\n xmax = max(xmax, x)\n ymax = max(ymax, y) \n \n instances.append(\n {\"category_id\": 0, \"bbox\": [xmin, ymin, xmax, ymax], \"bbox_mode\": BoxMode.XYXY_ABS}\n )\n r[\"annotations\"] = instances\n dicts.append(r)\n return dicts", "def parse(filepath):\n data = []\n\n with open(filepath, 'r') as file:\n line = next(file)\n while line:\n str_line = line.strip()\n if(str_line.startswith(\"Cell\")):\n dict_entry = {\n 'address': \"\",\n 'channel': \"\",\n 'frequency': \"\",\n 'quality': \"\",\n 'sLevel': \"\",\n 'essid': \"\"\n }\n dict_entry[\"address\"] = line.split(\": \")[1].strip()\n if(str_line.startswith(\"Channel\")):\n dict_entry[\"channel\"] = line.split(\":\")[1].strip()\n \n if(str_line.startswith(\"Frequency\")):\n reg_match = _RegExLib(str_line)\n dict_entry[\"frequency\"] = reg_match.frequency.group(1)\n\n \n\n if(str_line.startswith(\"Extra: Last beacon\")):\n data.append(dict_entry)\n \n \n\n line = next(file, None)\n\n return data", "def __init__(self, manifest, mode='train'):\n self.audio_links = [line.rstrip('\\n').split(' ')[0] for line in open(manifest)]\n self.labels_emotion = [int(line.rstrip('\\n').split(' ')[1]) for line in open(manifest)]\n self.labels_gender = [int(line.rstrip('\\n').split(' ')[2]) for line in open(manifest)]", "def read_file():\n\tgraph = {}\n\twith open('data/SCC.txt', 'r') as f:\n\t\told_index = '1'\n\t\tadjacency_list = []\n\t\tfor line in f:\n\t\t\tdata = line.split()\n\t\t\tnew_index = data[0]\n\t\t\tif old_index != new_index:\n\t\t\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\t\t\t\told_index = new_index\n\t\t\t\tadjacency_list = []\n\t\t\tadjacency_list.append(data[1])\n\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\n\tfor i in range(1, NUM_VERT + 1):\n\t\tif graph.get(str(i), False) is False:\n\t\t\tgraph[str(i)] = {'adj_nodes': [], 'is_explored': False}\n\treturn graph", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def parse_file(file_path):\n stations = defaultdict(dict) # Spares me 2 lines inside that loop\n for line in open(file_path):\n if '->' in line:\n origin, destiny = map(str.strip, line.split('->'))\n elif '-' in line:\n stations[origin][destiny] = [int(i) for i in line.split('-')]\n return stations", "def get_fastg_seqs_dict(fastg_name, G):\n fp = open(fastg_name, 'r')\n seqs = {}\n for name,seq,qual in readfq(fp):\n name_parts = re.sub('[:,]',\" \", name[:-1]).split()\n node = name_parts[0]\n seqs[node] = seq\n return seqs" ]
[ "0.6050446", "0.60270834", "0.56219846", "0.562041", "0.55898035", "0.55807143", "0.55349976", "0.5500859", "0.5495069", "0.54794693", "0.54678434", "0.5459361", "0.54553705", "0.5414836", "0.5407407", "0.53714395", "0.53606534", "0.53194445", "0.5286147", "0.5281463", "0.52779496", "0.527241", "0.5230422", "0.5177534", "0.5168556", "0.51228", "0.51141095", "0.51013297", "0.50734156", "0.50566363" ]
0.808465
0
Returns event intervals for the specified `name` and `task`. Name here implies a `section` or `counter` name.
def event_intervals(self, name=None, task=None, interval=None, match_exact=True): if name is None: intervals = \ IntervalList(sorted_items(self._tmw_intervals_by_name.values())) elif isinstance(name, string_types): if match_exact: intervals = self._tmw_intervals_by_name[name] else: intervals = IntervalList(sorted_items(value for key, value in self._tmw_intervals_by_name.iteritems() if name in key)) else: # assume iterable (must match exact) intervals = IntervalList(sorted_items(value for key, value in self._tmw_intervals_by_name.iteritems() if key in name)) intervals = intervals.slice(interval=interval) if task: intervals = IntervalList(filter(lambda it: it.event.task == task, intervals)) return intervals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frame_intervals(self, task=None, interval=None):\n names = ['animator:'] + UI_THREAD_DRAW_NAMES + RENDER_THREAD_DRAW_NAMES\n return self.event_intervals(name=names, task=task,\n interval=interval, match_exact=False)", "def get_tasks_ids(self, name=None):\n\n m_return = {}\n\n for x in self.get_tasks().findall(\"task\"):\n m_return[x.find(\"name\").text] = x.get(\"id\")\n\n if name:\n return {name : m_return[name]}\n else:\n return m_return", "def _input_intervals():\n last_timestamp = self._trace.interval.start\n for ir_event in filter_by_task(all_tasks, 'name', 'InputReader', 'any'):\n if last_timestamp <= ir_event.interval.end:\n yield Interval(last_timestamp, ir_event.interval.end)\n last_timestamp = ir_event.interval.end", "def ui_frame_intervals(self, task=None, interval=None):\n return self.event_intervals(name=UI_THREAD_DRAW_NAMES, task=task,\n interval=interval, match_exact=False)", "def render_frame_intervals(self, task=None, interval=None):\n return self.event_intervals(name=RENDER_THREAD_DRAW_NAMES, task=task,\n interval=interval, match_exact=False)", "def list(self, name=None):\n if name is not None:\n tasks = self._list_all_tasks_from_single_dataset(name)\n else:\n tasks = self._list_all_tasks_from_all_datasets()\n return tasks", "def getTaskIdsFromName(tasks_name):\n ids = []\n for name in tasks_name:\n task_obj = Tafv2Task.objects.get(script=name)\n ids.append(task_obj.id)\n\n return ids", "def get_tasks_by_name(self, name: str) -> Set[\"Task\"]: # noqa: F821\n find = set()\n for task in self.tasks.values():\n if task.name == name:\n find.add(task)\n return find", "def list_events(self, name):\n return self._get_events(name)", "def input_latencies(self, irq_name, interval=None):\n try:\n return self._input_latencies.slice(interval=interval)\n except AttributeError:\n return self._input_latency_handler(irq_name=irq_name).\\\n slice(interval=interval)", "def get_timespan(name):\n \n if name not in pytplot.data_quants.keys():\n print(\"That name is currently not in pytplot\") \n return\n\n return pytplot.data_quants[name].attrs['plot_options']['trange'][0], pytplot.data_quants[name].attrs['plot_options']['trange'][1]", "def get(self, name, task):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n return self.manager.data[\"dataset\"][name][\"tasks\"][task]", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def get_task_by_name(self, task_name):\n for task in self.tasks:\n if task.name == task_name:\n logger.debug(\"Returning task with name '%s': '%s'\", task_name, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the name {} can not be found.\".format(task_name))", "def read_todo(taskname):\n autodel()\n with open(todofile, 'r') as todo:\n for task in todo:\n task = json.loads(task)\n if taskname in task['name']:\n return [task['name'], \n task['deadline'], \n task['priority'],\n task['reminder'],\n task['no_del']]\n return None", "def get_shadow_scheduler_tasks(self, name):\n # First look for an index.\n url = SHADOW_SCHEDULER_ARTIFACT_URL.format(rev=self.rev, name=name)\n r = requests.get(url)\n\n if r.status_code != 200:\n if name not in self._shadow_scheduler_artifacts:\n return None\n r = 
requests.get(self._shadow_scheduler_artifacts[name])\n\n tasks = r.text\n return set(tasks.splitlines())", "def getMakeSpan(tasks):\n ms = 0\n for t in tasks:\n if t.tEnd > ms:\n ms = t.tEnd\n return ms", "def _get_params_ranges(task: str,) -> Dict[str, Any]:\n params_file = os.path.join(\n os.path.dirname(__file__), \"params\", \"xgboost.yml\"\n )\n params = utils.read_yaml(params_file)\n\n if \"regression\" in task.lower():\n params.update({\"objective\": \"reg:squarederror\"})\n return params\n if \"binary\" in task.lower():\n params.update({\"objective\": \"binary:logistic\"})\n return params\n raise ValueError(f\"{task} is not a supported task.\")", "def get(self, task_name):\n try:\n return self._registry[task_name]\n except KeyError:\n raise RuntimeError('Task {} is not registered'.format(task_name))", "def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n }\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output", "def getTimeSegments(segments,bounds,radius,starttime,endtime,magrange,catalog,contributor):\n stime = starttime\n etime = endtime\n \n dt = etime - stime\n dtseconds = dt.days*86400 + dt.seconds\n #segment 1\n newstime = stime\n newetime = stime + timedelta(seconds=dtseconds/2)\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n #segment 2\n newstime = newetime\n newetime = etime\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,\n starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,\n contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n\n return segments", "def app_launch_latencies(self, task=None):\n launch_latencies = []\n launched_events = list(self.launched_app_events())\n print 'start'\n print launched_events\n launched_events.append(None)\n\n for 
curr_app_event, next_app_event in zip(launched_events, launched_events[1:]):\n event = curr_app_event.event\n next_event = next_app_event.event if next_app_event else None\n if task and event.task != task:\n continue\n start_time, end_time = \\\n self._start_launch_time(event), self._end_launch_time(event, next_event)\n if (start_time and end_time) is not None:\n launch_interval = Interval(start_time, end_time)\n launch_latencies.append(LaunchLatency(task=event.task,\n interval=launch_interval,\n latency=launch_interval.duration))\n return launch_latencies", "def read_timestamps(self, tasks):\n from reframe.core.deferrable import evaluate\n\n self.begin_stamps = []\n self.end_stamps = []\n for t in tasks:\n with open(evaluate(t.check.stdout), 'r') as f:\n self.begin_stamps.append(float(f.readline().strip()))\n self.end_stamps.append(float(f.readline().strip()))\n\n self.begin_stamps.sort()\n self.end_stamps.sort()", "def interval(self):\n return Intersection(*(a.interval for a in self.args))", "def _build_intervals(self) -> List[Tuple[datetime.datetime, datetime.datetime]]:\n if self.granularity == 'HOUR':\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['HOUR'][1]),\n self.GRANULARITIES['HOUR'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(hours=1)\n elif self.granularity == 'MONTH':\n # no need to split requests for monthly data\n days = max((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['MONTH'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n else:\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['DAY'][1]),\n self.GRANULARITIES['DAY'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n\n time_pointer = self.bounds[1]\n intervals = []\n while time_pointer > self.bounds[0]:\n upper = time_pointer\n time_pointer -= interval_length\n intervals.append((time_pointer, upper))\n time_pointer -= offset\n return intervals", "async def list_tasks():", "def collect_times(self,calc,sn,group):\n\t\n\t\tif type(calc['slice_name'])==str:\n\t\t\treturn self.slice(sn)[calc['slice_name']]['all' if not group else group]['timeseries']\n\t\telse: \n\t\t\treturn np.concatenate([self.slice(sn)[sname]['all' if not group else group]['timeseries']\n\t\t\t\tfor sname in calc['slice_name']])", "def taskid(name):\n return \"(select id from event_type where name = '{}')\".format(name)", "def get_data(name, sections=sections):\n return sections.get(name, {}).get(\"data\", [])", "def intervals(parser, token ):\n try:\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError, \"%r needs arguments\" % token.contents.split()[0]\n m = re.search(r'(.+) as (.+)', arg)\n if not m:\n raise template.TemplateSyntaxError, \"You need to specify 'as' variable \"\n arguments, asname = m.groups()\n if not '=' in arguments:\n return intervals_args(parser, token )\n return IntervalNode( arguments, asname )" ]
[ "0.5835097", "0.5724242", "0.5594315", "0.54250634", "0.5367838", "0.53254664", "0.52327496", "0.5148218", "0.5067933", "0.49952528", "0.4971878", "0.48637992", "0.48349625", "0.48186946", "0.47934702", "0.4789053", "0.47088632", "0.46823967", "0.46184328", "0.4609138", "0.45909464", "0.45898697", "0.4589132", "0.45864525", "0.45739335", "0.45737755", "0.45587602", "0.4557003", "0.45518446", "0.4549719" ]
0.7265889
0
Returns intervals during which a frame from the render thread was processed.
def render_frame_intervals(self, task=None, interval=None): return self.event_intervals(name=RENDER_THREAD_DRAW_NAMES, task=task, interval=interval, match_exact=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frames(self):\n return self._frames", "def get_frame_time(self):\n return self.get_timings().frame_time", "def get_frame_clock(self): # real signature unknown; restored from __doc__\n pass", "def captured_frames(self):\n return self._captured_frames", "def get_frames(self):\n\n log(\"Getting frames for {} at {}\".format(self._location, self._t0))\n fn_get = lambda time_str: self.get_wximg(time_str)\n pool0 = multiprocessing.dummy.Pool(self._frames)\n raw = pool0.map(fn_get, self.get_time_strs())\n wximages = [x for x in raw if x is not None]\n if not wximages:\n return None\n pool1 = multiprocessing.dummy.Pool(len(wximages))\n background = self.get_background()\n if background is None:\n return None\n fn_composite = lambda x: self._pilimg.alpha_composite(background, x)\n composites = pool1.map(fn_composite, wximages)\n legend = self.get_legend()\n if legend is None:\n return None\n loop_frames = pool1.map(lambda _: legend.copy(), composites)\n fn_paste = lambda x: x[0].paste(x[1], (0, 0))\n pool1.map(fn_paste, zip(loop_frames, composites))\n return loop_frames", "def get_frame(self):\n BaseCamera.last_access = time.time()\n\n # wait for a signal from the camera thread\n BaseCamera.event.wait()\n BaseCamera.event.clear()\n\n return BaseCamera.frame", "def get_frame(self):\n return self.frames.get()", "def frameTimes(self):\n sr = self.sampleRate\n offset = self.activeOffset\n stride = self.activeStride\n nf = self.numFrames\n t = np.arange(nf) * (stride[0] / sr) + (offset / sr)\n return t", "def get_frames(self):\n video_getter = Thread(target=self.streamer)\n video_getter.daemon = True\n video_getter.start()", "def frame_intervals(self, task=None, interval=None):\n names = ['animator:'] + UI_THREAD_DRAW_NAMES + RENDER_THREAD_DRAW_NAMES\n return self.event_intervals(name=names, task=task,\n interval=interval, match_exact=False)", "def framerate(self, interval=None):\n total_frames = 0.0\n\n # These are times when SF begins compositing.\n vsync_events = self.event_intervals(name='VSYNC-sf', interval=interval)\n if not vsync_events:\n vsync_events = self.event_intervals(name='VSYNC', interval=interval)\n\n for vsync_event_a, vsync_event_b in zip(vsync_events, vsync_events[1:]) : \n frames_presented = len(self.event_intervals('postFramebuffer', \n interval=vsync_event_a.interval))\n # Below required to skip interval when we had nothing to do.\n # As this event 'toggles' every VSYNC when SurfaceFlinger has work\n # to do. If nothing is done (i.e. no 'postFramebuffer' events)\n # there was jank in this interval.\n if vsync_event_a.value != vsync_event_b.value and frames_presented:\n total_frames += frames_presented\n \n present_time = self.present_duration(interval=interval)\n return round(total_frames/present_time, 1) if present_time != 0.0 else float('nan')", "def sub_render(self):\n frame_list = self._construct_frame_list()\n arg_holder = [[self._blender_path, \"-b\", self._blender_file_path, \"-P\", self._script_path, f\"{start} {end}\"]\n for start, end in frame_list]\n\n start = f\"{datetime.now().hour}-{datetime.now().minute}-{datetime.now().second}\"\n start_clock = time.time()\n print(f\"Started {len(arg_holder)} processes of lengths {[end - start for start, end in frame_list]} at {start}\")\n\n sub_p = []\n for args in arg_holder:\n p = subprocess.Popen(args)\n sub_p.append(p)\n\n for sub in sub_p:\n sub.wait()\n\n end = f\"{datetime.now().hour}-{datetime.now().minute}-{datetime.now().second}\"\n end_clock = time.time()\n print(f\"Finished at {end}! 
It took {round((end_clock - start_clock) / 60, 2)} minutes to process\")", "def next_frame(self):\n # Tick will limit framerate and return delta in millis\n delta = self.clock.tick(self.framerate)\n\n # Convert delta to seconds\n delta /= 1000.0\n\n # Limit worst-case delta to make debugging easier\n delta = min(delta, FrameTimer.MAX_FRAME_TIME)\n\n return delta", "def get_rgb_frame(self) -> np.array:\n return self.rstate.render_frame_rgb(self.rsimulator)", "def frames(self) -> Optional[Tuple[int, ...]]:\n return self._frames", "def get_fps(self):\n # Take difference.\n interframe_intervals = np.diff(self.data[\"t\"])\n\n # Inter-frame interval in milliseconds.\n mean_interval = np.mean(interframe_intervals)\n fps = round(1 / (mean_interval / 1000))\n\n return int(fps)", "def frames(self):\n return list(self._frames)", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)", "def get_fps(self):\n return self._num_frames / (datetime.now() - self._start).total_seconds()", "def ui_frame_intervals(self, task=None, interval=None):\n return self.event_intervals(name=UI_THREAD_DRAW_NAMES, task=task,\n interval=interval, match_exact=False)", "def frames(self):\n if self.integration is None:\n return None\n return self.integration.frames", "def get(self):\n return self.frames", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def frame_deltas(self):\n return self._frame_deltas", "def get_frame(self):\n\t\tframe = None\n\t\twhile not frame:", "def get_frame_time(self, f):\n return f * self.get_frame_duration()", "def get_frame(self):\n return self.last_frame", "def get_between_frame(self):\n \n return self.between_frame", "def get_frame(self):\n return self.get_frame_at_index(self.current_frame)", "def getFrameList(self):\n with self.frameLock:\n return list(self.frameList)" ]
[ "0.6350662", "0.6295398", "0.6266249", "0.6143987", "0.6121702", "0.6075664", "0.60559654", "0.6034855", "0.59660757", "0.5928639", "0.5918026", "0.5916097", "0.5895267", "0.58773875", "0.5867642", "0.58481395", "0.58152115", "0.5806984", "0.58036256", "0.57969207", "0.5767734", "0.5706908", "0.57042426", "0.56915575", "0.56867945", "0.5684593", "0.56785333", "0.56742394", "0.5658069", "0.56552327" ]
0.64448464
0
Returns intervals during which a frame from the UI thread was processed.
def ui_frame_intervals(self, task=None, interval=None): return self.event_intervals(name=UI_THREAD_DRAW_NAMES, task=task, interval=interval, match_exact=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frame_intervals(self, task=None, interval=None):\n names = ['animator:'] + UI_THREAD_DRAW_NAMES + RENDER_THREAD_DRAW_NAMES\n return self.event_intervals(name=names, task=task,\n interval=interval, match_exact=False)", "def render_frame_intervals(self, task=None, interval=None):\n return self.event_intervals(name=RENDER_THREAD_DRAW_NAMES, task=task,\n interval=interval, match_exact=False)", "def get_frame(self):\n return self.frames.get()", "def frames(self):\n return self._frames", "def get_frames(self):\n\n log(\"Getting frames for {} at {}\".format(self._location, self._t0))\n fn_get = lambda time_str: self.get_wximg(time_str)\n pool0 = multiprocessing.dummy.Pool(self._frames)\n raw = pool0.map(fn_get, self.get_time_strs())\n wximages = [x for x in raw if x is not None]\n if not wximages:\n return None\n pool1 = multiprocessing.dummy.Pool(len(wximages))\n background = self.get_background()\n if background is None:\n return None\n fn_composite = lambda x: self._pilimg.alpha_composite(background, x)\n composites = pool1.map(fn_composite, wximages)\n legend = self.get_legend()\n if legend is None:\n return None\n loop_frames = pool1.map(lambda _: legend.copy(), composites)\n fn_paste = lambda x: x[0].paste(x[1], (0, 0))\n pool1.map(fn_paste, zip(loop_frames, composites))\n return loop_frames", "def captured_frames(self):\n return self._captured_frames", "def get_frame_range(self):\n return map(\n int,\n pm.timeControl(\n pm.melGlobals['$gPlayBackSlider'],\n q=1,\n range=1\n )[1:-1].split(':')\n )", "def get_frame_range(self):\n #self._root = self.get_root_node()\n #startFrame = int(self._root.knob('first_frame').value())\n #endFrame = int(self._root.knob('last_frame').value())\n start_frame = self.comp.GetAttrs()['COMPN_GlobalStart']\n end_frame = self.comp.GetAttrs()['COMPN_GlobalEnd']\n return start_frame, end_frame", "def get(self):\n return self.frames", "def get_frames(self):\n video_getter = Thread(target=self.streamer)\n video_getter.daemon = True\n video_getter.start()", "def frames(self):\n if self.integration is None:\n return None\n return self.integration.frames", "def frames(self):\n return list(self._frames)", "def get_frame(self):\n\t\tframe = None\n\t\twhile not frame:", "def get_frame(self):\n return self.get_frame_at_index(self.current_frame)", "def frames(self) -> Optional[Tuple[int, ...]]:\n return self._frames", "def get_frame(self):\n return self.frame", "def getFrameList(self):\n with self.frameLock:\n return list(self.frameList)", "def get_between_frame(self):\n \n return self.between_frame", "def get_frame(self):\n BaseCamera.last_access = time.time()\n\n # wait for a signal from the camera thread\n BaseCamera.event.wait()\n BaseCamera.event.clear()\n\n return BaseCamera.frame", "def frame_deltas(self):\n return self._frame_deltas", "def get_frame_time(self):\n return self.get_timings().frame_time", "def get_frame(self):\n return self.last_frame", "def get_frame_clock(self): # real signature unknown; restored from __doc__\n pass", "def baselineFrames(self):\n frames=[]\n for tag,T1,T2 in [x for x in self.tags if x[0]=='baseline']:\n for i,timePoint in enumerate(self.conf['times']):\n if timePoint>=T1*60 and timePoint<=T2*60:\n frames.append(i)\n return frames\n else:\n return [0]", "def get_frame(self, ind):\n pass", "def get_frame_range(self):\n raise NotImplementedError(\"get_frame_range is not implemented\")", "def frame(self):\n return self._frame", "def frame(self):\n return self._frame", "def frame(self):\n return self._frame", "def between_blocks(self, 
frame):\n return []" ]
[ "0.63190186", "0.6236168", "0.61133915", "0.6058052", "0.5944484", "0.5941922", "0.5881279", "0.58722425", "0.58456933", "0.5781916", "0.5739827", "0.57152134", "0.57075226", "0.56388026", "0.56256515", "0.5613554", "0.56118697", "0.55820066", "0.54994404", "0.5483587", "0.5451633", "0.54452", "0.54392093", "0.54332834", "0.54159516", "0.5406104", "0.54041004", "0.54041004", "0.54041004", "0.5378221" ]
0.6766843
0
Since SurfaceFlinger (SF) in Android updates the framebuffer only when there's work to be done, measuring FPS in the traditional sense as frames / seconds would be incorrect, as the measured time might include intervals when no screen updates occurred. To account for this, we use the SF VSYNC signal, which is set to 0 when SurfaceFlinger has work to do. We accumulate the intervals when a framebuffer was posted and use this as the framerate.
def framerate(self, interval=None): total_frames = 0.0 # These are times when SF begins compositing. vsync_events = self.event_intervals(name='VSYNC-sf', interval=interval) if not vsync_events: vsync_events = self.event_intervals(name='VSYNC', interval=interval) for vsync_event_a, vsync_event_b in zip(vsync_events, vsync_events[1:]) : frames_presented = len(self.event_intervals('postFramebuffer', interval=vsync_event_a.interval)) # Below required to skip interval when we had nothing to do. # As this event 'toggles' every VSYNC when SurfaceFlinger has work # to do. If nothing is done (i.e. no 'postFramebuffer' events) # there was jank in this interval. if vsync_event_a.value != vsync_event_b.value and frames_presented: total_frames += frames_presented present_time = self.present_duration(interval=interval) return round(total_frames/present_time, 1) if present_time != 0.0 else float('nan')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_fps(self):\n time_difference = self.time_array[-1] - self.time_array[0]\n time_difference_in_seconds = time_difference.to_sec()\n if time_difference_in_seconds == 0:\n pass\n self.fps = self.buffer_size / time_difference_in_seconds\n rospy.loginfo(\"[EulerianMotionMagnification] Estimated FPS: \" + str(self.fps) + \" (Measured timespan: \" + str(time_difference_in_seconds) + \"s)\")\n rospy.loginfo(\"[EulerianMotionMagnification] Video array length: \" + str(len(self.video_array)))", "def calcFrameRate(self):\n\n tot = 0\n count = 0\n for session in self.sessions:\n for sample in session.samples:\n if not sample.isLoading:\n tot += sample.fps\n count += 1\n if count:\n self.avgFps = tot / count\n self.lowFps = (self.avgFps < 10)\n self.highFps = (self.avgFps > 25)", "def get_fps(self):\n if len(self.times) >= 2:\n dif = np.diff(self.times)\n fps = 1. / dif.min()\n # if the FPS crosses 500, do not update it\n if fps <= 500:\n self.fps = fps\n return self.fps\n else:\n return 0.", "def limit_fps(fps):\n global _last_update\n elapsed = time.time() - _last_update\n if elapsed < 1 / fps:\n time.sleep(1 / fps - elapsed)\n _last_update = time.time()", "def update(self, max_updates = 0):\n \n assert self.started, \"You must call 'start' before using a GameClock.\" \n\n real_time_now = self.get_real_time()\n \n self.real_time_passed = real_time_now - self.real_time\n self.real_time = real_time_now\n \n self.clock_time += self.real_time_passed\n \n if not self.paused:\n self.virtual_time += self.real_time_passed * self.speed\n \n update_count = 0\n while self.game_time + self.game_tick < self.virtual_time:\n \n self.game_frame_count += 1\n self.game_time = self.game_frame_count * self.game_tick\n yield (self.game_frame_count, self.game_time)\n \n if max_updates and update_count == max_updates:\n break\n \n self.between_frame = ( self.virtual_time - self.game_time ) / self.game_tick\n \n if self.real_time_passed != 0:\n self.fps = 1.0 / self.real_time_passed\n else:\n self.fps = 0.0\n \n self.fps_sample_count += 1\n \n if self.real_time - self.fps_sample_start_time > 1.0:\n \n self.average_fps = self.fps_sample_count / (self.real_time - self.fps_sample_start_time)\n self.fps_sample_start_time = self.real_time\n self.fps_sample_count = 0", "def get_fps(self):\n # Take difference.\n interframe_intervals = np.diff(self.data[\"t\"])\n\n # Inter-frame interval in milliseconds.\n mean_interval = np.mean(interframe_intervals)\n fps = round(1 / (mean_interval / 1000))\n\n return int(fps)", "def get_fps(self):\n return self._num_frames / (datetime.now() - self._start).total_seconds()", "def cap_frame_rate(self):\n now = pygame.time.get_ticks()\n milliseconds_since_last_update = now - self.last_update_completed\n\n time_to_sleep = self.desired_milliseconds_between_updates - milliseconds_since_last_update\n if time_to_sleep > 0:\n pygame.time.delay(int(time_to_sleep))\n self.last_update_completed = now", "def fps(self):\n\t\treturn float(len(self.buf)) / (self.buf[-1][0] - self.buf[0][0])", "def update_fps(self):\n self.fps.tick()\n\n\trange_str = \"\"\n gd = self.main_curve_dialog.curve.get_data()[1]\n\trange_str = \"Max: %s, Min: %s, Avg: %0.5s \" \\\n\t\t % (numpy.max(gd), numpy.min(gd), numpy.average(gd))\n\n\n fps_text = \"%s Update: %s FPS\" % (range_str, self.fps.rate())\n self.action_fps_display.setText(fps_text)", "def update_frame(self):\r\n while not self.stopped:\r\n if not self.grabbed or not self.cap.isOpened():\r\n self.stop()\r\n else:\r\n self.grabbed, self.frame = 
self.cap.read()\r\n try:\r\n if self.grabbed:\r\n #self.New_Frame_Time = time.time()\r\n #self.FPS = 1/(self.New_Frame_Time-self.Old_Frame_Time)\r\n #self.Old_Frame_Time = self.New_Frame_Time\r\n self.FrameCount += 1\r\n else:\r\n print(f'Grabbed status is: {self.grabbed}')\r\n #self.Old_Frame_Time = time.time()\r\n except ZeroDivisionError:\r\n print(\"Division by zero error when finding video feed fps\")\r\n self.FPS = 0\r\n self.Old_Frame_Time = time.time()", "def get_fps(self):\n raise NotImplementedError(\"get_fps is not implemented\")", "def _GetSurfaceFlingerFrameData(self):\n window_name = self._GetSurfaceViewWindowName()\n command = ['dumpsys', 'SurfaceFlinger', '--latency']\n # Even if we don't find the window name, run the command to get the refresh\n # period.\n if window_name:\n command.append(window_name)\n output = self._device.RunShellCommand(command, check_return=True)\n return ParseFrameData(output, parse_timestamps=bool(window_name))", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. / self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def __videoThread(self):\n\n self.frameList = []\n\n fpsTimer = FpsTimer(self.fps)\n printf(\"Starting videoStream thread.\")\n while self.running:\n fpsTimer.wait()\n if not fpsTimer.ready(): continue\n if self.setCamera is not None: self.__setNewCamera(self.setCamera)\n if self.paused: continue\n if self.cap is None: continue\n\n\n # Get a new frame\n ret, newFrame = self.cap.read()\n\n if not ret: # If a frame was not successfully returned\n printf(\"ERROR: while reading frame from Cam. Setting camera again...\")\n self.__setNewCamera(self.cameraID)\n cv2.waitKey(1000)\n continue\n\n\n # Do frame related work\n with self.frameLock:\n self.frame = newFrame\n\n # Add a frame to the frameList that records the 5 latest frames for Vision uses\n self.frameList.insert(0, self.frame.copy())\n # print(\"len\", len(self.frameList), \"Curr frames: \", [id(frame) for frame in self.frameList])\n while len(self.frameList) > 10:\n del self.frameList[-1]\n\n # Keep track of new frames by counting them. (100 is an arbitrary number)\n if self.frameCount >= 100:\n self.frameCount = 0\n else:\n self.frameCount += 1\n\n\n # Run any work functions that must be run. Expect no results. 
Work should be run before filters.\n if len(self.workList) > 0:\n # print(\"Work: \", self.workList)\n with self.workLock:\n for workFunc in self.workList:\n workFunc(self.frame)\n\n\n\n # Run any filters that must be run, save the results in self.filterFrame\n if len(self.filterList) > 0:\n # print(\"Filters: \", self.filterList)\n with self.filterLock:\n filterFrame = self.getFrame()\n for filterFunc in self.filterList:\n filterFrame = filterFunc(filterFrame)\n\n # Draw FPS on the screen\n fps = str(int(round(fpsTimer.currentFPS, 0)))\n cv2.putText(filterFrame, fps, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.25, (255, 255, 255), 2)\n\n self.filterFrame = filterFrame\n\n\n else:\n self.filterFrame = self.frame\n\n printf(\"VideoStream Thread has ended\")", "def flush_buffer(self):\n t1 = time.time()\n while True:\n t2 = time.time()\n if t2-t1>0.03:\n break\n t1 = t2\n self.cam.read()", "def get_fps(self):\n #return int(self._root.knob('fps').getValue())\n return None", "def get_fps(clock):\n if video_mode:\n return \"30\" # Video FPS will be 30\n else:\n return str(int(round(clock.get_fps(), 0)))", "def get_fps(self):\n return self.fps", "def update(self):\n self.t = time()\n self.frame += 1\n self.loop(self)\n self.draw_bg()\n self.draw_C()\n if self.cursor:\n self.draw_rect(*self.pos, RED, 2)\n self.draw_grid()\n self.draw_T()\n self.show_info()\n for (surf, rect) in self.surf_list:\n self.screen.blit(surf, rect)\n pygame.display.update()\n self.clock.tick(self.fps)", "def get_fps(self):\n if not self.count_fps:\n logging.error(\"No FPSCounter set\")\n return None\n return self.fps.get_fps()", "def get_fps(self):\n if not self.count_fps:\n logging.error(\"No FPSCounter set\")\n return None\n return self.fps.get_fps()", "def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. 
/ self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off", "def update_fps(self, fps):\n self.fps_history.append(fps)\n if len(self.fps_history) > FPS_AVERAGES:\n self.fps_history.pop(0)\n\n self.fps_estimate = np.mean(self.fps_history)\n return self.fps_estimate", "def record_throughput(cls, obj, interval=10):\n\n while True:\n obj._reset_receiving_data_throughput()\n obj._reset_consuming_data_throughput()\n\n time.sleep(interval)\n\n print(f'Receiving FPS: {obj._get_receiving_data_throughput() / interval:.2f}, '\n f'Consuming FPS: {obj._get_consuming_data_throughput() / interval:.2f}')", "def get_fps(self):\n \n return self.fps, self.average_fps", "def show_fps(self, screen):\n fps = self.clock.get_fps()\n self.pgtext.display_text(\"FPS: {0:.2f}\".format(fps), screen, 600, 10)", "def start(self):\n \n if self.started:\n return\n \n self.clock_time = 0.\n self.virtual_time = 0.\n self.game_time = 0.\n self.game_frame_count = 0\n self.real_time_passed = 0.\n \n self.real_time = self.get_real_time()\n self.started = True\n \n self.fps = 0.0\n self.fps_sample_start_time = self.real_time\n self.fps_sample_count = 0", "def fps(x, y, i):\n\n # Special case for the edges.\n if i < 2:\n return (y[i+1] - y[i]) / (x[i+1] - x[i])\n elif i > len(x) - 3:\n return (y[i] - y[i-1]) / (x[i] - x[i-1])\n\n else:\n h = x[i] - x[i-1]\n f0 = y[i]\n f1 = y[i+1]\n f2 = y[i+2]\n f3 = y[i-1]\n f4 = y[i-2]\n return (-f2 + 8*f1 - 8*f3 + f4) / (12 * h)", "def run(self):\n\t\twhile True:\n\t\t\tself.clock.tick(self.settings.max_fps)\n\t\t\tself._check_events()\n\t\t\tself._update_screen()" ]
[ "0.6668513", "0.61614895", "0.6137861", "0.60783404", "0.6046331", "0.60180646", "0.6015373", "0.5965686", "0.59367114", "0.59102696", "0.5899993", "0.58347744", "0.5731469", "0.5689734", "0.55668485", "0.55627173", "0.55482215", "0.553777", "0.5526215", "0.5490262", "0.5389928", "0.5389928", "0.5374728", "0.5373918", "0.5272044", "0.5269954", "0.5245516", "0.52435917", "0.5193355", "0.5169685" ]
0.78960985
0
Returns a list of intervals during which a jank (missed frame) occurred.
def jank_intervals(self, interval=None): missedFrames = self.event_intervals('FrameMissed', interval=interval) return IntervalList(filter(lambda x:x.value==1, missedFrames))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def candle_intervals(self):\n pass", "def calculate_intervals(tick_times: List[float]) -> List[float]:\n return [tick_times[i] - tick_times[i - 1] for i in range(1, len(tick_times))]", "def dropped_frames(self):\n # type: () -> int\n return self._dropped_frames", "def getIntrons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)-1):\n rtrn.append(Interval(self.chr,self.exonEnds[i]+1,self.exonStarts[i+1]-1))\n return rtrn", "def get_interval_list_predefined_gap(traces_list, gap_interval):\n\n intv = 0\n interval_list = []\n pre_traces = []\n\n for timst in traces_list:\n timst = timst.replace(microsecond=0)\n pre_traces.append(timst)\n\n for i in range(0, len(pre_traces)-1):\n iat = (pre_traces[i+1]-pre_traces[i]).total_seconds()\n if iat <= gap_interval:\n current_trace = pre_traces[i]\n while current_trace < pre_traces[i+1]:\n interval_list.append(current_trace)\n current_trace = current_trace + datetime.timedelta(0,1)\n else:\n interval_list.append(pre_traces[i])\n\n if i == len(pre_traces)-2:\n interval_list.append(pre_traces[i+1])\n\n return interval_list", "def baselineFrames(self):\n frames=[]\n for tag,T1,T2 in [x for x in self.tags if x[0]=='baseline']:\n for i,timePoint in enumerate(self.conf['times']):\n if timePoint>=T1*60 and timePoint<=T2*60:\n frames.append(i)\n return frames\n else:\n return [0]", "def get_intervals(self, account):\n buckets = []\n for monitor in self.get_watchauditors(account):\n interval = monitor.watcher.get_interval()\n if not interval in buckets:\n buckets.append(interval)\n return buckets", "def interval(self):\n return (self.start, S.Infinity)", "def gaps(self):\n return self.gaps_L + self.gaps_R", "def mins(self):\n return self.intervals[:, 0]", "def getDisjointIntervals(X: np.ndarray) -> list:\n if len(X) == 0:\n return []\n else:\n cc = 0\n currentToken = -1\n intervals = []\n reading = False\n while cc < len(X):\n\n if (X[cc] > 0) and (not reading):\n idxLeft = cc\n currentToken = X[cc]\n reading = True\n\n elif (X[cc] != currentToken) and reading:\n idxRight = (cc - 1)\n record = (int(idxLeft), int(idxRight), int(currentToken))\n intervals.append(record)\n\n if X[cc] > 0:\n idxLeft = cc\n currentToken = X[cc]\n reading = True\n else:\n reading = False\n\n cc += 1\n\n # termination case\n if reading:\n assert cc == len(X)\n idxRight = cc - 1\n record = (int(idxLeft), int(idxRight), int(currentToken))\n intervals.append(record)\n\n return intervals", "def break_points(inte, minutes):\n inte = np.asarray(inte)\n minutes = np.asarray(minutes)\n n = len(inte)\n breaks = []\n last_observed_min = 0\n\n # we consider sessions of 30 productive minutes, so we cannot split a session lesser than 60 minutes\n if n > 60:\n c = 0\n for i in range(0,(n)):\n if minutes[i] != last_observed_min:\n last_observed_min = minutes[i]\n c += 1\n \n if (inte[i] >= BREAK_POINT and c > 30) and len(inte[i:]) > 30:\n breaks.append(i)\n c = 0\n \n return breaks", "def between_blocks(self, frame):\n return []", "def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps", "def thresholds(self):\n return list(self._thresholds)", "def compute_previous_intervals(I):\n # extract start and finish times\n start = [i.left for i in I]\n finish = [i.right for i in I]\n\n p = []\n for j in range(len(I)):\n # rightmost interval f_i <= s_j\n i = bisect.bisect_right(finish, start[j]) - 1\n p.append(i)\n\n return p", "def _get_packet_intervals(\n packets: Sequence[Packet],\n node: int,\n getter: Callable[[Packet, int], float]\n) 
-> np.ndarray:\n prev_time = 0.0\n intervals = []\n for packet in packets:\n if packet.was_served[node]:\n new_time = getter(packet, node)\n intervals.append(new_time - prev_time)\n prev_time = new_time\n return np.asarray(intervals)", "def calc_breakpoints_wvl(array, interval):\n\n\t\t\tbreakpoints = []\n\t\t\tcounter = 0\n\t\t\tfor i in range(len(array)):\n\n\t\t\t\tif (array[i] - array[counter]) >= interval:\n\t\t\t\t\tcounter = i\n\t\t\t\t\tbreakpoints.append(array[i])\n\n\t\t\treturn breakpoints", "def getEnds(self) -> List[int]:\n ...", "def getObservationMJDSecRange(vis):\n return([getObservationStart(vis),getObservationStop(vis)])", "def InterBurstGapUnits(self):\r\n\t\treturn self._get_attribute('interBurstGapUnits')", "def determine_cutting_frames(pos_data):\n def get_pts(dat):\n \"\"\" short-cut function to determine when state in dat changes\n Args:\n A numpy vector\n Returns:\n Boolean vector\n \"\"\"\n return np.where(np.abs(np.diff(dat)) > 0)[0]+1\n\n # cutting points according to game status\n max_frame = pos_data.shape[0]-1\n poss_cts = get_pts(pos_data[:, 1])\n status_cts = get_pts(pos_data[:, 2])\n half_cts = get_pts(pos_data[:, 3])\n cut_pts = np.unique(np.concatenate([[0], status_cts, half_cts, poss_cts, [max_frame]]))\n return cut_pts", "def get_frame_range(self):\n return map(\n int,\n pm.timeControl(\n pm.melGlobals['$gPlayBackSlider'],\n q=1,\n range=1\n )[1:-1].split(':')\n )", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def num_janks(self, interval=None):\n return len(self.jank_intervals(interval=interval))", "def find_breaks(self, time=None):\n if time is None:\n time = self.time\n diff = np.diff(time)\n ind = np.where((diff >= 2.5*np.std(diff)+np.nanmean(diff)))[0]\n ind = np.append(0, ind)\n ind = np.append(ind, len(time))\n\n subsets = []\n for i in range(len(ind)-1):\n region = np.arange(ind[i], ind[i+1], 1)\n subsets.append(region)\n\n return np.array(subsets)", "def season_breaks(in_mjd, ra):\n\n season = np.floor(calcSeason(ra, in_mjd))\n\n\n di = np.diff(season)\n break_indx = np.where(di > 0)[0]\n #breaks = (in_mjd[break_indx] + in_mjd[break_indx+1])/2.\n\n return break_indx", "def get_fixation_frames(subject, run=0):\n\n trial_frames = np.append(condition_frames(load_evs(subject, 'wm', 'all_bk_cor'))[run],\n condition_frames(load_evs(subject, 'wm', 'all_bk_err'))[run]) # TODO: include no response trials\n trial_frames = np.sort(trial_frames)\n\n fixation_start = np.array([], dtype=int) # initialize\n\n for idx, i in enumerate(trial_frames):\n if idx == 0:\n continue\n\n # find frames with difference greater than 10s\n if i - trial_frames[idx - 1] > 10 / TR:\n fixation_start = np.append(fixation_start, trial_frames[idx - 1])\n\n fixation_duration = np.ceil(15 / TR) # always 15s duration\n\n # get range of frames corresponding to duration of fixation block\n fixation_frames = np.concatenate([i + np.arange(0, fixation_duration, dtype=int) for i in fixation_start])\n\n return fixation_frames", "def get_frames(self, indices=None):\n if indices is None:\n return self._spikestimes\n raise self._spikestimes[indices]", "def find_dishonest_intervals(z_hist):\n spans = []\n x_init = 0\n for t, _ in enumerate(z_hist[:-1]):\n if z_hist[t + 1] == 0 and z_hist[t] == 1:\n x_end = t\n spans.append((x_init, x_end))\n elif z_hist[t + 1] == 1 and z_hist[t] == 0:\n x_init = t + 1\n return spans" ]
[ "0.5913416", "0.5804972", "0.56494457", "0.5641811", "0.5607846", "0.55438244", "0.5464397", "0.5447493", "0.541252", "0.54121006", "0.54103", "0.53855985", "0.53515875", "0.53275687", "0.5318675", "0.5315721", "0.5306424", "0.5305439", "0.5303692", "0.5252823", "0.520765", "0.51814467", "0.51764864", "0.51721454", "0.5167473", "0.5144841", "0.51420027", "0.510485", "0.510118", "0.50589764" ]
0.79757136
0
Returns number of janks (missed frame) within interval.
def num_janks(self, interval=None): return len(self.jank_intervals(interval=interval))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jankrate(self, interval=None):\n try:\n return round(self.num_janks(interval=interval) / self.present_duration(interval=interval), 1)\n except ZeroDivisionError:\n return 0.0", "def jank_intervals(self, interval=None):\n missedFrames = self.event_intervals('FrameMissed', interval=interval)\n return IntervalList(filter(lambda x:x.value==1, missedFrames))", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def npulses(self):\n return self.header.pulse_count", "def num_frames(self):\n return self._first_rgb.shape[1]", "def num_tanks(self):\n return len(self._node_reg.tank_names)", "def get_number_rows(rk_settings, rock_height, star_height):\r\n\tavailable_space_y = (rk_settings.screen_height -\r\n\t\t\t\t\t\t(3 * star_height) - rock_height)\r\n\tnumber_rows = int(available_space_y / (2 * star_height))\r\n\treturn number_rows", "def get_num_frames(self):\n return self._frames.shape[0]", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted", "def num_zombies(self):\n return len(self._zombie_list)", "def num_zombies(self):\n return len(self._zombie_list)", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def count_games(self) -> int:\n return len(self.rating_history) - 1", "def num_traj_frames(self, run_idx, traj_idx):\n return self.traj(run_idx, traj_idx)[POSITIONS].shape[0]", "def num_zombies(self):\r\n return len(self._zombie_list)", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def count_level(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0,len(self.matrix[i])):\r\n if self.matrix[i][j] == \"0\":\r\n count += 1\r\n\r\n # We substract 1 to count level from 0\r\n return count - 1", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def get_total_frames(self) -> int:\n return self.num_frames", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def count(self):\n return len(self.deck)", "def get_nr_of_misplaced_tiles(board):\n result = 0\n\n for idx, val in enumerate(board):\n if idx != val:\n result += 1\n\n return result", "def obstacle_count(self):\n found_something = False\n count = 0\n starting_postion = self.get_heading()\n self.right(primary=60, counter=60)\n time.sleep(0.5)\n while self.get_heading() != starting_postion:\n if self.read_distance() < 250 and not found_something:\n found_something = True\n count += 1\n print (\"I found something\")\n elif self.read_distance() > 250 and found_something:\n found_something = False\n print(\"I have a clear view\")\n self.stop()\n\n print(\"I have found this many things: %d\" % count)\n return count", "def get_nrof_pumps(self):\n pumps = 0\n for p in self.pump_array:\n if p:\n pumps += 1\n return pumps", "def num_wires(self):\n return self._top_exp.number_of_wires()", "def NumberOfEmptySpots(self):\n\n return 
np.count_nonzero(self.state == EMPTY)", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def N(self):\n return len(self.time)" ]
[ "0.6568541", "0.61695075", "0.6039369", "0.59244394", "0.59195894", "0.59130406", "0.5842135", "0.5823727", "0.58052087", "0.5759005", "0.5667941", "0.5667941", "0.5654426", "0.5644131", "0.5642725", "0.56422305", "0.5639742", "0.5639491", "0.5623889", "0.56237584", "0.56106645", "0.55939436", "0.55696976", "0.5547887", "0.5538034", "0.5535108", "0.5527974", "0.54899824", "0.54860634", "0.54843456" ]
0.7892927
0
Returns number of janks (missed frame) per second within interval.
def jankrate(self, interval=None): try: return round(self.num_janks(interval=interval) / self.present_duration(interval=interval), 1) except ZeroDivisionError: return 0.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_janks(self, interval=None):\n return len(self.jank_intervals(interval=interval))", "def num_frames(self):\n return self._first_rgb.shape[1]", "def get_total_frames(self) -> int:\n return self.num_frames", "def times(self) -> int:\n return self._channel_arrays[0].shape[self.time_pos]", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def npulses(self):\n return self.header.pulse_count", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def n_timesteps(self) -> int:\n return len(self.time)", "def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted", "def N(self):\n return len(self.time)", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def vlass_stars(duration, n_beams):\n n_pointings = duration//4.2\n n_observed = n_pointings*n_beams\n return n_observed", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def find_total_numbeats(nb1, nb2, nb3, nb4, nb5, nb6):\n numbeats = nb1 + nb2 + nb3 + nb4 + nb5 + nb6\n\n logging.info('Calculated total number of beats: %s', numbeats)\n return numbeats", "def get_num_frames(self):\n return self._frames.shape[0]", "def num_tanks(self):\n return len(self._node_reg.tank_names)", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. 
* np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def jank_intervals(self, interval=None):\n missedFrames = self.event_intervals('FrameMissed', interval=interval)\n return IntervalList(filter(lambda x:x.value==1, missedFrames))", "def count_sign_changes():\n numzero = 0\n for i in xrange(length):\n if frames[i] == 0:\n numzero += 1\n numzero /= 3 # 3 seconds\n numzero /= 2\n return numzero", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)", "def num_time_bins(self):\n return self.header.time_gate_bin_count * 
self.header.samples_per_time_bin", "def get_number_rows(rk_settings, rock_height, star_height):\r\n\tavailable_space_y = (rk_settings.screen_height -\r\n\t\t\t\t\t\t(3 * star_height) - rock_height)\r\n\tnumber_rows = int(available_space_y / (2 * star_height))\r\n\treturn number_rows", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)", "def minute_asleep_frequency(self):\n minutes_asleep = defaultdict(int)\n for shift in self.shifts:\n for minute in shift.minutes_asleep:\n minutes_asleep[minute] += 1\n return minutes_asleep", "def get_nrof_pumps(self):\n pumps = 0\n for p in self.pump_array:\n if p:\n pumps += 1\n return pumps", "def freq_minutes(self):\n return 5", "def num_runs(self):\n return len(self._h5[RUNS])", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas" ]
[ "0.7543728", "0.6045018", "0.59952563", "0.5964167", "0.59383506", "0.59273905", "0.58509415", "0.5850528", "0.5845461", "0.58351386", "0.579948", "0.57471603", "0.57294184", "0.5726754", "0.5726694", "0.57095593", "0.5674112", "0.5640191", "0.56383294", "0.55924916", "0.5588853", "0.5581794", "0.5577816", "0.55738723", "0.5560887", "0.5557441", "0.5543705", "0.5538288", "0.55330014", "0.5525522" ]
0.6945059
1
Start time estimated as first time we ever saw (i.e. scheduled on CPU) the launched task.
def _start_launch_time(self, launched_event): if launched_event: interval = Interval(0, launched_event.timestamp) return self._trace.cpu.task_intervals(task=launched_event.task, interval = interval)[0].interval.start
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_time(self) -> float:\r\n ...", "def start_time(self):\n pass", "def start_time(self) -> float:\n return self._start_time", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__jobInfo.startTime", "def __get_starting_time(self):\n return self.__starting_time", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.startTime", "def start_time(self):\n return self.__start", "def set_start_time():\n __start = current_time_milli()", "def start_time(self) -> float:\n return float(self.get_from_redis(\"start_time\"))", "def start_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> datetime:\n return self.root_hartree.start_time", "def start_time():\n t = [time.clock(), time.time()]\n return t", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n start_time = self.cache.get('start_time')\n if start_time is not None:\n return DatePoint.unfreeze(start_time)", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def time_start(self):\n return self._time_start", "def get_attempt_start_time():\n pass", "def start(self):\r\n self.start_time = time.time()" ]
[ "0.7574514", "0.7435856", "0.7330056", "0.72312284", "0.7228733", "0.7219493", "0.7141052", "0.71377635", "0.7076516", "0.7048836", "0.7000251", "0.6973097", "0.6969327", "0.696228", "0.696228", "0.696228", "0.696228", "0.696228", "0.696228", "0.696228", "0.696228", "0.692079", "0.6848267", "0.6848267", "0.68461996", "0.6824242", "0.6824242", "0.6823656", "0.6805175", "0.6773087" ]
0.8108096
0
Get agent by name
def get(self, name): validate_inputs({'name': name}) return get_storage_manager().get(models.Agent, name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def byname(self, name):\n\n name = name.lower()\n for i in self.bots:\n if name == i.name:\n return i", "def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)", "def get_agent(self, account_id, agent_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents/' + str(agent_id), filters)", "def get_agent_of_model(self, model):\n if model.id in self.agents:\n return self.agents[model.id]\n elif model.id in self.dead_agent_store:\n return self.dead_agent_store[model.id]\n raise ValueError('agent of given model does not exist')", "def retrieve(cls: Type[T], agent_id: int, datastore: Datastore) -> T:\n agent = cls.optionally_retrieve(agent_id, datastore)\n if agent is None:\n raise NotFound\n return agent", "def get_agent_by_host(agent_host):\n session = db_api.get_session()\n with session.begin(subtransactions=True):\n query = session.query(agents_db.Agent)\n agent = query.filter(\n agents_db.Agent.host == agent_host,\n agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,\n agents_db.Agent.admin_state_up.is_(True)).first()\n if agent and agent.is_active:\n return agent\n return None", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def get_obj_by_name(name: str) -> Any:\r\n module, obj_name = Onrolux.get_module_from_obj_name(name)\r\n return get_obj_from_module(module, obj_name)", "def get_target(self, name):\n return self._targets[name]", "def agent(self):\n return self.__agent", "def show_agent(self, agent, **_params):\r\n return self.get(self.agent_path % (agent), params=_params)", "def get_vm_by_name(self, name=None):\n\n vm_obj = self.get_obj(self.connect.RetrieveContent(), [vim.VirtualMachine], name)\n if vm_obj:\n return vm_obj\n else:\n print(\"VMUNAVAILABLE(NAME)\")\n # raise VMUnavaiable(name)", "def find_hero(self, name):\n for hero in self.heroes:\n if hero.name == name:\n return hero\n return 0", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def get_target_by_name(name):\n\n if not isinstance(name, str):\n raise TypeError(\"name must be str, not '%s'\" % str(name))\n targets = get_targets_by({\"name\": name})\n if targets:\n return targets[0]\n else:\n raise NoSuchTargetError(\"There is no target with name %s\" % name)", "def _get_solver_agent(self):\n # Determine selectable agent(s)\n sctx = self.context.solver\n\n alist = sctx.agent\n if alist is None:\n # Return empty solver agent\n return CpoSolverAgent(self, sctx.params, sctx)\n elif not (is_string(alist) or is_array(alist)):\n raise CpoException(\"Agent identifier in config.context.solver.agent should be a string or a list of strings.\")\n\n # Create agent\n if is_string(alist):\n aname = alist\n agent = self._create_solver_agent(alist)\n else:\n # Search first available agent in the list\n agent = None\n aname = None\n errors = []\n for aname in alist:\n try:\n agent = self._create_solver_agent(aname)\n break\n except Exception as e:\n errors.append((aname, str(e)))\n # Agent not found\n errstr = ', '.join(a + \": \" + str(e) for (a, e) in errors)\n raise 
CpoException(\"Agent creation error: \" + errstr)\n\n # Log solver agent\n sctx.log(1, \"Solve model '\", self.model.get_name(), \"' with agent '\", aname, \"'\")\n agent.process_infos[CpoProcessInfos.SOLVER_AGENT] = aname\n return agent", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def obtenerActor(nombre=None):\n actor = Actor(None, nombre)\n\n return actor", "def get(self, name):\r\n return self._registry[name]", "def get_room(self, name):\n for i in self.rooms:\n if self.rooms[i].name == name:\n return self.rooms[i]\n raise RuntimeError, \"Room '%s' not known\" % name", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def by_name(name, create_user=True):\n return get_obj_by_name(OBJT_HOST, name, create_user)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)", "def get_machine(self, name):\n\n return self._machine_manager.get_machine(name)", "def find_by_name(name, engines=None):\n if engines is None:\n engines = ENGINES\n\n for egn in engines:\n if egn.name() == name:\n return egn\n\n return None", "def get(name):\r\n return componentManager.components[name]", "def agent(self) -> Entity:\n return self.__agent", "def load_room(name):\n return globals().get(name)" ]
[ "0.7130851", "0.6230404", "0.6048167", "0.6006651", "0.5870144", "0.5770241", "0.57700723", "0.5706711", "0.57012594", "0.56749845", "0.5654736", "0.56485707", "0.5647193", "0.558511", "0.5584789", "0.55810803", "0.5572171", "0.55225843", "0.5484141", "0.54815894", "0.5450483", "0.5412949", "0.5397743", "0.5390286", "0.5380812", "0.537973", "0.53602976", "0.53540623", "0.53298587", "0.53295124" ]
0.7586844
0
Create a new agent or update its state if exists
def put(self, name): request_dict = get_json_and_verify_params({ 'node_instance_id': {'type': unicode}, 'state': {'type': unicode} }) validate_inputs({'name': name}) state = request_dict.get('state') self._validate_state(state) try: return self._create_agent(name, state, request_dict) except manager_exceptions.ConflictError: return self._update_agent(name, state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id", "def make_agent(agent_id, **kwargs):\n return agent_register[agent_id](**kwargs)", "def test_agent_creation():\n agent = AgentFactory()\n agent.name = 'agent test name'\n agent.save()\n assert agent.name == 'agent test name'", "def _add_agent_to_graph(self, agent: mantrap.agents.base.DTAgent):\n from data import Node\n is_robot = agent.is_robot\n\n # In Trajectron each node has a certain type, which is either robot or pedestrian, an id and\n # state data. 
Enforce the Trajectron id to the internal ids format, to be able to query the\n # results later on.\n agent_history = agent.history\n acc_history = agent.compute_acceleration(agent_history, dt=self.dt)\n\n node_data = self._create_node_data(state_history=agent_history, accelerations=acc_history)\n node_tye = self._gt_env.NodeType.PEDESTRIAN if not is_robot else self._gt_env.NodeType.ROBOT\n node = Node(node_type=node_tye, node_id=agent.id, data=node_data, is_robot=is_robot)\n if is_robot:\n self._gt_scene.robot = node\n self._gt_scene.nodes.append(node)\n\n # Re-Create online environment with recently appended node.\n self._online_env = self.create_online_env(env=self._gt_env, scene=self._gt_scene)", "def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)", "def _register_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n self._register(description, \"registering agent on SOEF.\")", "def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])", "def createAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def add_agent(self, environment_name, agent_name, agent_params):\n assert environment_name in self._environment_dict\n assert self._is_sweep is False or self._is_sweep is None\n self._is_sweep = False\n if agent_name in self._experiment_structure[environment_name]:\n raise AttributeError(\n f'An experiment for environment {environment_name} and builders {agent_name} already exists.'\n )\n\n environment_builder_params = self._environment_dict[environment_name]['build_params']\n\n try:\n exp = self._create_experiment(environment_name, environment_builder_params, agent_name, agent_params)\n self._experiment_structure[environment_name][agent_name] = exp\n except AttributeError as e:\n self.logger.error(\n f'Unable to create experiment for the environment {environment_name} and agent {agent_name}'\n )\n self.logger.exception(e)", "def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)", "def do_PUT(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('PUT agent returning 400 response. 
agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n if \"activate\" in rest_params:\n auth_tag=json_body['auth_tag']\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if agent['virtual']:\n raise Exception(\"attempting to activate virtual AIK using physical interface for %s\"%agent_id)\n\n if common.STUB_TPM:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n ex_mac = crypto.do_hmac(agent['key'],agent_id)\n if ex_mac == auth_tag:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n raise Exception(\"Auth tag %s does not match expected value %s\"%(auth_tag,ex_mac))\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n elif \"vactivate\" in rest_params:\n deepquote = json_body.get('deepquote',None)\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if not agent['virtual']:\n raise Exception(\"attempting to activate physical AIK using virtual interface for %s\"%agent_id)\n\n # get an physical AIK for this host\n registrar_client.init_client_tls(config, 'registrar')\n provider_keys = registrar_client.getKeys(config.get('general', 'provider_registrar_ip'), config.get('general', 'provider_registrar_tls_port'), agent_id)\n # we already have the vaik\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])\n if not tpm.check_deep_quote(hashlib.sha1(agent['key']).hexdigest(),\n agent_id+agent['aik']+agent['ek'],\n deepquote,\n agent['aik'],\n provider_keys['aik']):\n raise Exception(\"Deep quote invalid\")\n\n self.server.db.update_agent(agent_id, 'active',True)\n self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n else:\n pass\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"PUT for \" + agent_id + \" returning 400 response. 
Error: %s\"%e)\n logger.exception(e)\n return", "def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)", "def new_agent(\n self,\n worker_id: str,\n unit_id: str,\n task_id: str,\n task_run_id: str,\n assignment_id: str,\n task_type: str,\n provider_type: str,\n ) -> str:\n assert_valid_provider(provider_type)\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"INSERT INTO agents(\n worker_id,\n unit_id,\n task_id,\n task_run_id,\n assignment_id,\n task_type,\n provider_type,\n status\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?);\"\"\",\n (\n int(worker_id),\n int(unit_id),\n int(task_id),\n int(task_run_id),\n int(assignment_id),\n task_type,\n provider_type,\n AgentState.STATUS_NONE,\n ),\n )\n agent_id = str(c.lastrowid)\n c.execute(\n \"\"\"\n UPDATE units\n SET status = ?, agent_id = ?, worker_id = ?\n WHERE unit_id = ?;\n \"\"\",\n (\n AssignmentState.ASSIGNED,\n int(agent_id),\n int(worker_id),\n int(unit_id),\n ),\n )\n return agent_id\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException(e)\n raise MephistoDBException(e)", "def test_create_router_on_l3_agent(self):\n with self.override_role():\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])", "async def post(self):\r\n\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent = Agent.get(Agent.uuid == agent_uuid)\r\n if not agent:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not present\"}\r\n logger.info(\"agent not present\")\r\n return web.Response(text=str(response_obj), status=404)\r\n try:\r\n System.create(agent_uuid=agent)\r\n logger.info(\"System created successfully!!!\")\r\n return web.Response(text=\"Successful\", status=201)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not added\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def init_agent(self, kwargs):\n\n exp_params = [('agents_info', is_string),\n ('ip', is_string)]\n try:\n agents_info, agent_ip = check_arguments(exp_params, kwargs)\n agents_info = simplejson.loads(agents_info)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info('Setting agent environment')\n\n target_dir = self.VAR_CACHE\n with open(join(target_dir, 'agents.json'), 'w') as outfile:\n simplejson.dump(agents_info, outfile)\n\n agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]\n master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]\n\n self.env.update({'MY_IP':agent_ip})\n self.env.update({'MY_ROLE':agent_role})\n self.env.update({'MASTER_IP':master_ip})\n\n self.logger.info('Agent initialized')\n return HttpJsonResponse()", "def enter(self, env):\n env = self._find_env(env, new=True)\n env.add_agents(self)", "def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. 
uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()", "def __init__(self, agent_id):\n # Properties of the agent\n #--------------------------------------#\n # The following parameters are mandatory\n self.agent_id = int(agent_id)\n\n # The states\n self.task_dict = dict() # Elements are {task_id:TASK(), ...}\n\n # Activation\n self.num_activated_task = 0\n self.is_activated = False # If there is a task being activated, this is set to True", "def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent", "def level1AI(self, values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n #values['r']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newFakeAgent()\n values['j']['agent'] = AI_server.newFakeAgent()", "def add_transport(self, agent):\n with self.simulation_mutex:\n self.get(\"transport_agents\")[agent.name] = agent", "def _insert_agent_device(self):\n # Initialize key variables\n idx_agent = 1\n idx_device = 1\n\n # Add agent\n if db_agent.idx_agent_exists(idx_agent) is False:\n # Generate a UID and add a record in the database\n record = Agent(\n id_agent=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1109)\n\n # Add device\n if db_device.idx_device_exists(idx_device) is False:\n record = Device(\n description=general.encode(self.reserved),\n devicename=general.encode(self.reserved)\n )\n database = db.Database()\n database.add(record, 1106)\n\n # Add to Agent / Device table\n if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False:\n record = DeviceAgent(idx_device=idx_device, idx_agent=idx_agent)\n database = db.Database()\n database.add(record, 1107)", "def add_agent(self, agent):\n\t\tif not (agent in self.agents_in_site):\n\t\t\tif (agent.site != None):\n\t\t\t\tagent.site.agents_in_site.remove(agent) \n\t\t\tself.agents_in_site.append(agent)\n\t\t\tagent.site = self", "async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = 
data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def create_agents(agents, start_params=None, **kwargs):\n if isinstance(agents, str):\n if (start_params is not None) and (\"save_history\" in start_params):\n save_history = start_params[\"save_history\"]\n else:\n save_history = False\n return Agent(strategy=agents, save_history=save_history, **kwargs)\n return AgentGroup(agents=agents, start_params=start_params)", "async def post(self):\r\n data = await self.request.json()\r\n register_date = data[\"register_date\"]\r\n ip_address = data[\"ip_address\"]\r\n try:\r\n Agent.create(register_date=register_date, ip_address=ip_address)\r\n response_obj = {\"status\": \"success\"}\r\n return web.Response(text=str(response_obj), status=201)\r\n except Exception as exception:\r\n response_obj = {\"status\": \"failed\", \"reason\": exception}\r\n error_message = str(exception)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def create():\n upgrade()\n populate()", "def create_state():\n state = request.get_json()\n if type(state) is not dict:\n abort(400, {'Not a JSON'})\n elif 'name' not in state:\n abort(400, {'Missing name'})\n else:\n new_state = State(**state)\n storage.new(new_state)\n storage.save()\n return make_response(jsonify(new_state.to_dict()), 201)", "def level2AI(self, values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newAgent(2)\n values['j']['agent'] = AI_server.newFakeAgent()" ]
[ "0.63911676", "0.634182", "0.629022", "0.6076828", "0.5981401", "0.59083194", "0.58926135", "0.5806879", "0.5754334", "0.57336485", "0.57117426", "0.56722033", "0.5655055", "0.56291324", "0.5626939", "0.55930084", "0.5581793", "0.5577333", "0.55368817", "0.5518555", "0.55118835", "0.55082005", "0.54696274", "0.5460724", "0.54605275", "0.5431852", "0.5430205", "0.5416084", "0.5403323", "0.5366476" ]
0.68219554
0
Add a Handle asset to the adversary.
def add_handle_asset(self, value): return self.add_asset('HANDLE', value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_handle(self, handle):\n\n self.pool.append(handle)", "def add_handout(self, asset_name):\r\n self._handouts.append(asset_name)", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def add_tag(self, transaction, media_handle, tag_handle):\n media = self.dbstate.db.get_object_from_handle(media_handle)\n media.add_tag(tag_handle)\n self.dbstate.db.commit_media_object(media, transaction)", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def add_asset(self, asset_type, asset_value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.add_adversary_handle_asset,\n 'phone': self.tc_requests.add_adversary_phone_asset,\n 'url': self.tc_requests.add_adversary_url_asset,\n }\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id, asset_value)", "def add_tag(self, transaction, citation_handle, tag_handle):\n citation = self.dbstate.db.get_citation_from_handle(citation_handle)\n citation.add_tag(tag_handle)\n self.dbstate.db.commit_citation(citation, transaction)", "def delete_handle_asset(self, asset_id):\n return self.delete_asset(asset_id, 'HANDLE')", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n body = fileHandle.read()\n if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def add_asset(self, asset, replace=False):\n assert replace or asset.short_name() not in self._assets, (\n f'Attempting to add duplicate Asset: {asset.short_name()}')\n self._assets[asset.short_name()] = asset\n return self", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n\tbody = fileHandle.read()\n if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def set_handle(self, handle): # -> None:\n ...", "def get_handle_asset(self, asset_id):\n return self.get_asset(asset_id, 'HANDLE')", "def add_attachment(self, filehandle, package_name, **kwargs):\n if self._working_dir is None:\n raise PackageError(\n 'You can only add attachments in the context '\n 'of a `with` statement.')\n\n package_name = package_name.lstrip('/.')\n label = kwargs.get('label')\n if not label:\n kwargs['label'] = path.basename(package_name)\n\n att = Attachment(**kwargs)\n\n outpath = path.join(self._working_dir, package_name)\n dirs = path.dirname(outpath)\n if not path.exists(dirs):\n makedirs(dirs)\n\n if path.exists(outpath):\n msg = '{} already exists in the package directory.'.format(outpath)\n raise PackageError(msg)\n\n with open(outpath, 'wb') as out:\n readcontents = lambda: filehandle.read(2 ** 16)\n for chunk in iter(readcontents, b''):\n out.write(chunk)\n att['content'] = package_name\n self.item['attachments'].append(att)\n return att", "def upload_link(self, handle):\n return None", "def add( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)", "def add(self, asset: Asset):\n insort_left(self.asset_collection, asset)", "def add_handler(self, handler, backtrack = False):\n\n # Add Handler\n self._handlers.append(handler)\n logger.debug(\"%s: handler %s 
added.\" % \\\n (self.__class__.__name__, handler.__name__))\n \n # Backtrack\n if backtrack:\n for message in self.get_waiting(): handler(message)\n logger.debug(\"%s: handler %s backtracked.\" % \\\n (self.__class__.__name__, handler.__name__))", "def add_attachment(self, attachment):\n self.attachments.append(attachment.build())", "def add(self, link):\n # if path.exists(self.cachefile):\n with open(self.cachefile, 'a') as cache:\n cache.write(f\"{link}\\n\")", "def addBridge(self, bridge):\n self.bridges.append(bridge)", "def addAttachedObject(self, attachedObject):\n\t\tself.attachedObjects.append(attachedObject)", "def handle_assets(self):\n return self.assets(asset_type='HANDLE')", "def AddAssetResourceArg(parser, verb, positional=True):\n name = 'asset' if positional else '--asset'\n return concept_parsers.ConceptParser.ForResource(\n name,\n GetAssetResourceSpec(),\n 'The Asset {}'.format(verb),\n required=True).AddToParser(parser)", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def register_module_asset(self, asset):\n self._module_assets.append(asset)", "def add_bridge(self, bridge):\n logger.info('adding bridge: %s' % bridge.name)\n data = self._add_common(bridge)\n logger.debug('bridge data: %s' % data)\n self.bridge_data[bridge.name] = data\n if bridge.routes:\n self._add_routes(bridge.name, bridge.routes)", "def add(self, requester: int, track: dict):\n self.queue.append(AudioTrack().build(track, requester))", "def appendPhandle(self, obj):\n # Create a bogus state because we only need the Phandle dictionary\n state = FdtState(addr_cells=1, size_cells=1, cpu_cells=1)\n\n phandle = state.phandle(obj)\n self.append(FdtPropertyWords(\"phandle\", [phandle]))" ]
[ "0.72719395", "0.6265543", "0.6127872", "0.60960156", "0.5908841", "0.5781675", "0.5750709", "0.5716834", "0.563507", "0.5635042", "0.5624101", "0.5616119", "0.554554", "0.55400205", "0.5443238", "0.54349756", "0.5426446", "0.53440446", "0.53244644", "0.52917016", "0.5271356", "0.5271192", "0.5271056", "0.5250433", "0.5238973", "0.52248406", "0.5178354", "0.5176615", "0.5173022", "0.5150169" ]
0.7584645
0
Add a phone asset to the adversary.
def add_phone_asset(self, value): return self.add_asset('PHONE', value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def add_merchant(street, merchant):\r\n street.append(merchant)", "def add(name, phone, db):\n database = load(db)\n if name in database:\n print(\"%r already in %r\" % (name, db))\n sys.exit(-1)\n else:\n database[name] = phone\n database = OrderedDict(sorted(database.items()))\n pickle.dump(database, open(db, 'wb'))\n print(\"added '%s (%s)' to %r\" % (name, phone, db))", "def add(self, asset_ids=None):\n if asset_ids is not None and isinstance(asset_ids, list):\n for h in asset_ids:\n self.asset_ids.append(h[:self.idlen_conf[\"asset_id\"]])", "def add(args):\n name = args[1]\n number = args[2]\n phonebook = args[3]\n with open(phonebook, 'a') as f:\n f.write('%s %s\\n' % (name, number))\n f.close()\n return [\"Successfully added %s.\" % name]", "def add_rec(self):\n print(\"Write phone number:\")\n add_phone_number_input = input()\n print(\"Write name of the record:\")\n add_name_input = input()\n print(\"Write address:\")\n add_address_input = input()\n return self.storage.add(\n add_phone_number_input, add_name_input, add_address_input\n )", "def add_asset(self, asset_type, asset_value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.add_adversary_handle_asset,\n 'phone': self.tc_requests.add_adversary_phone_asset,\n 'url': self.tc_requests.add_adversary_url_asset,\n }\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id, asset_value)", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def get_phone_asset(self, asset_id):\n return self.get_asset(asset_id, 'PHONE')", "def phone(self, new_number):\n self._phone.number = new_number", "def add(self, connection):\n id = len(self.contacts)\n self.contacts[id] = connection\n self.order.append(id)", "def add_attachment(self, attachment):\n self.attachments.append(attachment.build())", "def add_arm(cls, arm):\n cls.ARM = arm", "def add(self, transport, address=None):\r\n\r\n if not address:\r\n address = str(uuid.uuid1())\r\n\r\n if address in self.recipients:\r\n self.recipients[address].add(transport)\r\n else:\r\n self.recipients[address] = RecipientManager(transport, address)\r\n\r\n return address", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... 
{name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def addBridge(self, bridge):\n self.bridges.append(bridge)", "def add_asset(urn: str, asset: str, validate_assets: bool) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.add_asset(asset)\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"add assets\")\n if validate_assets:\n _abort_if_non_existent_urn(\n graph,\n asset,\n \"add assets. Use --no-validate-assets if you want to turn off validation\",\n )\n for mcp in dataproduct_patcher.build():\n graph.emit(mcp)", "def telephone(self, telephone):\n\n self._telephone = telephone", "def add_addressitem(self, addressitem):\n self.addresses.append(addressitem)", "def add_attachment(self, val: Attachment):\n self._attachments.append(val)", "def add_port(self, port):\n self._main_model.add_port(port)", "def add(name, number, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if phonebook_data.get(name):\n raise DuplicateError(\"This entry already exists. To make changes, \"\n \"use update_number or update_name.\")\n\n else:\n phonebook_data[name] = number\n print \"Entry added:\", name, number\n save(phonebook_data, phonebook)", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def _addOutlet(self, outlet, other): \n self._outlets.append(outlet)\n if self._type == 2 and other._type == 1:\n self._reservoirs.append(other)" ]
[ "0.5725073", "0.55942494", "0.55942494", "0.55942494", "0.55942494", "0.55942494", "0.5503296", "0.5492854", "0.54826987", "0.5482024", "0.5440577", "0.5427534", "0.5373172", "0.5361251", "0.53341156", "0.5293473", "0.5267561", "0.5259339", "0.524951", "0.5226366", "0.5225728", "0.52160865", "0.521468", "0.51884294", "0.5174337", "0.5167364", "0.5164527", "0.51557434", "0.5111032", "0.5094337" ]
0.7101702
0
Add a URL asset to the adversary.
def add_url_asset(self, value): return self.add_asset('URL', value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_url(self, url):\n self.queue.put(url)", "def add_link (self, src, dst):\n raise NotImplementedError", "async def add(self, ctx, url, name):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n data = await resp.read()\n with open(os.path.join(os.getcwd(), \"data\",\n \"image\", name), \"wb\") as img:\n img.write(data)\n await self._image_reload()\n await ctx.message.add_reaction(\"👍\")", "def add_url(url: str):\n xml_dict = parse_xml_file(url)\n url_object = Url(url=xml_dict['url'],\n processed=bool(not xml_dict['error_text']),\n error=xml_dict['error_text'])\n url_object.keys = [Key(value=key) for key in xml_dict['keys']]\n s.add(url_object)\n s.commit()\n # sleep(20) # uncomment this, if you want to test *real* async :)\n return {'id': url_object.id}", "def add_url(self, url, found=None, target_id=None):\n visited = False\n if found is not None: # Visited URL -> Found in [ True, False ]\n visited = True\n return self.add_to_db(url, visited, found=found, target_id=target_id)", "def add_url():\n original_url = request.form[\"original_url\"]\n if not url(original_url):\n return render_template(\"error.html\", error=\"Invalid URL, enter a valid URL.\")\n new_url = Url(original_url=original_url)\n db.session.add(new_url)\n db.session.commit()\n return render_template(\"url_added.html\", original_url=original_url,\n short_url=new_url.short_url)", "def add_url(self, torrent_url, **kwargs):\n torrent_file = None\n if os.path.exists(torrent_url):\n torrent_file = open(torrent_url, 'r')\n else:\n try:\n torrent_file = urllib2.urlopen(torrent_url)\n except:\n torrent_file = None\n\n if not torrent_file:\n raise TransmissionError('File does not exist.')\n\n torrent_data = base64.b64encode(torrent_file.read())\n return self.add(torrent_data, **kwargs)", "def add_new_url(self, url):\n if url is None:\n return \n if url not in self.new_urls and url not in self.old_urls:\n self.new_urls.add(url)", "def add(self, link):\n # if path.exists(self.cachefile):\n with open(self.cachefile, 'a') as cache:\n cache.write(f\"{link}\\n\")", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "async def add(self, ctx: Context, url: str):\n if url not in self.urls:\n self.urls.append(url)\n self.write_vac()\n await ctx.channel.send('Registered <{}> to checker.'.format(url))\n else:\n await ctx.channel.send('<{}> is already registered to checker.'.format(url))", "def add_link(self, link):\n raise NotImplementedError", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def set_url(self, url):\n self.data['url'] = url", "def add_url(p_id, url):\n for product in all_products:\n if product['id'] == p_id:\n product['url'] = url\n product['product_id'] = p_id\n product.move_to_end('product_id', last=False)", "def addLinkToResource(link):\n\n\tif link not in variables.resources:\n\t\tvariables.resources.append(link)", "def add(self, url):\n record_sql = '''\n INSERT INTO {} (url)\n VALUES (?)\n '''.format(\n self.tablename\n )\n try:\n with self.conn:\n self.conn.execute(record_sql, (url,))\n except sqlite3.IntegrityError:\n logger.exception('Already tweeted %s!', url)", "def register_url(self, query, url):\n\n logger.debug(\n 'In %s inventory for %r, saving artifact URL %s ...',\n self.tier, query, url)\n\n if self._fs.exists(self._exact_descriptor_url_for_query(query)):\n # This shouldn't happen, because the CacheAccessor shouldn't write\n # to this inventory if we 
already have an exact match.\n logger.warn(\n 'In %s cache, attempted to create duplicate entry mapping %r '\n 'to %s', self.tier, query, url)\n return\n descriptor_url = self._create_and_write_descriptor(query, url)\n\n logger.debug(\n '... in %s inventory for %r, created descriptor at %s',\n self.tier, query, descriptor_url)", "def add_asset(self, asset_type, asset_value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.add_adversary_handle_asset,\n 'phone': self.tc_requests.add_adversary_phone_asset,\n 'url': self.tc_requests.add_adversary_url_asset,\n }\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id, asset_value)", "async def add_img(self, ctx: BBContext, url: str, artist: Optional[discord.User] = None):\n\n art = Art(url, artist.id, artist.name) if artist else Art(url)\n con = await ctx.get_connection()\n query = f'INSERT INTO {TABLE_ARTS}(url, artist_id, artist_name) VALUES($1, $2, $3)'\n\n await con.execute(query, art.url, art.artist_id, art.artist_name)\n await ctx.tick(True)", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def add_item(self, name, url):\n self.insert(\"\", \"end\", values=(name, url, \"\"))\n # Add the item - backend\n s.updateItem({\"item\": name, \"url\": url, \"status\": \"\", \"pstatus\": \"\"})\n\n self.selection_clear()", "def url(self, image_url):\n\n self._url = image_url" ]
[ "0.67932934", "0.652156", "0.6461821", "0.64571977", "0.6455303", "0.64421105", "0.633815", "0.6309748", "0.6232123", "0.61788785", "0.61086047", "0.6106751", "0.60589206", "0.60515255", "0.59763986", "0.59452015", "0.5936189", "0.58758444", "0.58670616", "0.5857115", "0.5842809", "0.58306664", "0.58306664", "0.58306664", "0.58306664", "0.58306664", "0.58306664", "0.58306664", "0.58171034", "0.5795325" ]
0.7426201
0
Get specific Adversary asset type from API
def asset(self, asset_id, asset_type, action='GET'):
    if not self.can_update():
        self._handle_error(910, [self.type])

    asset_methods = {
        'handle': self.tc_requests.adversary_handle_asset,
        'phone': self.tc_requests.adversary_phone_asset,
        'url': self.tc_requests.adversary_url_asset,
    }

    # handle invalid input
    if asset_methods.get(asset_type.lower()) is None:
        self._handle_error(
            925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
        )

    return asset_methods[asset_type.lower()](self.unique_id, asset_id, action=action)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asset_type(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"asset_type\"), kwargs)", "def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)", "def assets(self, asset_type=None):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.adversary_handle_assets,\n 'phone': self.tc_requests.adversary_phone_assets,\n 'url': self.tc_requests.adversary_url_assets,\n }\n\n if asset_type is None:\n return self.tc_requests.adversary_assets(self.unique_id)\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id)", "def get_game_asset(collection_name, return_type=flask.Response):\n\n model = importlib.import_module('app.models.%s' % collection_name)\n A = model.Assets()\n\n if return_type == dict:\n return A.assets\n elif return_type == object:\n return A\n\n return A.request_response()", "def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)", "def api_asset_get():\n names = request.args.getlist(\"name\")\n\n result = []\n for name in names:\n asset = app.bank.get(name)\n if asset:\n result.append(asset)\n\n return jsonify(sorted(result)), 200", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def get(self, item_type):\n armor_type_id = 0 if item_type == \"light\" else 1\n where = f\"type={armor_type_id}\"\n return get_as_object(\"armor\", ArmorData, where=where)", "def specific_asset(self, asset: str) -> dict:\n \n specific_asset_url = self.network + bf_assets_url + asset\n\n response = query_blockfrost(specific_asset_url, self.api_key, self.proxies)\n \n return response", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def _get_asset_info(item, name):\n\n if name in item.assets:\n return item.assets[name]\n elif name.replace(\"B\", \"B0\") in item.assets:\n # Bx -> B0x\n return item.assets[name.replace(\"B\", \"B0\")]\n elif name.replace(\"B0\", \"B\") in item.assets:\n # B0x -> Bx\n return item.assets[name.replace(\"B0\", \"B\")]\n else:\n available = [key for key in item.assets.keys() if key not in [\"thumbnail\", \"overview\", \"info\", \"metadata\"]]\n raise KeyError(\"asset '%s' not found. Available assets: %s\" % (name, avaialable))", "def get_specific_amenity(amenity_id):\n data = storage.all('Amenity')\n name = 'Amenity.' 
+ amenity_id\n amenity = [v.to_dict() for k, v in data.items() if k == name]\n if len(amenity) != 1:\n abort(404)\n return jsonify(amenity[0])", "def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}", "def api_asset_list():\n return jsonify(app.bank.to_list()), 200", "def getOntologyItem(self, resource, oType=0):\n\n if isinstance(resource, int):\n resource = 'ontology/{0}/{1}'.format(resource, oType)\n\n res = self.getRequest(resource)\n onto = vsdModels.Ontology(**res)\n\n return onto", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def get(self, item_type, entry_name):\n armor = ArmorData(name=entry_name)\n if not hasattr(armor, 'level'):\n return {\"Error\": f\"'{entry_name}' not found in {item_type} armor. \"\n f\"Try this: '{NS.armor._path}/{item_type}/search/\"\n f\"{entry_name.replace(' ', '%20')}'\"\n }, 404\n return armor.associative_data()", "def get_asset_type_feature_value(self):\n if self.asset_class.investment_type == InvestmentType.Standard.STOCKS.value:\n return AssetFeatureValue.Standard.ASSET_TYPE_STOCK.get_object()\n elif self.asset_class.investment_type == InvestmentType.Standard.BONDS.value:\n return AssetFeatureValue.Standard.ASSET_TYPE_BOND.get_object()\n else:\n return AssetFeatureValue.objects.get_or_create(name=self.asset_class.investment_type.name,\n feature=AssetFeature.Standard.ASSET_TYPE.get_object())[0]", "def _handle_custom_award_download(self):\n self.tinyshield_models.extend(\n [\n {\n \"name\": \"agencies\",\n \"key\": \"filters|agencies\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"type\": {\"type\": \"enum\", \"enum_values\": [\"funding\", \"awarding\"], \"optional\": False},\n \"tier\": {\"type\": \"enum\", \"enum_values\": [\"toptier\", \"subtier\"], \"optional\": False},\n \"toptier_name\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"name\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n },\n },\n {\"name\": \"agency\", \"key\": \"filters|agency\", \"type\": \"integer\"},\n {\n \"name\": \"date_range\",\n \"key\": \"filters|date_range\",\n \"type\": \"object\",\n \"optional\": False,\n \"object_keys\": {\n \"start_date\": {\"type\": \"date\", \"default\": \"1000-01-01\"},\n \"end_date\": {\"type\": \"date\", \"default\": datetime.strftime(datetime.utcnow(), \"%Y-%m-%d\")},\n },\n },\n {\n \"name\": \"date_type\",\n \"key\": \"filters|date_type\",\n \"type\": \"enum\",\n \"enum_values\": [\"action_date\", \"last_modified_date\"],\n \"default\": \"action_date\",\n },\n {\n \"name\": \"place_of_performance_locations\",\n \"key\": \"filters|place_of_performance_locations\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"country\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n \"state\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"zip\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"district_original\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n \"district_current\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": 
True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n # TODO: To be removed in DEV-9966\n \"district\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"county\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"city\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n },\n },\n {\n \"name\": \"place_of_performance_scope\",\n \"key\": \"filters|place_of_performance_scope\",\n \"type\": \"enum\",\n \"enum_values\": [\"domestic\", \"foreign\"],\n },\n {\n \"name\": \"prime_award_types\",\n \"key\": \"filters|prime_award_types\",\n \"type\": \"array\",\n \"array_type\": \"enum\",\n \"min\": 0,\n \"enum_values\": list(award_type_mapping.keys()),\n },\n {\n \"name\": \"recipient_locations\",\n \"key\": \"filters|recipient_locations\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"country\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n \"state\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"zip\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"district_original\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n \"district_current\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n # TODO: To be removed in DEV-9966\n \"district\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"county\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"city\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n },\n },\n {\n \"name\": \"recipient_scope\",\n \"key\": \"filters|recipient_scope\",\n \"type\": \"enum\",\n \"enum_values\": (\"domestic\", \"foreign\"),\n },\n {\"name\": \"sub_agency\", \"key\": \"filters|sub_agency\", \"type\": \"text\", \"text_type\": \"search\"},\n {\n \"name\": \"sub_award_types\",\n \"key\": \"filters|sub_award_types\",\n \"type\": \"array\",\n \"array_type\": \"enum\",\n \"min\": 0,\n \"enum_values\": all_subaward_types,\n },\n ]\n )\n\n filter_all_agencies = False\n if str(self._json_request[\"filters\"].get(\"agency\", \"\")).lower() == \"all\":\n filter_all_agencies = True\n self._json_request[\"filters\"].pop(\"agency\")\n\n self._json_request = self.get_validated_request()\n custom_award_filters = self._json_request[\"filters\"]\n final_award_filters = {}\n\n # These filters do not need any normalization\n for key, value in custom_award_filters.items():\n if key in [\n \"recipient_locations\",\n \"recipient_scope\",\n \"place_of_performance_locations\",\n \"place_of_performance_scope\",\n ]:\n final_award_filters[key] = value\n\n if get_date_range_length(custom_award_filters[\"date_range\"]) > 366:\n raise InvalidParameterException(\"Invalid Parameter: date_range total days must be within a year\")\n\n final_award_filters[\"time_period\"] = [\n {**custom_award_filters[\"date_range\"], \"date_type\": custom_award_filters[\"date_type\"]}\n ]\n\n if (\n custom_award_filters.get(\"prime_award_types\") is None\n and custom_award_filters.get(\"sub_award_types\") is None\n ):\n raise InvalidParameterException(\n \"Missing one or more required body parameters: prime_award_types or sub_award_types\"\n )\n\n self._json_request[\"download_types\"] = []\n final_award_filters[\"prime_and_sub_award_types\"] = {}\n\n if custom_award_filters.get(\"prime_award_types\"):\n 
self._json_request[\"download_types\"].append(\"prime_awards\")\n final_award_filters[\"prime_and_sub_award_types\"][\"prime_awards\"] = custom_award_filters[\"prime_award_types\"]\n\n if custom_award_filters.get(\"sub_award_types\"):\n self._json_request[\"download_types\"].append(\"sub_awards\")\n final_award_filters[\"prime_and_sub_award_types\"][\"sub_awards\"] = custom_award_filters[\"sub_award_types\"]\n\n if \"agency\" in custom_award_filters:\n if \"agencies\" not in custom_award_filters:\n final_award_filters[\"agencies\"] = []\n\n if filter_all_agencies:\n toptier_name = \"all\"\n else:\n toptier_name = (\n ToptierAgency.objects.filter(toptier_agency_id=custom_award_filters[\"agency\"])\n .values(\"name\")\n .first()\n )\n if toptier_name is None:\n raise InvalidParameterException(f\"Toptier ID not found: {custom_award_filters['agency']}\")\n toptier_name = toptier_name[\"name\"]\n\n if \"sub_agency\" in custom_award_filters:\n final_award_filters[\"agencies\"].append(\n {\n \"type\": \"awarding\",\n \"tier\": \"subtier\",\n \"name\": custom_award_filters[\"sub_agency\"],\n \"toptier_name\": toptier_name,\n }\n )\n else:\n final_award_filters[\"agencies\"].append({\"type\": \"awarding\", \"tier\": \"toptier\", \"name\": toptier_name})\n\n if \"agencies\" in custom_award_filters:\n final_award_filters[\"agencies\"] = [\n val for val in custom_award_filters[\"agencies\"] if val.get(\"name\", \"\").lower() != \"all\"\n ]\n\n self._json_request[\"filters\"] = final_award_filters", "def get(self, context, type_name, artifact_id, get_any_artifact=False):\n session = api.get_session()\n return api.get(context, type_name, artifact_id,\n session, get_any_artifact)", "def get_type(self, asset=None):\n if asset is None or 'pc:type' not in asset.properties:\n return self.item.properties.get('pc:type')\n else:\n return asset.properties.get('pc:type')", "def get_asr_type(cfg):\n return cfg['ASR']['type']", "def listAssetTypes(self):\n return self.get_json('/assetType')", "def get(self, accounttype):\n # print(current_identity)\n accounttype = Accounttype.query\\\n .filter(Accounttype.name == accounttype).one()\n return jsonify(accounttype)", "def get(self, item_type, search_term):\n armor_type_id = 0 if item_type == \"light\" else 1\n where = f\"type={armor_type_id}\"\n return search(f\"armor\", search_term, where=where, model=ArmorData)", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "async def getConfigurationByType(self, type=None):\n payload = {}\n \n if type:\n payload[\"type\"] = type\n \n\n # Parameter validation\n schema = CatalogValidator.getConfigurationByType()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{type}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"type\",\"description\":\"type can be brands, categories 
etc.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"type\",\"description\":\"type can be brands, categories etc.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", type=type)\n query_string = await create_query_string(type=type)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{type}/\", type=type), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")" ]
[ "0.6318828", "0.63060737", "0.60831237", "0.60739774", "0.6061961", "0.5915308", "0.5778964", "0.5778964", "0.5712838", "0.5691071", "0.55540186", "0.55070484", "0.5468222", "0.5449952", "0.5360277", "0.5352107", "0.5230858", "0.5218395", "0.52064335", "0.519413", "0.51613396", "0.51548994", "0.51442456", "0.5093407", "0.5092728", "0.50803477", "0.5078343", "0.5059782", "0.505671", "0.5040102" ]
0.6370427
0
Delete the asset with the provided asset_id.
def delete_asset(self, asset_id, asset_type):
    return self.asset(asset_id, asset_type=asset_type, action='DELETE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_url_asset(self, asset_id):\n return self.delete_asset(asset_id, 'URL')", "def delete_handle_asset(self, asset_id):\n return self.delete_asset(asset_id, 'HANDLE')", "def delete_phone_asset(self, asset_id):\n return self.delete_asset(asset_id, 'PHONE')", "def delete(self, asset_uid):\n Script._validate_type(asset_uid, u'asset_uid', STR_TYPE, True)\n if (self._client.CLOUD_PLATFORM_SPACES or self._client.ICP_PLATFORM_SPACES) and \\\n self._if_deployment_exist_for_asset(asset_uid):\n raise WMLClientError(\n u'Cannot delete script that has existing deployments. Please delete all associated deployments and try again')\n\n if not self._ICP:\n response = requests.delete(self._href_definitions.get_asset_href(asset_uid), params=self._client._params(),\n headers=self._client._get_headers())\n else:\n response = requests.delete(self._href_definitions.get_asset_href(asset_uid), params=self._client._params(),\n headers=self._client._get_headers(), verify=False)\n if response.status_code == 200:\n return self._get_required_element_from_response(response.json())\n else:\n return self._handle_response(204, u'delete assets', response)", "def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))", "def delete(self, context, artifact_id):\n session = api.get_session()\n api.delete(context, artifact_id, session)", "def disconnect_asset(\n self, asset_id: Identifier, conn_id: str) -> None:\n site_id = asset_id.location()\n try:\n site = self._registry_client.get_site_by_id(site_id)\n except KeyError:\n raise RuntimeError(f'Site or store at site {site_id} not found')\n\n r = requests.delete(\n f'{site.endpoint}/connections/{conn_id}',\n params={'requester': self._site}, verify=self._verify,\n cert=self._cred)\n if not r.ok:\n raise RuntimeError('Could not disconnect asset')", "async def delete_artifact(self, artifact_id: UUID) -> None:\n try:\n await self._client.delete(f\"/artifacts/{artifact_id}\")\n except httpx.HTTPStatusError as e:\n if e.response.status_code == 404:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise", "def test_delete_asset(self):\n pass", "def delete_asset(location, filename):\r\n try:\r\n content = Transcript.get_asset(location, filename)\r\n contentstore().delete(content.get_id())\r\n log.info(\"Transcript asset %s was removed from store.\", filename)\r\n except NotFoundError:\r\n pass\r\n return StaticContent.compute_location(location.course_key, filename)", "def delete_asset_metadata(self, asset_key, user_id):\n def _internal_method(all_asset_info, asset_idx):\n \"\"\"\n Remove the item if it was found\n \"\"\"\n if asset_idx is None:\n raise ItemNotFoundError(asset_key)\n\n all_asset_info.pop(asset_idx)\n return all_asset_info\n\n try:\n self._update_course_assets(user_id, asset_key, _internal_method)\n return 1\n except ItemNotFoundError:\n return 0", "def asset_id(self, asset_id: str):\n if asset_id is None:\n raise ValueError(\"Invalid value for `asset_id`, must not be `None`\") # noqa: E501\n\n self._asset_id = asset_id", "def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200", "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def amenity_delete_by_id(amenity_id):\n\n 
fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})", "def remove_asset(self, name):\n if name in self.assets:\n del self.assets[name]", "def delete(self, agent_id):\n self._client.delete('scanners/1/agents/%(agent_id)s', path_params={'agent_id': agent_id})\n return True", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def delete(self, resource_id, file_id):\n d = Deposition.get(resource_id, user=current_user)\n\n # Sort files raise ForbiddenAction if not authorized\n df = d.remove_file(file_id)\n if df is None:\n abort(404, message=\"File does not exist\", status=404)\n df.delete()\n d.save()\n return \"\", 204", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete_file(self, file_id):\n self.drive_service.files().delete(fileId=file_id).execute()\n return file_id", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def remove_asset(self, short_name):\n del self._assets[short_name]", "def delete(self, vehicle_id=None):\n raise NotImplementedError()", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def remove_asset(urn: str, asset: str, validate_assets: bool) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_asset(asset)\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove assets\")\n if validate_assets:\n _abort_if_non_existent_urn(\n graph,\n asset,\n \"remove assets. Use --no-validate-assets if you want to turn off validation\",\n )\n for mcp in dataproduct_patcher.build():\n graph.emit(mcp)", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass" ]
[ "0.74729943", "0.7246753", "0.69871473", "0.65593284", "0.6440845", "0.62584275", "0.6234925", "0.61335707", "0.6087217", "0.6081945", "0.6062495", "0.6017092", "0.5817338", "0.57971597", "0.57559264", "0.57495433", "0.5735357", "0.57037294", "0.5631497", "0.5613766", "0.56083256", "0.56012326", "0.5574266", "0.5560319", "0.55551237", "0.5553262", "0.5547989", "0.5544627", "0.55416715", "0.55244565" ]
0.8115228
0
Delete the handle asset with the passed in id
def delete_handle_asset(self, asset_id):
    return self.delete_asset(asset_id, 'HANDLE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _id):", "def delete_url_asset(self, asset_id):\n return self.delete_asset(asset_id, 'URL')", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete_img(self, img_id):\n logger.debug('Function delete_img start')\n\n logger.info(\"Deleting img: \"+str(img_id))\n os.remove(self.img_path+str(img_id)+\".jpg\")\n\n logger.debug('Function delete_img end')", "def delete(self, cls, id):\n\n del FileStorage.__objects[key(cls, id)]", "def delete(self, id):\n raise NotImplementedError", "def delete(self, cls, id):\n pass", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, req, id):\n context = req.environ['nova.context']\n self._image_service.delete(context, id)\n return webob.exc.HTTPNoContent()", "def processDeleteCommand(self, objId):\n editor = self._parent\n obj = editor.findWithUUID(objId)\n if obj:\n print(\"DELETE FOR\",objId)\n # delete from object cache\n if objId in editor._total['objects']:\n del editor._total['objects'][objId]\n # clear uuid\n obj.opensim.uuid = \"\"\n scene = editor.get_current_scene()\n # unlink\n scene.objects.unlink(obj)\n editor.queueRedraw()", "def delete(self, handle):\n self.LogCommand()\n tclcode = \"stc::delete \" + handle\n\n result = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(result))\n return result", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(self,id):\r\n return delete(id=id)", "def del_handle(self, handle):\n\n self.pool.remove(handle)", "def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True", "def get_handle_asset(self, asset_id):\n return self.get_asset(asset_id, 'HANDLE')", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n db.session.delete(photo)\n db.session.commit()\n return make_response(\n \"Photo {id} deleted\".format(id=id), 200\n )\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def test_delete_asset(self):\n pass", "def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE 
request\n return requests.delete(self.REQUEST_URL + str(self.args.id))", "def delete_image(self, image_id):\r\n self.vgbdtg.deleteObject(id=image_id)", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete_phone_asset(self, asset_id):\n return self.delete_asset(asset_id, 'PHONE')" ]
[ "0.69067293", "0.67395705", "0.67243016", "0.667569", "0.6663656", "0.66624874", "0.66511434", "0.6590958", "0.65704906", "0.6553715", "0.65452945", "0.6521744", "0.6521466", "0.64901394", "0.6475231", "0.6463747", "0.64565384", "0.64330906", "0.6426006", "0.6403002", "0.6397246", "0.63951623", "0.6386506", "0.6373441", "0.63606405", "0.63593245", "0.6344874", "0.63201535", "0.6246884", "0.6236161" ]
0.88750905
0
Delete the phone asset with the passed in id
def delete_phone_asset(self, asset_id):
    return self.delete_asset(asset_id, 'PHONE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _id):", "def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, id):\n raise NotImplementedError", "def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def delete(self,id):\r\n return delete(id=id)", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(self, id: int):\n self._select_interface(self._rc_delete, self._http_delete, id)", "def delete(self, id):\n return self._call('%s.delete' % self._shopware_model, [int(id)])", "def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "def delete_url_asset(self, asset_id):\n return self.delete_asset(asset_id, 'URL')", "def delete(self, id_):\n \n db.products.remove({'_id': ObjectId(id_)})\n return '', 204", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, request):\n try:\n data = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest('Not valid JSON!')\n Mobile.objects.filter(id=data.get('id')).delete()\n response = HttpResponse(status=200)\n return response", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def delete_item(id):\n return '', 201", "def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)", "def delete(self, cls, id):\n pass", "def delete(self, id: str) -> Any:\n\n return self.client.delete(self._url(id))", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def delete_amenities_id(amenity_id):\n my_object = storage.get('Amenity', amenity_id)\n if my_object is not None:\n storage.delete(my_object)\n storage.save()\n else:\n abort(404)\n return jsonify({}), 200" ]
[ "0.70042676", "0.6886521", "0.6831829", "0.67502534", "0.66984457", "0.66818535", "0.66647756", "0.6660624", "0.6656917", "0.65741885", "0.649827", "0.6489182", "0.64801604", "0.6471101", "0.64628196", "0.63978475", "0.63930076", "0.6368251", "0.6368251", "0.6368251", "0.6368251", "0.6368251", "0.6362703", "0.63599914", "0.6344496", "0.6333736", "0.63235915", "0.6319284", "0.63178307", "0.63045293" ]
0.8706615
0
Delete the url asset with the passed in id
def delete_url_asset(self, asset_id):
    return self.delete_asset(asset_id, 'URL')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __Delete(self, url, id = None):\n\n conn = self.__GetConnection()\n if (id != None):\n url += \"/\" + str(id)\n conn.request(\"DELETE\", url, \"\", self.__MakeHeaders(True))\n response = conn.getresponse()\n self.__CheckResponse(response)", "def delete(self, _id):", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))", "def delete(self, request, url_id, *args, **kwargs):\n url_instance = self.get_object(url_id, request.user.id)\n if not url_instance:\n return Response(\n {\"detail\": \"Object with url id does not exists\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n url_instance.delete()\n return Response(\n {\"detail\": \"Object deleted!\"}, status=status.HTTP_200_OK\n )", "def delete(self, id):\n raise NotImplementedError", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def _http_delete(self, id: int):\n self._http_request(\"pl_delete&id=%i\" % id)\n self.get_playlist()", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete(short_id):\n try:\n url = Url.get(short_id)\n except:\n return jsonify({\"Error\", \"No Such ID\"})\n\n url.delete()\n return jsonify({\"statusCode\": 301,})", "def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True", "def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "def unlink(self, link_id):", "def delete(self,id):\r\n return delete(id=id)", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, cls, id):\n pass", "async def delete_url(self, url: StrOrURL):\n await self.delete(self.create_key('GET', url))", "async def remove_img(self, ctx: BBContext, url: str):\n\n con = await ctx.get_connection()\n query = f'DELETE FROM {TABLE_ARTS} WHERE url = $1'\n\n await con.execute(query, url)\n await ctx.tick(True)", "def delete(url, data=None, **_):\n # Checks input parameters\n assert '/process/%s' % dummy_id in url in url", "def delete_img(self, img_id):\n logger.debug('Function delete_img start')\n\n logger.info(\"Deleting img: \"+str(img_id))\n os.remove(self.img_path+str(img_id)+\".jpg\")\n\n logger.debug('Function delete_img end')", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete_handle_asset(self, asset_id):\n return self.delete_asset(asset_id, 'HANDLE')", "def test_delete_asset(self):\n pass", "def delete(self, cls, id):\n\n del FileStorage.__objects[key(cls, id)]", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)", 
"def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()" ]
[ "0.7567766", "0.7203758", "0.712468", "0.7097657", "0.7070156", "0.70602894", "0.6884165", "0.68797266", "0.68733", "0.6866615", "0.68551326", "0.68472385", "0.6839838", "0.6823005", "0.6816135", "0.6811832", "0.68054557", "0.6770216", "0.67586267", "0.6758277", "0.6753747", "0.6729922", "0.67184705", "0.67133987", "0.67085594", "0.6655941", "0.6655484", "0.6639812", "0.6568062", "0.65571904" ]
0.84215575
0
Get the asset with the provided asset_id & asset_type.
def get_asset(self, asset_id, asset_type):
    return self.asset(asset_id, asset_type=asset_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def asset(self, asset_id, asset_type, action='GET'):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.adversary_handle_asset,\n 'phone': self.tc_requests.adversary_phone_asset,\n 'url': self.tc_requests.adversary_url_asset,\n }\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id, asset_id, action=action)", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def get_asset(self, asset_class):\n\n # E1101 = instance of 'Page' has no 'assets' member\n # pylint: disable-msg=E1101\n\n if isinstance(asset_class, AssetClass):\n return self.assets.get(asset_class__pk = asset_class.pk)\n else:\n return self.assets.get(asset_class__name = asset_class)\n\n # pylint: enable-msg=E1101", "def retrieve_asset(self, site_id: Identifier, asset_id: Identifier\n ) -> Asset:\n try:\n site = self._registry_client.get_site_by_id(site_id)\n except KeyError:\n raise RuntimeError(f'Site or store at site {site_id} not found')\n\n if site.has_store:\n safe_asset_id = quote(asset_id, safe='')\n r = requests.get(\n f'{site.endpoint}/assets/{safe_asset_id}',\n params={'requester': self._site},\n verify=self._verify, cert=self._cred)\n if r.status_code == 404:\n raise KeyError('Asset not found')\n elif not r.ok:\n raise RuntimeError('Server error when retrieving asset')\n\n asset_json = r.json()\n validate_json('Asset', asset_json)\n return deserialize(Asset, asset_json)\n\n raise ValueError(f'Site {site_id} does not have a store')", "def asset_type(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"asset_type\"), kwargs)", "def retrieve_asset(self, sid, default_none=False):\n try:\n asset = self._asset_cache[sid]\n if asset is None and not default_none:\n raise SidsNotFound(sids=[sid])\n return asset\n except KeyError:\n return self.retrieve_all((sid,), default_none=default_none)[0]", "def get_asset(collection=None, _id=None, **params):\n\n if collection == 'images':\n return models.images.Image(_id=_id)\n elif collection == 'figure':\n return models.figures.Figure(_id=_id, **params)\n elif collection == 'figures':\n return models.figures.Figure(_id=_id, **params)\n elif collection == 'posts':\n return models.posts.Post(_id=_id, **params)\n elif collection == 'post':\n return models.posts.Post(_id=_id, **params)\n elif collection == 'paint':\n return models.posts.Paint(_id=_id, **params)\n elif collection == 'paints':\n return models.posts.Paint(_id=_id, **params)\n elif collection == 'attachment':\n return models.posts.Attachment(_id=_id, **params)\n elif collection == 'attachments':\n return models.posts.Attachment(_id=_id, **params)\n elif collection == 'tag':\n return models.posts.Tag(_id=_id, **params)\n elif collection == 'tags':\n return models.posts.Tag(_id=_id, **params)\n\n raise ValueError('get_asset() is not supported for %s yet!' 
% collection)", "def get_asset(self, asset_class, mime_type):\n\n # E1101 = instance of 'Message' has no 'assets' member\n # pylint: disable-msg=E1101\n\n logging.info('getting %s %s for Message %s (%s)' %(asset_class, mime_type, self, self.pk))\n if isinstance(asset_class, AssetClass):\n return self.assets.get(asset_class__pk = asset_class.pk, \n mime_type__name = mime_type)\n else:\n all = self.assets.filter(asset_class__name = asset_class, \n mime_type__name = mime_type).all()\n logging.info(all)\n if len(all):\n return all[0] \n else:\n logging.info('NO ASSET %s %s for Message %s (%s)' %(asset_class, mime_type, self, self.pk))\n return None", "def get_asset(self, name):\n assert self.has_asset(name), \"Asset is not created yet, use has_asset for checking\"\n return self.assets[name]", "def assets(self, asset_type=None):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.adversary_handle_assets,\n 'phone': self.tc_requests.adversary_phone_assets,\n 'url': self.tc_requests.adversary_url_assets,\n }\n\n if asset_type is None:\n return self.tc_requests.adversary_assets(self.unique_id)\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id)", "def getAssetWithName(self, name):\n return self.__assets[name]", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def get_asset(self, id):\n\n if not isinstance(id, six.string_types):\n raise ValueError('Param \"id\" must be a str|unicode.')\n\n asset = self.stub.get_asset(opac_pb2.TaskId(id=id))\n\n return {\n 'file': asset.file,\n 'filename': asset.filename,\n 'type': asset.type,\n 'metadata': asset.metadata,\n 'task_id': asset.task_id\n }", "def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "def _get_image(self, asset_id):\n try:\n return self.app.module_map.uploader.get(asset_id)\n except AssetNotFound:\n return None\n except Exception, e:\n return None\n return None", "def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)", "def get_url_asset(self, asset_id):\n return self.get_asset(asset_id, 'URL')", "def get_asset(location, filename):\r\n return contentstore().find(Transcript.asset_location(location, filename))", "def specific_asset(self, asset: str) -> dict:\n \n specific_asset_url = self.network + bf_assets_url + asset\n\n response = query_blockfrost(specific_asset_url, self.api_key, self.proxies)\n \n return response", "def get_game_asset(collection_name, return_type=flask.Response):\n\n model = importlib.import_module('app.models.%s' % collection_name)\n A = model.Assets()\n\n if return_type == dict:\n return A.assets\n elif return_type == object:\n return A\n\n return A.request_response()", "def get_asset(self, short_name):\n return self._assets[short_name]", "def get_handle_asset(self, asset_id):\n return self.get_asset(asset_id, 'HANDLE')", "def asset_info(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/info',\n path_params={'asset_id': asset_id})\n return AssetInfo.from_dict(loads(response.text).get('info'))", "def _retrieve_assets(self, sids, 
asset_tbl, asset_type):\n # Fastpath for empty request.\n if not sids:\n return {}\n\n cache = self._asset_cache\n hits = {}\n\n querying_equities = issubclass(asset_type, Equity)\n filter_kwargs = (\n _filter_equity_kwargs\n if querying_equities else\n _filter_future_kwargs\n )\n\n rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)\n for row in rows:\n sid = row['sid']\n asset = asset_type(**filter_kwargs(row))\n hits[sid] = cache[sid] = asset\n\n # If we get here, it means something in our code thought that a\n # particular sid was an equity/future and called this function with a\n # concrete type, but we couldn't actually resolve the asset. This is\n # an error in our code, not a user-input error.\n misses = tuple(set(sids) - viewkeys(hits))\n if misses:\n if querying_equities:\n raise EquitiesNotFound(sids=misses)\n else:\n raise FutureContractsNotFound(sids=misses)\n return hits", "def get_user_asset(collection=None, asset_id=None):\n\n if collection == \"settlement\":\n return settlements.Settlement(_id=asset_id)\n elif collection == \"survivor\":\n return survivors.Survivor(_id=asset_id, normalize_on_init=True)\n elif collection == \"user\":\n return users.User(_id=asset_id)\n\n raise utils.InvalidUsage(\"Collection '%s' does not exist!\" % collection, status_code=422)", "def get(self, context, type_name, artifact_id, get_any_artifact=False):\n session = api.get_session()\n return api.get(context, type_name, artifact_id,\n session, get_any_artifact)" ]
[ "0.7914934", "0.7901728", "0.75574785", "0.7353109", "0.7211423", "0.70776683", "0.707332", "0.6760442", "0.6684911", "0.6505347", "0.6489349", "0.64671385", "0.63019156", "0.62640876", "0.625495", "0.625495", "0.61809045", "0.61496", "0.59755635", "0.5967861", "0.5857224", "0.58441055", "0.5810152", "0.580552", "0.58004016", "0.5769995", "0.5756368", "0.5738654", "0.5720739", "0.55982506" ]
0.91324383
0
Get the handle asset with the passed in id
def get_handle_asset(self, asset_id):
    return self.get_asset(asset_id, 'HANDLE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def _get_image(self, asset_id):\n try:\n return self.app.module_map.uploader.get(asset_id)\n except AssetNotFound:\n return None\n except Exception, e:\n return None\n return None", "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def delete_handle_asset(self, asset_id):\n return self.delete_asset(asset_id, 'HANDLE')", "def image_by_id(self, id):\n if not id:\n return None\n return next((image for image in self.images() if image['Id'] == id),\n None)", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)", "def get_image_by_id(id):\n return Image.objects.get(id=id)", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def get_handle_from_gramps_id(self, gid):\n obj = self.dbstate.db.get_object_from_gramps_id(gid)\n if obj:\n return obj.get_handle()\n else:\n return None", "def get_image(self, record_id):\n for item in self.order_items:\n img = item.get_image()\n if img is None: return None\n if img.get_recordId() == record_id:\n return img", "def get_object(id):", "def get_image(self, record_id):\n \n for img in self.img_lst:\n if img.get_recordId() == str(record_id):\n return img", "def get_image_by_id(id):\n return ImageModel.query.filter(ImageModel.id == id) \\\n .first()", "def add_handle_asset(self, value):\n return self.add_asset('HANDLE', value)", "def get_itemByImageId(self, record_id):\n for item in self.order_items:\n img = item.get_image()\n if img.get_itemId() == record_id:\n return item", "def handle_assets(self):\n return self.assets(asset_type='HANDLE')", "def get_url_asset(self, asset_id):\n return self.get_asset(asset_id, 'URL')", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_asset(self, id):\n\n if not isinstance(id, six.string_types):\n raise ValueError('Param \"id\" must be a str|unicode.')\n\n asset = self.stub.get_asset(opac_pb2.TaskId(id=id))\n\n return {\n 'file': asset.file,\n 'filename': asset.filename,\n 'type': asset.type,\n 'metadata': asset.metadata,\n 'task_id': asset.task_id\n }", "def retrieve_asset(self, sid, default_none=False):\n try:\n asset = self._asset_cache[sid]\n if asset is None and not default_none:\n raise SidsNotFound(sids=[sid])\n return asset\n except KeyError:\n return self.retrieve_all((sid,), default_none=default_none)[0]", "def get_handle(self, pid):\r\n self._raise_unless_has_pid(pid)\r\n return self._translate_line_to_handle(self._raw[pid])", "def _get_file_by_id(id):\n query = \"\"\"SELECT * FROM files WHERE id = (:id) LIMIT 1\"\"\"\n param_obj = {'id': id}\n return _execute(query, param_obj)", "def get(self, cls, id):\n\n return FileStorage.__objects[key(cls, id)]", "def get_asset(self, name):\n assert self.has_asset(name), \"Asset is not created yet, use has_asset for checking\"\n return self.assets[name]", "def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file", "def getAssetWithName(self, name):\n return self.__assets[name]", "def get_asset_info(self, id):\n\n if not isinstance(id, six.string_types):\n msg = 'Param id must be a str|unicode.'\n logger.exception(msg)\n raise 
ValueError(msg)\n\n asset_info = self.stub.get_asset_info(opac_pb2.TaskId(id=id))\n\n return {\n 'url': asset_info.url,\n 'url_path': asset_info.url_path\n }", "def get_by_id(dataobj_id):\n results = list(get_data_dir().rglob(f\"{dataobj_id}-*.md\"))\n return results[0] if results else None", "def get(self, _id):" ]
[ "0.70908076", "0.69447607", "0.6808168", "0.6650068", "0.6532557", "0.6532282", "0.6431671", "0.6348124", "0.6248202", "0.6170924", "0.61310107", "0.6119488", "0.6116648", "0.6098846", "0.60746133", "0.60710746", "0.6030209", "0.6010597", "0.5991148", "0.59439325", "0.5866047", "0.57989556", "0.57912487", "0.5761919", "0.57214415", "0.5694982", "0.5675828", "0.5651286", "0.5642718", "0.56361896" ]
0.8778855
0
Get the phone asset with the passed in id
def get_phone_asset(self, asset_id):
    return self.get_asset(asset_id, 'PHONE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def image_by_id(self, id):\n if not id:\n return None\n return next((image for image in self.images() if image['Id'] == id),\n None)", "def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)", "def get_image_by_id(id):\n return Image.objects.get(id=id)", "def get_object(id):", "def get(self, id):\n return Matstamm.find_by_id(id)", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def phone_assets(self):\n return self.assets(asset_type='PHONE')", "def get_asset(self, id):\n\n if not isinstance(id, six.string_types):\n raise ValueError('Param \"id\" must be a str|unicode.')\n\n asset = self.stub.get_asset(opac_pb2.TaskId(id=id))\n\n return {\n 'file': asset.file,\n 'filename': asset.filename,\n 'type': asset.type,\n 'metadata': asset.metadata,\n 'task_id': asset.task_id\n }", "def get(self, _id):", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def get_record(self, id: uplink.Path):\n pass", "def _get_image(self, asset_id):\n try:\n return self.app.module_map.uploader.get(asset_id)\n except AssetNotFound:\n return None\n except Exception, e:\n return None\n return None", "def get_droid(root, _info, id):\n return droid_data.get(id)", "def get_device_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n for i in self.devices:\n if self.devices[i].id == id:\n return self.devices[i]\n raise RuntimeError, \"Device not found\"", "def get_item(self, mediaId):\n headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.get(\n self.client.url + '/media/' + mediaId,\n headers = headers\n )\n return json.loads(response.text)", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def read_one(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n\n # Serialize the data for the response\n photo_schema = PhotoSchema()\n data = photo_schema.dump(photo)\n return data\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def get_image_by_id(id):\n return ImageModel.query.filter(ImageModel.id == id) \\\n .first()", "def delete_phone_asset(self, asset_id):\n return self.delete_asset(asset_id, 'PHONE')", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "def get_itemByImageId(self, record_id):\n for item in self.order_items:\n img = item.get_image()\n if img.get_itemId() == record_id:\n return item", "def getArmy(self, id):\n return self.__armies[id];", "def get_volume_from_id(item_id):\n return volumes[\"data\"][str(item_id)]", "def getContactById(self, id):\n for contact in self.contacts:\n if contact.id == id:\n return contact\n if self.profile:\n if self.profile.id == id:\n return self.profile\n\n return None", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_asset_info(self, id):\n\n if not isinstance(id, 
six.string_types):\n msg = 'Param id must be a str|unicode.'\n logger.exception(msg)\n raise ValueError(msg)\n\n asset_info = self.stub.get_asset_info(opac_pb2.TaskId(id=id))\n\n return {\n 'url': asset_info.url,\n 'url_path': asset_info.url_path\n }" ]
[ "0.6800033", "0.6385424", "0.6134431", "0.61205536", "0.6080133", "0.59958726", "0.5989127", "0.59726167", "0.5938593", "0.589432", "0.58850074", "0.58735317", "0.58695924", "0.5851934", "0.5848409", "0.5826806", "0.5805052", "0.57644814", "0.5744167", "0.5718924", "0.5718649", "0.57170767", "0.5711538", "0.56817454", "0.56810844", "0.5671641", "0.5663169", "0.5648484", "0.56453484", "0.5643827" ]
0.8484552
0
Get the url asset with the passed in id
def get_url_asset(self, asset_id):
    return self.get_asset(asset_id, 'URL')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def get_by_id(id: UUID) -> UrlModel:\n url = UrlModel.query.filter_by(id=id).first_or_404()\n\n return url", "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def GetFileAssetUrl(aid: maxon.Id) -> maxon.Url:\n # Bail when the asset ID is invalid.\n if not isinstance(aid, maxon.Id) or aid.IsEmpty():\n raise RuntimeError(f\"{aid = } is not a a valid asset ID.\")\n\n # Get the user repository, a repository which contains almost all assets, and try to find the\n # asset description, a bundle of asset metadata, for the given asset ID in it.\n repo: maxon.AssetRepositoryRef = maxon.AssetInterface.GetUserPrefsRepository()\n if repo.IsNullValue():\n raise RuntimeError(\"Could not access the user repository.\")\n \n asset: maxon.AssetDescription = repo.FindLatestAsset(\n maxon.AssetTypes.File(), aid, maxon.Id(), maxon.ASSET_FIND_MODE.LATEST)\n if asset.IsNullValue():\n raise RuntimeError(f\"Could not find file asset for {aid}.\")\n\n # When an asset description has been found, return the URL of that asset in the \"asset:///\"\n # scheme for the latest version of that asset.\n return maxon.AssetInterface.GetAssetUrl(asset, True)", "def get_image_url(self, image_id):\n if image_id in self.image_id_to_url:\n return self.image_id_to_url[image_id]\n return None", "def _get_image(self, asset_id):\n try:\n return self.app.module_map.uploader.get(asset_id)\n except AssetNotFound:\n return None\n except Exception, e:\n return None\n return None", "def get_image(self, image_id):\n index = int(image_id)\n if index >= len(self._image_urls):\n message = \"Url index does not exist: '%i'\" % index\n return (None, message)\n url = self._image_urls[index]\n message = \"Successful URL found.\"\n return (url, message)", "def get_url_from_id(doc_id):\n return f\"{BASE_URL}/view/{doc_id}\"", "def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)", "def get_url(self, image_id):\n key = image_id if image_id else self.default_image\n if key:\n return u'{bucket_url}{key}'.format(\n bucket_url=self.connection.bucket_url,\n key=self.id_to_key(key))\n else:\n return None", "def get_image_url():", "def get_asset_info(self, id):\n\n if not isinstance(id, six.string_types):\n msg = 'Param id must be a str|unicode.'\n logger.exception(msg)\n raise ValueError(msg)\n\n asset_info = self.stub.get_asset_info(opac_pb2.TaskId(id=id))\n\n return {\n 'url': asset_info.url,\n 'url_path': asset_info.url_path\n }", "def get_url(self):\n raise NotImplementedError(\"This asset does not have a URL\")", "def get_image_by_id(id):\n return Image.objects.get(id=id)", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def get_player_url(id):\n return JAFC_M3U8_TEMPLATE.format(id)", "def __construct_url_from_id(_video_id):\n return f\"{core.get_base_url(api_base=False)}/videos/{_video_id}\"", "def get_api_file_url(file_id):\n api_url = get_api_url()\n return f\"{api_url}/files/{file_id}\"", "def get_url(self, asset, force_save=None):\n if force_save is None:\n force_save = not settings.DEBUG\n asset = AdaptiveAsset(asset)\n if not force_save:\n try:\n return asset.get_url()\n except NotImplementedError:\n pass\n return self._storage.url(self.get_name(asset))", "def image_by_id(self, id):\n if not id:\n return None\n return next((image for image in self.images() if 
image['Id'] == id),\n None)", "def source_image_link(self, image_id):\n return self.image_info[image_id][\"path\"]", "def source_image_link(self, image_id):\n return self.image_info[image_id][\"path\"]", "def get_vidurl(self):\n if self.assets is None:\n self.get_assets()\n \n df = self.assets\n des = df.loc[(df['container']==self.container) & (df['display_name']==self.resolution), 'url']\n if des.shape[0] == 1:\n self.vidurl = des.iloc[0].replace('.bin',f'.{self.container}')\n return self.vidurl", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def generate_url(self, campaign_id):\n pass", "def delete_url_asset(self, asset_id):\n return self.delete_asset(asset_id, 'URL')", "def matchidtourl(cardid):\n with open(\"finalmatchedurls.json\", \"r\") as fin:\n data = json.loads(fin.read())\n d = dict(data)\n url = d[cardid]\n return url", "def _create_url(self, event_id: str, path: str) -> str | None:\n if path == \"thumbnail.jpg\":\n return str(URL(self._host) / f\"api/events/{event_id}/thumbnail.jpg\")\n\n if path == \"snapshot.jpg\":\n return str(URL(self._host) / f\"api/events/{event_id}/snapshot.jpg\")\n\n camera = path.split(\"/\")[0]\n if path.endswith(\"clip.mp4\"):\n return str(URL(self._host) / f\"clips/{camera}-{event_id}.mp4\")", "def _assets_url(self):\r\n return \"/assets/\" + self._course_key + \"/\"", "def get_api_file_content_url(file_id):\n return f\"{get_api_file_url(file_id)}/content\"" ]
[ "0.7117298", "0.6588282", "0.6444789", "0.642756", "0.6422066", "0.63971066", "0.63964444", "0.62739843", "0.62609327", "0.62437016", "0.6228531", "0.6162225", "0.6123414", "0.60973865", "0.6085558", "0.60849786", "0.606551", "0.6038394", "0.60178864", "0.601603", "0.6014131", "0.6014131", "0.6002736", "0.5997513", "0.5975447", "0.5957502", "0.59562975", "0.5891271", "0.5870192", "0.5864086" ]
0.83747166
0
Return all of the handle assets
def handle_assets(self): return self.assets(asset_type='HANDLE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assets():", "def returnAllAssets(self):\n return self.__assets", "def assets():\n pass", "def getAssets(self):\n return self.assets", "def assets(self):\n return self._assets.values()", "def get_all_assets(self):\n return c4d.documents.GetAllAssets(self._document, False, '')", "def assets(self):\n # N.B.:\n # we don't cache the results of the {root} exploration so we can always\n # return a result that reflects the current contents of the filesystem\n yield from self._explore()\n # all done\n return", "def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]", "def _get_assets(self, quote):\n assets = self.assets.copy()\n assets.remove(quote)\n base = assets[0]\n return self._get_asset(quote), self._get_asset(base)", "def resources(self):", "def children(self) -> \"AssetList\":\n return self._cognite_client.assets.list(parent_ids=[self.id], limit=None)", "def assets(self, pattern, handler):\n return self.route(GET, pattern, handler)", "def list(self, _: List[str]) -> Tuple[str, FileSystemStorage]:\n files = self._get_bulma_css()\n files.extend(self._get_custom_css())\n files.extend(self._get_bulma_js())\n\n for path in files:\n yield path, self.storage", "def getimgs():", "def assets(self):\n static = self.static\n if static is None:\n return None\n\n assets = os.path.join(static, 'assets')\n if not os.path.isdir(assets):\n return None\n\n return assets", "def get_resources(self):\n return []", "def url_assets(self):\n return self.assets(asset_type='URL')", "def get_assets(self):\n # The size of the price_data list should not change, even when updated\n price_data_length = len(self.price_data)\n\n for index in itertools.cycle(range(price_data_length)):\n try:\n yield self.price_data[index]\n except IndexError:\n yield None", "def populateGallery():\n\n # Set the UI parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # List all assets in the direcoty\n assetList = [directory for directory in os.listdir(AC.ASSETS_PATH) if os.path.isdir(os.path.join(AC.ASSETS_PATH, directory))]\n\n # Create a ButtonIcon for each asset\n for asset in assetList:\n addButtonIcon(asset)", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def resources(self):\n return self.__resources", "def get_handle_asset(self, asset_id):\n return self.get_asset(asset_id, 'HANDLE')", "def assets(self):\n if self._assets_cache:\n return self._assets_cache\n asset_defaults = Section.assets.value\n ret_val = dict([(obj.name, obj.value) for obj in asset_defaults])\n config_vals = self._section_as_dict(Section.assets.name)\n ret_val.update(config_vals)\n self._assets_cache = AttrDict(**ret_val)\n return self._assets_cache", "def resources(self):\r\n return self.page.object_list", "def get_extra_assets(self):\n asset_list = []\n if self.extra_assets is None:\n return []\n return [ self.complete_static_filename(asset) \\\n for asset in self.extra_assets ]", "def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": 
r.content.decode('utf-8')}", "def assets(self):\n return SpaceAssetsProxy(self._client, self.id)", "def _on_hires_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_hires()", "def getImageList(self):\n ps = getToolByName(self.context, 'portal_skins')\n folder = self.context.unrestrictedTraverse('/'.join(ps.getPhysicalPath()) + '/custom-logos')\n return folder.values()", "def hbObjects(self):\r\n return self.__hbObjs" ]
[ "0.71726817", "0.68086195", "0.6718517", "0.66561484", "0.6643501", "0.65299517", "0.63601494", "0.6312885", "0.6121192", "0.6073097", "0.6004909", "0.5978438", "0.591822", "0.5915304", "0.5890802", "0.5866422", "0.5828185", "0.5782407", "0.5778089", "0.57529914", "0.57443565", "0.57184505", "0.56925017", "0.5681018", "0.56711465", "0.5656743", "0.56136554", "0.56054544", "0.5604846", "0.55982864" ]
0.84628445
0
Return all of the phone assets
def phone_assets(self): return self.assets(asset_type='PHONE')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_assets(self):\n return c4d.documents.GetAllAssets(self._document, False, '')", "def returnAllAssets(self):\n return self.__assets", "def getAssets(self):\n return self.assets", "def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}", "def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]", "def assets(self):\n return self._assets.values()", "def children(self) -> \"AssetList\":\n return self._cognite_client.assets.list(parent_ids=[self.id], limit=None)", "def get_phone_asset(self, asset_id):\n return self.get_asset(asset_id, 'PHONE')", "def get_queryset(self):\n return self.request.user.assets.all()", "def assets():", "def get_assets(self):\n # This includes a kludge to get the objectiveBankId directly from\n # this Activity's Objective's private _my_map :o\n from ..repository.objects import AssetList\n if not self.is_asset_based_activity():\n raise IllegalState()\n url_str = (self._base_url + '/objectivebanks/' +\n self.get_objective()._my_map['objectiveBankId'] +\n '/assets/bulk?id=' + '&id='.join(self._my_map['assetIds']))\n return AssetList(self._load_json(url_str))", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def get_available_assets(limit: int = 100) -> List:\n\n js = request_and_jsonize_calm(f\"https://api.coincap.io/v2/assets\", params={'limit': str(limit)})\n\n assets = []\n for asset in js['data']:\n assets.append(\n {\n 'id': asset['id'],\n 'symbol': asset['symbol'],\n 'name': asset['name']\n }\n )\n return assets", "def api_asset_list():\n return jsonify(app.bank.to_list()), 200", "def get_resources(self):\n return []", "def sporecastAssets(sporecastId, start=0, length=20):\n url = \"%s/rest/assets/sporecast/%s/%i/%i\" % (serverString, sporecastId, start, length)\n doc = minidom.parseString(urllib.urlopen(url).read().decode(\"utf-8\", \"ignore\").encode(\"ascii\", \"xmlcharrefreplace\"))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n assets = []\n for element in doc.getElementsByTagName(\"asset\"):\n assets += [Asset()]\n assets[-1]._getInfoFromNode(element)\n return assets", "def get_assets(self):\n findstr = r'W\\.iframeInit\\({\"assets\":(\\[.*\\])'\n try:\n page = str(requests.get(self.srcpage).content, 'utf-8')\n asset_search = re.search(findstr, page)\n if asset_search:\n assets = asset_search.group(1)\n try:\n assets = json.loads(assets)\n except ValueError:\n print(\"Error loading JSON string\")\n self.assets = pd.DataFrame(assets)\n return self.assets\n else:\n raise AssetNotFoundError\n except:\n print(\"Failed to get asset information from page.\\nCheck video ID.\")", "def assets():\n pass", "def resources(self):\r\n return self.page.object_list", "def getAssets(self, start=None, length=20):\n if start == None:\n start = len(self.assets)\n doc = minidom.parse(urllib.urlopen(\"%s/rest/assets/user/%s/%i/%i\" % (serverString, self.name, 
start, length)))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n for element in doc.getElementsByTagName(\"asset\"):\n self.assets += [Asset()]\n self.assets[-1]._getInfoFromNode(element)", "def get_entries(self):\n return self.find_by_device_description({\n \"manufacturer\": \"Cambridge Audio\",\n \"deviceType\": \"urn:schemas-upnp-org:device:MediaRenderer:1\"\n })", "def api_asset_get():\n names = request.args.getlist(\"name\")\n\n result = []\n for name in names:\n asset = app.bank.get(name)\n if asset:\n result.append(asset)\n\n return jsonify(sorted(result)), 200", "def pic (self, list) : \n result = []\n for pmod in list :\n result.append (pmod.photo_uri)\n return result", "def get_all_jsons():\r\n res = get_all_mps_ids()\r\n for id in res.keys():\r\n get_mp_json_from_file(id)", "def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))", "def read_all_ram(self):\n return self.RAM", "def read_all():\n # Create the list of photos from our data\n photos = Photo.query.order_by(Photo.sample_id).all()\n\n # Serialize the data for the response\n photo_schema = PhotoSchema(many=True)\n data = photo_schema.dump(photos)\n return data", "def get_json_assets(self):\n qn = gql_helper.NodeGQL(\"allAuthorities\", [ 'assets { assetId routerName nodeName t128Version status assetIdConfigured text failedStatus }' ],\n [ ], top_level=True, debug=self.debug)\n json_reply={}\n query_status = qn.send_query(self.gql_token, json_reply)\n if not query_status == 200:\n print('\\n*** Unable to query conductor assets ***\\n')\n sys.exit(1)\n\n flatter_json = qn.flatten_json(json_reply, \"/allAuthorities/assets\")\n #match_string=f\"node.assets[*]\"\n #flatter_json = jmespath.search(match_string, json_reply)\n\n if self.debug:\n print('........ flattened list ..........')\n pprint.pprint(flatter_json)\n\n self.json_assets = flatter_json\n return query_status", "def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources", "def handle_assets(self):\n return self.assets(asset_type='HANDLE')" ]
[ "0.6741875", "0.66295177", "0.64593947", "0.6341789", "0.6338924", "0.62795734", "0.60422313", "0.6029229", "0.5980535", "0.59802115", "0.58091706", "0.5757311", "0.5744994", "0.57162285", "0.56765515", "0.56604904", "0.564866", "0.5614903", "0.5594506", "0.5583894", "0.55418193", "0.5533227", "0.5512887", "0.55099034", "0.5504481", "0.5501769", "0.5489469", "0.5481477", "0.54195195", "0.5412805" ]
0.8454118
0
Return all of the url assets
def url_assets(self): return self.assets(asset_type='URL')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getURLs():", "def get_urls():\r\n return []", "def assets():", "def urls(self) -> list[str]:\r\n ...", "def get_urls(type, assets=\"default\"):\n return [\n default_asset_cache.get_url(asset)\n for asset in StaticAsset.load(type, assets)\n ]", "def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]", "def returnAllAssets(self):\n return self.__assets", "def getAssets(self):\n return self.assets", "def assets():\n pass", "def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)", "def get_all_assets(self):\n return c4d.documents.GetAllAssets(self._document, False, '')", "def urls(self):\n return self._list_urls()", "def all_urls(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/allUrls/')))", "def assets(self):\n return self._assets.values()", "def _assets_url(self):\r\n return \"/assets/\" + self._course_key + \"/\"", "def get_extra_assets(self):\n asset_list = []\n if self.extra_assets is None:\n return []\n return [ self.complete_static_filename(asset) \\\n for asset in self.extra_assets ]", "def assets(self):\n static = self.static\n if static is None:\n return None\n\n assets = os.path.join(static, 'assets')\n if not os.path.isdir(assets):\n return None\n\n return assets", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def get_resource_urls():\n base_url = 'http://developer.pardot.com/'\n pattern = re.compile(\n r'(?ims)\\<a [^>]*?href=\"(kb/api-version-3/[^>]*?/)\"[^>]*?\\>'\n r'[^<]*?\\</a\\>')\n response = requests.get(base_url)\n return [\n '%s/%s' % (base_url, url) for url in pattern.findall(response.text)]", "def get_urls(self, **kwargs):\n pass # pragma: no cover", "async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images", "def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def get_json_urls(self):\n gi = GetImageURLs(self.json_url)\n self.urls = gi.get_image_url()\n\n # Turn it into a Python set\n self.urls_from_json = Set(self.urls)", "def get_urls(db):\n return db.meta.find_one({'name':\"urls\"})['urls']", "def get_urls(self):\n return patterns('')", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > 
args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def get_links_from_url(url):\n return [get_base(url)]", "def getRessources(self, page):\n\n res = HTTP.RES_REGEX.findall(self.get[page]['data'])\n to_fetch = []\n\n # Regex give location, extension and parameters of each field\n for loc,ext,param in res:\n if ext in ['html', 'htm', 'php']:\n if loc.startswith('../'):\n # Fetch res with relative path\n loc = os.path.normpath(os.path.join(os.path.dirname(page), loc))\n to_fetch.append(loc)\n elif loc.startswith('/'):\n # Fetch res with absolute path\n to_fetch.append(loc[1:])\n else: continue # External res not fetched\n return to_fetch" ]
[ "0.7462337", "0.7460199", "0.74512386", "0.74139196", "0.73395413", "0.7084657", "0.70750874", "0.69541717", "0.6926014", "0.6827585", "0.68090737", "0.6759235", "0.6714712", "0.66763306", "0.66452646", "0.65941185", "0.65488195", "0.64891714", "0.645243", "0.64495164", "0.6408921", "0.63588905", "0.6346953", "0.63402516", "0.63327175", "0.63271564", "0.63243955", "0.6322756", "0.63079196", "0.63020223" ]
0.8349477
0
A function to undistort the distorted images in a folder.
def undistort(basedir, img_extension, output_dir, output_prefix, calibration, distortion, output_image_shape=(640, 480), scaling_param=1): search = os.path.join(basedir, '*'+img_extension) img_paths = glob.glob(search) img_paths.sort() print("Number of Images: ", len(img_paths)) maxlen = len(img_paths) if maxlen == 0: raise IOError( 'No images were found (maybe wrong \'image extension\' parameter?)') if not os.path.exists(os.path.dirname(output_dir)): os.makedirs(os.path.dirname(output_dir)) for img_idx, img_path in enumerate(img_paths): img = cv2.imread(img_path, 1) height, width, _ = img.shape new_camera_matrix = calibration # scaling parameter between 0 (when all the pixels in the undistorted image are valid) # and 1 (when all the source image pixels are retained in the undistorted image) new_camera_mtx, roi = cv2.getOptimalNewCameraMatrix( calibration, distortion, (width, height), scaling_param, output_image_shape) print("calibration", calibration) print("new_camera_matrix", new_camera_matrix) # undistort mapx, mapy = cv2.initUndistortRectifyMap( calibration, distortion, None, new_camera_mtx, output_image_shape, 5) dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR) # crop the image x, y, w, h = roi dst = dst[y:y+h, x:x+w] output_path = output_dir+output_prefix+'_%d' % img_idx+img_extension print(output_path) cv2.imwrite(output_path, dst) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")", "def undistort_img(img, mtx, dist, debug=False):\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n if (debug):\n window_name = \"Undistorted Image\"\n cv2.imshow('Undistorted Image', undist)\n cv2.moveWindow(\"Undistorted Image\", 10, 50);\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return undist", "def undistort(self, image):\n dst = cv2.undistort(image, self.mtx, self.dist_coeff, None)\n\n if self.args.is_test:\n self.image_logger.save_image(dst, 'undistorted')\n images = [[{'title': 'Original', 'data': image},\n {'title': 'Undistorted', 'data': dst}]]\n self.image_logger.plot_results(images)\n return dst", "def undistort_image(mtx_, dist_, img_):\n dst = cv2.undistort(img_, mtx_, dist_, None, mtx_)\n return dst", "def undistort(self,src):\n # note: no check over src.shape and self.size\n return cv2.undistort(src,self.K,self.dist)", "def undistort(self, image):\n return cv2.undistort(image, self.mtx, self.dst, None, self.mtx)", "def undistort(img, mtx, dist):\n return cv2.undistort(img, mtx, dist, None, mtx)", "def undistort(self, image):\n return cv2.undistort(image, self.camera_matrix, self.distortion_coeffs, None, self.camera_matrix)", "def distort_images(self, images, seed):\n if self.mode == \"train\":\n images = image_processing.distort_image(images, seed)\n\n # Rescale to [-1,1] instead of [0, 1]\n images = tf.subtract(images, 0.5)\n images = tf.multiply(images, 2.0)\n return images", "def cal_undist(self, img = None):\n return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def un_distort_image(image):\n global _remap_x, _remap_y\n image = cv2.UMat(image)\n res = cv2.remap(image, _remap_x, _remap_y, cv2.INTER_LINEAR) # 进行remap\n res = res.get()\n return res", "def undistort_image(frame, mtx, dist, display=True):\r\n frame_undistorted = cv2.undistort(frame, mtx, dist, newCameraMatrix=mtx)\r\n\r\n if display:\r\n fig, ax = plt.subplots(nrows=1, ncols=2)\r\n # fig.suptitle('Undistort Image Before & After')\r\n ax[0].set_title('Before calibration')\r\n ax[1].set_title('After calibration')\r\n ax[0].imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n ax[1].imshow(cv2.cvtColor(frame_undistorted, cv2.COLOR_BGR2RGB))\r\n \r\n #for comparing camera undistorted\r\n plt.savefig('../output_images/undistort_image_before_to_after.jpg',dpi=300)\r\n plt.show()\r\n\r\n return frame_undistorted", "def undistort_image(self, img, calibration_dict: dict):\n if img is None:\n return None\n\n if 'mtx' not in calibration_dict or 'dist' not in calibration_dict:\n raise ValueError('Missing mtx or dist in calibration dictionary.')\n\n return cv2.undistort(img, calibration_dict['mtx'], calibration_dict['dist'], None, calibration_dict['mtx'])", "def __call__(self, img):\n if self.camera_matrix is not None and self.distortion_coef is not None:\n return cv2.undistort(\n img, self.camera_matrix, self.distortion_coef, None, self.camera_matrix)\n else:\n print(\"You should calculate Camera Matrix and Distortion coefficient first!\")\n return img", "def undistort_points(points, K, dist):\n return 
cv2.undistortPoints(points, K, dist)", "def div_imgs(dir_path: str, holdout: int) -> None:\n VH_REGEX = re.compile(r'(.*)_([0-9]+).vh.tif')\n\n for file in os.listdir(dir_path):\n m = re.match(VH_REGEX, file)\n if not m:\n continue\n\n pre, num = m.groups()\n vv = f'{pre}_{num}.vv.tif'\n mask = f'{pre}_{num}.mask.tif'\n\n if not os.path.isfile(os.path.join(dir_path, mask)):\n print(f\"Tile: {file} is missing a mask {mask}!\")\n\n if not os.path.isfile(os.path.join(dir_path, vv)):\n print(f\"Tile: {file} is missing a mask {vv}!\")\n\n test_or_train = 'train' if random.random() > holdout else 'test'\n\n folder = os.path.join(dir_path, test_or_train)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n os.rename(\n os.path.join(dir_path, file), os.path.join(folder, file)\n )\n os.rename(\n os.path.join(dir_path, vv),\n os.path.join(folder, vv)\n )\n os.rename(\n os.path.join(dir_path, mask),\n os.path.join(folder, mask)\n )", "def distorted_inputs():\n if not FLAGS.dir_data:\n raise ValueError('Please supply a dir_data')\n\n dir_data = os.path.join(FLAGS.dir_data, 'batches', 'train_batch')\n images, labels = Unet_input.distorted_inputs(dir_data=dir_data, batch_size=FLAGS.batch_size)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n return images, labels", "def remove_bad_images(path_images):\n images = sorted(os.listdir(path_images))\n for k in range(len(images)):\n os.remove(path_images + images[k])", "def distorted_inputs():\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = FLAGS.data_dir\n if FLAGS.batches_dir.strip():\n print(\"putting on the batches\")\n data_dir = os.path.join(FLAGS.data_dir, FLAGS.batches_dir)\n\n print(\"The data dir is {} in distorted_inputs\".format(data_dir))\n images, labels = qpNet_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size, distort=False)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n # binaries the labels if necessary:\n print(\"Binarising here\")\n labels = binariseTheLabels(labels)\n\n return images, labels", "def remove_unactionable_images(data):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if is_useful(path, 0.5) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)\n else:\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, product + '.tiff'))", "def normalise_images(self, folder_name, max_possible_intensity=65535.0):\n normalised_folder_name = folder_name + '_normalised'\n\n try:\n print(\"Making dir \" + str(normalised_folder_name) + \" for normalisation\")\n os.mkdir(normalised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this normalisation??\")\n return\n\n minimum_intensity, maximum_intensity = self.get_min_max_values(folder_name)\n intensity_scaling_factor = max_possible_intensity / float(maximum_intensity - minimum_intensity)\n print(\"Factor = \" + str(intensity_scaling_factor))\n\n print(\"Writing to folder: \" + str(normalised_folder_name))\n photo_list = self.get_photo_list(folder_name, '*.png')\n for i, name in enumerate(photo_list):\n file_name = folder_name + '/' + name\n normalised_image_name = normalised_folder_name + '/' + 
name\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH)\n subtracted_image = np.maximum(np.subtract(image.astype(np.int32), np.full(np.shape(image),\n minimum_intensity.astype(np.int32))),\n np.zeros(np.shape(image)))\n\n normalised_image = (subtracted_image * intensity_scaling_factor)\n cv2.imwrite(normalised_image_name, normalised_image.astype(np.uint16))", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def _undistort_lookup(self, xy: np.ndarray, density: Number = 1) -> np.ndarray:\n # Estimate undistorted camera coordinate bounds\n uv_edges = self.imgsz * np.array(\n [[0, 0], [0.5, 0], [1, 0], [1, 0.5], [1, 1], [0.5, 1], [0, 1], [0, 0.5]]\n )\n xyu_edges = (uv_edges - (self.imgsz / 2 + self.c)) / self.f\n xyd_edges = self._distort(xyu_edges)\n # Build undistorted camera coordinates on regular grid\n ux = np.linspace(\n min(xyu_edges[:, 0].min(), xyd_edges[:, 0].min()),\n max(xyu_edges[:, 0].max(), xyd_edges[:, 0].max()),\n int(density * self.imgsz[0]),\n )\n uy = np.linspace(\n min(xyu_edges[:, 1].min(), xyd_edges[:, 1].min()),\n max(xyu_edges[:, 1].max(), xyd_edges[:, 1].max()),\n int(density * self.imgsz[1]),\n )\n UX, UY = np.meshgrid(ux, uy)\n uxy = np.column_stack((UX.flatten(), UY.flatten()))\n # Distort grid\n dxy = self._distort(uxy)\n # Interpolate distortion removal from gridded results\n # NOTE: Cannot use faster grid interpolation because dxy is not regular\n return scipy.interpolate.griddata(dxy, uxy, xy, method=\"linear\")", "def _distort_resize(image, image_size):\n distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])\n 
distorted_image = tf.image.random_flip_left_right(distorted_image)\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n distorted_image = tf.image.random_contrast(\n distorted_image, lower=0.2, upper=1.8)\n distorted_image.set_shape([image_size, image_size, 3])\n return distorted_image", "def clean(allimages, alldirs):\n\n for img in allimages:\n # Delete HTML files\n htmlfn = join(opts.root, img._dir._path, img._pagefn)\n if exists(htmlfn):\n if opts.verbose:\n print \"Deleting\", htmlfn\n try:\n os.unlink(htmlfn)\n except:\n print >> sys.stderr, \"Error: deleting\", htmlfn\n\n # Delete thumbnails\n if img._thumbfn:\n thumbfn = join(opts.root, img._thumbfn)\n if exists(thumbfn):\n if opts.verbose:\n print \"Deleting\", thumbfn\n try:\n os.unlink(thumbfn)\n img._thumbfn = None\n except:\n print >> sys.stderr, \"Error: deleting\", thumbfn\n\n for d in alldirs:\n files = dircache.listdir(join(opts.root, d._path))\n\n # Delete HTML files in directories\n for f in files:\n fn = join(opts.root, d._path, f)\n if f in [ dirindex_fn, allindex_fn, allcidx_fn,\n sortindex_fn, css_fn ] or \\\n f.startswith('trackindex-'):\n if opts.verbose:\n print \"Deleting\", fn\n try:\n os.unlink(fn)\n pass\n except:\n print >> sys.stderr, \"Error: deleting\", fn\n\n if f == index_fn and islink(fn):\n os.unlink(fn)", "def _undistort(\n self, xy: np.ndarray, method: str = \"oulu\", **kwargs: Any\n ) -> np.ndarray:\n # X = (X' - dt) / dr\n if not any(self.k) and not any(self.p):\n return xy\n if self.k[0] and not any(self.k[1:]) and not any(self.p):\n return self._undistort_k1(xy)\n if method == \"lookup\":\n return self._undistort_lookup(xy, **kwargs)\n if method == \"oulu\":\n return self._undistort_oulu(xy, **kwargs)\n if method == \"regulafalsi\":\n return self._undistort_regulafalsi(xy, **kwargs)\n raise ValueError(f\"Undistort method not supported: {method}\")", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def distorted_inputs():\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n return images, labels", "def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)" ]
[ "0.8213168", "0.7284235", "0.72337604", "0.7060535", "0.6990136", "0.6985227", "0.6975924", "0.69152296", "0.6569945", "0.62887996", "0.6255778", "0.61799085", "0.61173475", "0.593172", "0.5870097", "0.5704076", "0.5546572", "0.55459356", "0.55330724", "0.5527427", "0.5504683", "0.55012435", "0.5446705", "0.52968013", "0.52935547", "0.5283992", "0.5283517", "0.5277852", "0.52717674", "0.5271187" ]
0.7427232
1
Test that no sensors in deconz results in no sensor entities.
async def test_no_binary_sensors(hass, aioclient_mock): await setup_deconz_integration(hass, aioclient_mock) assert len(hass.states.async_all()) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_sensor_without_unit(client, sensor_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 'sensor_state{domain=\"sensor\",'\n 'entity=\"sensor.trend_gradient\",'\n 'friendly_name=\"Trend Gradient\"} 0.002' in body\n )\n\n assert (\n 'sensor_state{domain=\"sensor\",'\n 'entity=\"sensor.text\",'\n 'friendly_name=\"Text\"} 0' not in body\n )\n\n assert (\n 'sensor_unit_text{domain=\"sensor\",'\n 'entity=\"sensor.text_unit\",'\n 'friendly_name=\"Text Unit\"} 0' not in body\n )", "async def test_no_sensors(hass, mock_bridge):\n mock_bridge.allow_groups = True\n mock_bridge.mock_sensor_responses.append({})\n await setup_bridge(hass, mock_bridge)\n assert len(mock_bridge.mock_requests) == 1\n assert len(hass.states.async_all()) == 0", "def test_no_source_measurements(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def test_temperatures_when_data_is_not_present(self):\n\n tt = TemperatureTracker()\n result = tt.temperatures()\n self.assertEqual(result, [])", "async def test_no_transactions(self):\n response = await self.collect(get_request_text=\"\")\n self.assert_measurement(response, value=\"0\")", "async def test_no_transactions(self):\n response = await self.collect(get_request_json_return_value={})\n self.assert_measurement(response, value=\"0\")", "async def test_no_transactions(self):\n response = await self.collect(get_request_json_return_value={})\n self.assert_measurement(response, value=\"0\")", "async def test_setup_missing_config(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: {\"platform\": DOMAIN}}\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0", "def test_get_device_detects_none(hass, mock_openzwave):\n node = MockNode()\n value = MockValue(data=0, node=node)\n values = MockEntityValues(primary=value, node=node)\n\n device = cover.get_device(hass=hass, node=node, values=values, node_config={})\n assert device is None", "def test_door_no_data(self):\n door = Door({})\n\n assert door.warning is None\n assert door.closed is None\n assert door.locked is None", "async def test_sensor_without_forecast(\n hass: HomeAssistant, entity_registry_enabled_by_default: None\n) -> None:\n await init_integration(hass)\n registry = er.async_get(hass)\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state == \"3200.0\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_ICON) == \"mdi:weather-fog\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfLength.METERS\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.DISTANCE\n\n entry = registry.async_get(\"sensor.home_cloud_ceiling\")\n assert entry\n assert entry.unique_id == \"0123456-ceiling\"\n assert entry.options[\"sensor\"] == {\"suggested_display_precision\": 0}\n\n state = hass.states.get(\"sensor.home_precipitation\")\n assert state\n assert state.state == \"0.0\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR\n )\n assert state.attributes.get(ATTR_ICON) is None\n assert state.attributes.get(\"type\") is None\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n assert (\n 
state.attributes.get(ATTR_DEVICE_CLASS)\n == SensorDeviceClass.PRECIPITATION_INTENSITY\n )\n\n entry = registry.async_get(\"sensor.home_precipitation\")\n assert entry\n assert entry.unique_id == \"0123456-precipitation\"\n\n state = hass.states.get(\"sensor.home_pressure_tendency\")\n assert state\n assert state.state == \"falling\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_ICON) == \"mdi:gauge\"\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.ENUM\n assert state.attributes.get(ATTR_STATE_CLASS) is None\n assert state.attributes.get(ATTR_OPTIONS) == [\"falling\", \"rising\", \"steady\"]\n\n entry = registry.async_get(\"sensor.home_pressure_tendency\")\n assert entry\n assert entry.unique_id == \"0123456-pressuretendency\"\n assert entry.translation_key == \"pressure_tendency\"\n\n state = hass.states.get(\"sensor.home_realfeel_temperature\")\n assert state\n assert state.state == \"25.1\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_realfeel_temperature\")\n assert entry\n assert entry.unique_id == \"0123456-realfeeltemperature\"\n\n state = hass.states.get(\"sensor.home_uv_index\")\n assert state\n assert state.state == \"6\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UV_INDEX\n assert state.attributes.get(\"level\") == \"High\"\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_uv_index\")\n assert entry\n assert entry.unique_id == \"0123456-uvindex\"\n\n state = hass.states.get(\"sensor.home_apparent_temperature\")\n assert state\n assert state.state == \"22.8\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_apparent_temperature\")\n assert entry\n assert entry.unique_id == \"0123456-apparenttemperature\"\n\n state = hass.states.get(\"sensor.home_cloud_cover\")\n assert state\n assert state.state == \"10\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n assert state.attributes.get(ATTR_ICON) == \"mdi:weather-cloudy\"\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_cloud_cover\")\n assert entry\n assert entry.unique_id == \"0123456-cloudcover\"\n\n state = hass.states.get(\"sensor.home_dew_point\")\n assert state\n assert state.state == \"16.2\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_dew_point\")\n assert entry\n assert entry.unique_id == \"0123456-dewpoint\"\n\n state = 
hass.states.get(\"sensor.home_realfeel_temperature_shade\")\n assert state\n assert state.state == \"21.1\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_realfeel_temperature_shade\")\n assert entry\n assert entry.unique_id == \"0123456-realfeeltemperatureshade\"\n\n state = hass.states.get(\"sensor.home_wet_bulb_temperature\")\n assert state\n assert state.state == \"18.6\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_wet_bulb_temperature\")\n assert entry\n assert entry.unique_id == \"0123456-wetbulbtemperature\"\n\n state = hass.states.get(\"sensor.home_wind_chill_temperature\")\n assert state\n assert state.state == \"22.8\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n\n entry = registry.async_get(\"sensor.home_wind_chill_temperature\")\n assert entry\n assert entry.unique_id == \"0123456-windchilltemperature\"\n\n state = hass.states.get(\"sensor.home_wind_gust_speed\")\n assert state\n assert state.state == \"20.3\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == UnitOfSpeed.KILOMETERS_PER_HOUR\n )\n assert state.attributes.get(ATTR_ICON) is None\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.WIND_SPEED\n\n entry = registry.async_get(\"sensor.home_wind_gust_speed\")\n assert entry\n assert entry.unique_id == \"0123456-windgust\"\n\n state = hass.states.get(\"sensor.home_wind_speed\")\n assert state\n assert state.state == \"14.5\"\n assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == UnitOfSpeed.KILOMETERS_PER_HOUR\n )\n assert state.attributes.get(ATTR_ICON) is None\n assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.WIND_SPEED\n\n entry = registry.async_get(\"sensor.home_wind_speed\")\n assert entry\n assert entry.unique_id == \"0123456-wind\"", "def test_light_no_data(self):\n light = Light({})\n\n assert light.warning is None\n assert light.off is None", "def test_no_el():\n levels = np.array([959., 867.9, 779.2, 647.5, 472.5, 321.9, 251.]) * units.mbar\n temperatures = np.array([22.2, 17.4, 14.6, 1.4, -17.6, -39.4, -52.5]) * units.celsius\n dewpoints = np.array([19., 14.3, -11.2, -16.7, -21., -43.3, -56.7]) * units.celsius\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def test_count_when_data_is_not_present(self):\n\n temp_data = []\n\n tt = 
TemperatureTracker()\n result = tt.count_from(temp_data)\n self.assertEqual(result, 0)", "def test_accept_missing_sources_as_tech_debt_expired(self):\n metric = Metric(\n self.DATA_MODEL,\n {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True, \"debt_end_date\": \"2020-01-01\"},\n METRIC_ID,\n )\n measurement = self.measurement(metric)\n self.assertIsNone(measurement.status())", "def test_setup_with_invalid_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None", "async def test_view_empty_namespace(client, sensor_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert \"# HELP python_info Python platform information\" in body\n assert (\n \"# HELP python_gc_objects_collected_total \"\n \"Objects collected during gc\" in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.radio_energy\",'\n 'friendly_name=\"Radio Energy\"} 1.0' in body\n )\n\n assert (\n 'last_updated_time_seconds{domain=\"sensor\",'\n 'entity=\"sensor.radio_energy\",'\n 'friendly_name=\"Radio Energy\"} 86400.0' in body\n )", "def testPluginNoError(self):\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=0)\n task.run(cat, exposure)\n source = cat[0]\n self.assertFalse(source.get(self.algName + \"_flag\"))\n self.assertFalse(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))", "def test_accept_missing_sources_as_tech_debt(self):\n metric = Metric(self.DATA_MODEL, {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True}, METRIC_ID)\n measurement = self.measurement(metric)\n self.assertEqual(\"debt_target_met\", measurement.status())", "def test_status_missing(self):\n measurement = Measurement(self.metric())\n self.assertEqual(None, measurement.status())", "def test_get_node_sensors(self):\n pass", "def test_empty_apply(self):\n dev = qml.device(\"orquestra.qiskit\", wires=2, analytic=False)\n\n assert dev.apply([]) is None", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def test_no_telemetry(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n self.assertEqual([], json.loads(response.content))", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def test_watchdogs_no_relevant(self):\n\n # distillery with no categories\n distillery = Distillery.objects.get_by_natural_key('mongodb.test_database.test_docs')\n distillery.collection.insert = Mock(return_value=self.mock_doc_id)\n\n doc_id = distillery._save_and_send_signal(self.data)\n\n alerts = Alert.objects.all()\n 
self.assertEqual(alerts.count(), 0)\n self.assertEqual(doc_id, self.mock_doc_id)", "async def test_device_not_accessible(hass):\n with patch.object(axis.device, \"get_device\", side_effect=axis.errors.CannotConnect):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def test_getESD_no_model(self):\n\n\t\tesd = self.watcher.get_ESD(model=self.model, layer=self.second_layer)\n\t\tself.assertEqual(len(esd), 576)", "async def test_nr_of_missing_metrics_without_reports(self):\n self.set_source_parameter(\"reports\", [])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )" ]
[ "0.69611347", "0.6781437", "0.6709309", "0.6360453", "0.63589764", "0.6296304", "0.6296304", "0.62603533", "0.62527657", "0.6221658", "0.62147963", "0.61684513", "0.6140223", "0.6115561", "0.6073571", "0.6066095", "0.60566604", "0.6052553", "0.60229397", "0.59628767", "0.59416056", "0.59282243", "0.592643", "0.5899918", "0.5891024", "0.58896667", "0.5877748", "0.5866361", "0.58648014", "0.5864418" ]
0.7260665
0
Test that adding a new binary sensor works.
async def test_add_new_binary_sensor(hass, aioclient_mock, mock_deconz_websocket): event_added_sensor = { "t": "event", "e": "added", "r": "sensors", "id": "1", "sensor": { "id": "Presence sensor id", "name": "Presence sensor", "type": "ZHAPresence", "state": {"presence": False}, "config": {"on": True, "reachable": True}, "uniqueid": "00:00:00:00:00:00:00:00-00", }, } await setup_deconz_integration(hass, aioclient_mock) assert len(hass.states.async_all()) == 0 await mock_deconz_websocket(data=event_added_sensor) await hass.async_block_till_done() assert len(hass.states.async_all()) == 1 assert hass.states.get("binary_sensor.presence_sensor").state == STATE_OFF
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_sensor_defaults_binary(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"binary_sensor.test_monitored\",\n \"state_characteristic\": \"count\",\n \"sampling_size\": 20,\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_BINARY:\n hass.states.async_set(\n \"binary_sensor.test_monitored\",\n value,\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == str(len(VALUES_BINARY))\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None\n assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT\n assert state.attributes.get(\"buffer_usage_ratio\") == round(9 / 20, 2)\n assert state.attributes.get(\"source_value_valid\") is True\n assert \"age_coverage_ratio\" not in state.attributes", "async def test_binary_sensor(client, binary_sensor_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 'binary_sensor_state{domain=\"binary_sensor\",'\n 'entity=\"binary_sensor.door\",'\n 'friendly_name=\"Door\"} 1.0' in body\n )\n\n assert (\n 'binary_sensor_state{domain=\"binary_sensor\",'\n 'entity=\"binary_sensor.window\",'\n 'friendly_name=\"Window\"} 0.0' in body\n )", "def test_add_sensor(self):\n #empty platform\n p2 = Platform(\"platform 2\", \"p2\", [], [], [])\n\n # procedure object\n proc1 = Procedure(\"procedure 1\", \"proc1\")\n proc2 = Procedure(\"procedure 2\", \"proc2\")\n # list of procedures\n proList = [proc1, proc2]\n # observable property object\n obs1 = ObservableProperty(\"obs-property1\", \"obs-property\")\n obs2 = ObservableProperty(\"obs-property2\", \"obs-property2\")\n obs3 = ObservableProperty(\"obs-property3\", \"obs-property3\")\n # list of observable properties\n obsList = [obs1, obs2]\n\n # sensor object\n s1 = Sensor(\"Sensor 1\", \"first sensor\", obsList, proList)\n\n p2.add_sensor(s1)", "def test_add_device(self):\n\n pass", "def test_binary_sensor(self):\n with patch.dict(TYPES, {'BinarySensor': self.mock_type}):\n state = State('binary_sensor.opening', 'on',\n {ATTR_DEVICE_CLASS: 'opening'})\n get_accessory(None, state, 2, {})", "async def test_add_new_binary_sensor_ignored(\n hass, aioclient_mock, mock_deconz_websocket\n):\n sensor = {\n \"name\": \"Presence sensor\",\n \"type\": \"ZHAPresence\",\n \"state\": {\"presence\": False},\n \"config\": {\"on\": True, \"reachable\": True},\n \"uniqueid\": \"00:00:00:00:00:00:00:00-00\",\n }\n event_added_sensor = {\n \"t\": \"event\",\n \"e\": \"added\",\n \"r\": \"sensors\",\n \"id\": \"1\",\n \"sensor\": sensor,\n }\n\n config_entry = await setup_deconz_integration(\n hass,\n aioclient_mock,\n options={CONF_MASTER_GATEWAY: True, CONF_ALLOW_NEW_DEVICES: False},\n )\n\n assert len(hass.states.async_all()) == 0\n\n await mock_deconz_websocket(data=event_added_sensor)\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 0\n assert not hass.states.get(\"binary_sensor.presence_sensor\")\n\n entity_registry = er.async_get(hass)\n assert (\n len(async_entries_for_config_entry(entity_registry, config_entry.entry_id)) == 0\n )\n\n aioclient_mock.clear_requests()\n data = {\"groups\": {}, \"lights\": {}, \"sensors\": {\"1\": sensor}}\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await 
hass.services.async_call(DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH)\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 1\n assert hass.states.get(\"binary_sensor.presence_sensor\")", "async def test_binary_sensor(\n hass: HomeAssistant, requests_mock: requests_mock.Mocker\n) -> None:\n with patch(\n \"ring_doorbell.Ring.active_alerts\",\n return_value=[\n {\n \"kind\": \"motion\",\n \"doorbot_id\": 987654,\n \"state\": \"ringing\",\n \"now\": time(),\n \"expires_in\": 180,\n }\n ],\n ):\n await setup_platform(hass, \"binary_sensor\")\n\n motion_state = hass.states.get(\"binary_sensor.front_door_motion\")\n assert motion_state is not None\n assert motion_state.state == \"on\"\n assert motion_state.attributes[\"device_class\"] == \"motion\"\n\n ding_state = hass.states.get(\"binary_sensor.front_door_ding\")\n assert ding_state is not None\n assert ding_state.state == \"off\"", "async def test_airzone_create_binary_sensors(hass: HomeAssistant) -> None:\n\n await async_init_integration(hass)\n\n # Systems\n state = hass.states.get(\"binary_sensor.system_1_problem\")\n assert state.state == STATE_OFF\n\n # Zones\n state = hass.states.get(\"binary_sensor.despacho_air_demand\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.despacho_battery_low\")\n assert state.state == STATE_ON\n\n state = hass.states.get(\"binary_sensor.despacho_floor_demand\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.despacho_problem\")\n assert state.state == STATE_ON\n assert state.attributes.get(\"errors\") == [API_ERROR_LOW_BATTERY]\n\n state = hass.states.get(\"binary_sensor.dorm_1_air_demand\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_1_battery_low\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_1_floor_demand\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_1_problem\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_2_air_demand\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_2_battery_low\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_2_floor_demand\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.dorm_2_problem\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_ppal_air_demand\")\n assert state.state == STATE_ON\n\n state = hass.states.get(\"binary_sensor.dorm_ppal_battery_low\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dorm_ppal_floor_demand\")\n assert state.state == STATE_ON\n\n state = hass.states.get(\"binary_sensor.dorm_ppal_problem\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.salon_air_demand\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.salon_battery_low\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.salon_floor_demand\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.salon_problem\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.airzone_2_1_battery_low\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.airzone_2_1_problem\")\n assert state.state == STATE_OFF\n\n state = hass.states.get(\"binary_sensor.dkn_plus_battery_low\")\n assert state is None\n\n state = hass.states.get(\"binary_sensor.dkn_plus_problem\")\n assert state.state == STATE_OFF", 
"def test_create_device_data(self):\n pass", "async def test_switch_added(hass: HomeAssistant, mock_bridge_v2) -> None:\n await mock_bridge_v2.api.load_test_data([FAKE_DEVICE, FAKE_ZIGBEE_CONNECTIVITY])\n\n await setup_platform(hass, mock_bridge_v2, \"switch\")\n\n test_entity_id = \"switch.hue_mocked_device_motion\"\n\n # verify entity does not exist before we start\n assert hass.states.get(test_entity_id) is None\n\n # Add new fake entity (and attached device and zigbee_connectivity) by emitting events\n mock_bridge_v2.api.emit_event(\"add\", FAKE_BINARY_SENSOR)\n await hass.async_block_till_done()\n\n # the entity should now be available\n test_entity = hass.states.get(test_entity_id)\n assert test_entity is not None\n assert test_entity.state == \"on\"\n\n # test update\n updated_resource = {**FAKE_BINARY_SENSOR, \"enabled\": False}\n mock_bridge_v2.api.emit_event(\"update\", updated_resource)\n await hass.async_block_till_done()\n test_entity = hass.states.get(test_entity_id)\n assert test_entity is not None\n assert test_entity.state == \"off\"", "async def async_add_binary_sensor(mac):\n if USB_MOTION_ID in api_stick.devices[mac].features:\n _LOGGER.debug(\"Add binary_sensors for %s\", mac)\n async_add_entities([USBBinarySensor(api_stick.devices[mac])])\n\n # Register services\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_SCAN,\n {\n vol.Required(ATTR_SCAN_SENSITIVITY_MODE): vol.In(\n SCAN_SENSITIVITY_MODES\n ),\n vol.Required(ATTR_SCAN_RESET_TIMER): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=240)\n ),\n vol.Required(ATTR_SCAN_DAYLIGHT_MODE): cv.boolean,\n },\n \"_service_configure_scan\",\n )\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_BATTERY,\n {\n vol.Required(ATTR_SED_STAY_ACTIVE): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=120)\n ),\n vol.Required(ATTR_SED_SLEEP_FOR): vol.All(\n vol.Coerce(int), vol.Range(min=10, max=60)\n ),\n vol.Required(ATTR_SED_MAINTENANCE_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=5, max=1440)\n ),\n vol.Required(ATTR_SED_CLOCK_SYNC): cv.boolean,\n vol.Required(ATTR_SED_CLOCK_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=60, max=10080)\n ),\n },\n \"_service_configure_battery_savings\",\n )", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def test_create_device1(self):\n pass", "def test_upload_binary(self):\n uploadFile = os.path.join(testdatadir, \"upload.data.gz\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED", "def test_add_devicedata(client):\n client.socketio.emit('devicedata', {'data': '1500000010,1|12,22,1'})\n\n res = client.socketio.get_received()\n\n message_exist = False\n for message in res:\n if message['name'] == 'status':\n message_exist = True\n assert message['args'][0]['id'] == 1\n assert message['args'][0]['time'] == '2017-07-14T02:40:10+00:00'\n assert message['args'][0]['data'] == {\n 'control': True,\n 'test1': 12.0,\n 'test2': 22.0\n }\n assert message_exist is True", "def test_artefact_add() -> None:\n options()\n server = MockServer()\n db, store = server.new_connection()\n\n a = _graph.constant_artefact(db, store, b\"bla bla\")\n b = _graph.Artefact[bytes].grab(db, a.hash)\n c = _graph.get_data(db, store, a)\n assert b is not None\n assert a == b\n assert c == b\"bla bla\"", "def test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n 
time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()", "async def test_binary_sensors(hass: HomeAssistant, surepetcare) -> None:\n assert await async_setup_component(hass, DOMAIN, MOCK_CONFIG)\n await hass.async_block_till_done()\n\n entity_registry = er.async_get(hass)\n state_entity_ids = hass.states.async_entity_ids()\n\n for entity_id, unique_id in EXPECTED_ENTITY_IDS.items():\n assert entity_id in state_entity_ids\n state = hass.states.get(entity_id)\n assert state\n assert state.state == \"on\"\n entity = entity_registry.async_get(entity_id)\n assert entity.unique_id == unique_id", "def test_update_bios_unit(self):\n pass", "async def test_new_sensor_discovered(hass, mock_bridge):\n mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)\n\n await setup_bridge(hass, mock_bridge)\n assert len(mock_bridge.mock_requests) == 1\n assert len(hass.states.async_all()) == 6\n\n new_sensor_response = dict(SENSOR_RESPONSE)\n new_sensor_response.update({\n \"7\": PRESENCE_SENSOR_3_PRESENT,\n \"8\": LIGHT_LEVEL_SENSOR_3,\n \"9\": TEMPERATURE_SENSOR_3,\n })\n\n mock_bridge.mock_sensor_responses.append(new_sensor_response)\n\n # Force updates to run again\n sm_key = hue_sensor_base.SENSOR_MANAGER_FORMAT.format('mock-host')\n sm = hass.data[hue.DOMAIN][sm_key]\n await sm.async_update_items()\n\n # To flush out the service call to update the group\n await hass.async_block_till_done()\n\n assert len(mock_bridge.mock_requests) == 2\n assert len(hass.states.async_all()) == 9\n\n presence = hass.states.get('binary_sensor.bedroom_sensor_motion')\n assert presence is not None\n assert presence.state == 'on'\n temperature = hass.states.get('sensor.bedroom_sensor_temperature')\n assert temperature is not None\n assert temperature.state == '17.75'", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0", "async def test_all_bit_sensor(hass, cfg, regs, expected):\n sensor_name = \"modbus_test_sensor\"\n state = await base_test(\n hass,\n {\n CONF_NAME: sensor_name,\n CONF_ADDRESS: 1234,\n CONF_BIT_NUMBER: 5,\n **cfg,\n },\n sensor_name,\n SENSOR_DOMAIN,\n CONF_BIT_SENSORS,\n CONF_INPUTS,\n regs,\n expected,\n method_discovery=True,\n scan_interval=5,\n )\n assert state == expected", "def test_create_study(self):\n study_spec = sample_study_spec()\n\n now = datetime.datetime.now()\n study_id = self.storage.create_study(study_spec)\n # Study ID must be a 32 char string.\n self.assertLen(study_id, 32)\n self.assertEqual(study_id, study_spec.id)\n self.assertGreaterEqual(study_spec.creation_time.ToDatetime(), now)\n\n # Read back the study and check that it is the same.\n self.assertEqual(self.storage.get_study(study_id), study_spec)", "def test_analog_write_successfull_write_operation(self):\n resp = json.loads(bolt.analogWrite(self.ANALOG_WRITE_PIN,\n self.ANALOG_WRITE_VALUE))\n self.assertEqual(resp[\"success\"], self.SUCCESS_RESPONSE)\n self.assertEqual(resp[\"value\"], self.SUCCESS_RESPONSE)", "def test_create_hardwareinjection(self):\n \"\"\"sim-inj.xml\"\"\"\n time.sleep(SLEEP_TIME)\n eventFile = os.path.join(testdatadir, \"sim-inj.xml\")\n hardwareinjection_event = gracedb.createEvent(\n \"Test\", \"HardwareInjection\", eventFile,\n instrument=\"H1\", source_channel=\"\",\n destination_channel=\"\").json()\n 
self.assertEqual(hardwareinjection_event['group'], \"Test\")\n self.assertEqual(hardwareinjection_event['pipeline'], \"HardwareInjection\")\n self.assertEqual(hardwareinjection_event['instruments'], \"H1\")", "def test_version_sensor(self):\n config = {\"sensor\": {\"platform\": \"version\"}}\n\n assert setup_component(self.opp, \"sensor\", config)", "def test_lsusb_binary_object_store(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_binary_object_store, quiet=True), self.generic_lsusb_binary_object_store_json)", "def test_WINFF_FT_S_REG_19(self):\n\n # Save sas version\n version = self._sas._sas_version\n # Use higher than supported version\n self._sas._sas_version = 'v2.0'\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n try:\n # Register\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 100)\n self.assertFalse('cbsdId' in response)\n except AssertionError as e:\n # Allow HTTP status 404\n self.assertEqual(e.args[0], 404)\n finally:\n # Put sas version back\n self._sas._sas_version = version" ]
[ "0.70032024", "0.6901163", "0.68574667", "0.6848556", "0.66131777", "0.6590479", "0.634464", "0.6333184", "0.6317183", "0.62723935", "0.6238028", "0.623732", "0.623732", "0.61875206", "0.61809593", "0.61583835", "0.6148876", "0.61349654", "0.61256504", "0.6102588", "0.6078365", "0.6056995", "0.6055356", "0.60474354", "0.6046181", "0.59710294", "0.59602535", "0.59591645", "0.59525293", "0.59493804" ]
0.769578
0
close the socket and remove the file
def close_client_socket(client_socket, client_socket_file): client_socket.close() if os.path.exists(client_socket_file): os.remove(client_socket_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\r\n self._fileobjclosed = True\r\n self._sockobj.close()", "def close(self):\r\n self.rfile.close()\r\n self.sock_chan.close()", "def cleanup(self):\n self._socket.close()\n os.remove(_get_control_socket_path())", "def __del__(self):\n self.connection.close()\n filename = self.fname.split(\"/\")[-1]\n print(f\"Connection to file {filename} closed.\")", "def _cleanupSocket(self, sock):\n sock.close()", "def __del__(self):\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()", "def unlink(self,):\n self._wait()\n self.fd.close()\n self.fd = None\n os.unlink(self.fname)", "def delete(self, host, file):", "def cleanup(self):\n self.sock.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n if self.socket:\n self.socket.close()\n self.socket = None", "def close(self):\n if self.socket:\n self.socket.close()\n self.socket = None", "def stop(self):\n self.logger.info('Close socket')\n self.sock.close()", "def server_close(self):\n\t\tself.socket.close()", "def __del__(self):\n self.file.close()", "def close(self) -> None:\n os.remove(self.FILE_NAME)", "def close(self):\n self._sock.close()\n self._sock = None", "def closesock(s):\n s.close()\n s = None", "def __del__(self):\r\n if not self.is_dummy:\r\n self.socket.close()", "def teardown(self):\n if self.__socket:\n self.__socket.close()", "def delete(self):\n with self.write():\n os.remove(self.filename)\n os.remove(self.proxy)", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def Close(self):\n self._sock.close()", "def __del__(self):\n self.f.close()", "def shutdown(self):\n self.sock.close()" ]
[ "0.7515783", "0.73575044", "0.7349963", "0.7186087", "0.7166717", "0.6988371", "0.6986388", "0.6879862", "0.68531454", "0.6772425", "0.6772425", "0.6772425", "0.6772425", "0.6772425", "0.6772425", "0.6757314", "0.6757314", "0.67505753", "0.67467415", "0.6740791", "0.67257035", "0.6714182", "0.66227525", "0.66147673", "0.6613784", "0.65934694", "0.65897965", "0.65851456", "0.65848213", "0.65759957" ]
0.7450703
1
Annotates single cell types at each level and adds the result to the object.
def getCellTypes(self): sc_data = Utils.convertAnnDataToDf(self.sc_data) try: self.sc_annot, self.de_dict = Annotate.annotateTree(sc_data, self.refDataset, self.refAnnot) except: print("Columns of annotations should be cell type levels. Additionally, higher levels should contain lower levels bound with ':'. Example structure; level1 (including B-cells), level2 (including B-cells:Naive)")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))", "def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)", "def iterate_types(self) -> Iterator[FakeAnnotation]:\n yield from self.type_annotation.iterate_types()", "def annotations(cls, types: dict) -> dict:\n return {\n name: Optional[cls.type_map[types[name]]]\n for name in types\n if types[name] in cls.type_map\n }", "def GetCellTypes(self):\n if not self.VTKObject.GetCellTypesArray():\n return None\n return vtkDataArrayToVTKArray(\n self.VTKObject.GetCellTypesArray(), self)", "def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)", "def add_sub_hierarchy(outdoc, type_hierarchy, depth=0, indent_step=' '):\n for k, v in type_hierarchy.items():\n type_list_item = indent_step*depth + '* '\n type_list_item += outdoc.get_reference(RSTSectionLabelHelper.get_section_label(k), k)\n type_list_item += outdoc.newline\n outdoc.add_text(type_list_item)\n if len(v) > 0:\n outdoc.add_text(outdoc.newline)\n add_sub_hierarchy(outdoc=outdoc,\n type_hierarchy=v,\n depth=depth+1,\n indent_step=indent_step)", "def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types", "def resolve_typerefs(leaf, types, min):\n for attr in type_refs[leaf.leaf_type]:\n ref = getattr(leaf, attr)\n if isinstance(ref, list):\n newrefs = []\n for r in ref:\n if r < min:\n 
newrefs.append(base_type._decode(r,{}))\n else:\n newrefs.append(types[r])\n newrefs = ListContainer(newrefs)\n setattr(leaf, attr, newrefs)\n else:\n if ref < min:\n setattr(leaf, attr, base_type._decode(ref,{}))\n elif ref >= min:\n try:\n setattr(leaf, attr, types[ref])\n except KeyError: pass\n return leaf", "def build_coverage_index(\n self,\n data_pack: DataPack,\n outer_type: Type[Annotation],\n inner_type: Type[EntryType]):\n if not isinstance(inner_type, (Annotation, Link, Group)):\n raise ValueError(f\"Do not support coverage index for {inner_type}.\")\n\n if not self.coverage_index_is_valid:\n self._coverage_index = dict()\n\n # prevent the index from being used during construction\n self.deactivate_coverage_index()\n\n self._coverage_index[(outer_type, inner_type)] = dict()\n for range_annotation in data_pack.get_entries_by_type(outer_type):\n if isinstance(range_annotation, Annotation):\n entries = data_pack.get(inner_type, range_annotation)\n entry_ids = {e.tid for e in entries}\n self._coverage_index[\n (outer_type, inner_type)][range_annotation.tid] = entry_ids\n\n self.activate_coverage_index()", "def cellAnalysis(celltypelist, fullcsvpaths):\n typelist, paths = [], []\n with open(celltypelist, 'r') as fIn:\n for line in fIn:\n typelist.append(line.strip().split(','))\n with open(fullcsvpaths, 'r') as fIn:\n for line in fIn:\n paths.append(line.strip())\n \n # Create the default dicts\n types = list(set([p[0] for p in typelist]))\n groups = list(set([p[2] for p in typelist]))\n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {typ: {ch: {gr: {} for gr in groups} for ch in checks} for typ in types}\n # Add a few more keys\n for typ in types:\n props[typ]['activity'] = {gr: {} for gr in groups}\n props[typ]['duration'] = {gr: {} for gr in groups}\n \n # Find the matching csv files\n paths = [p for p in paths if p.split('_')[-1]=='clusters.csv'] # If it's a clusters file\n reffils = [f.split('/')[-1].split('_')[0].split('.')[0] for f in paths] # ref to cluster file\n typepaths = []\n #print(\n \n for fil in typelist:\n t_ = fil[1].split('.')[0]\n if t_ in reffils:\n typepaths.append(paths[reffils.index(t_)])\n else:\n typepaths.append('none')\n \n # Populate the dictionary\n fail, success = [], []\n print('%i (of %i) files seem to be present' %(len(typepaths)-typepaths.count('none'),\n len(typepaths)))\n for g in range(len(typepaths)): # This retains the order of typelist\n try:\n df = pd.read_csv(typepaths[g])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n type_ = typelist[g][0]\n group_ = typelist[g][2]\n cell_ = typelist[g][1].split('.')[0]\n for clust in range(numClusts):\n props[type_][ch][group_][cell_].append(df[df['clust_inds']==clust][ch].dropna().values)\n else: # Just one cluster\n for ch in checks:\n props[type_][ch][group_][cell_] = [df[ch].dropna().values]\n \n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props[type_]['activity'][group_][cell_] = [tIn, cBouts]\n props[type_]['duration'][group_][cell_] = df.times.iloc[-1]\n success.append(typelist[g])\n \n except:\n fail.append(typelist[g])\n \n #print(failed)\n return props, 
success, fail", "def associate_node_types_settings(df, tr, root_attr_cols, node=\"\"):\n node_type_cols, node_attr_dict = get_node_types_attrs(\n df=df,\n node=node,\n root_node_type=tr.get_root_node(),\n root_attr_columns=root_attr_cols,\n )\n node_types = {col for col in node_type_cols}\n\n settings = []\n\n for node_type in node_type_cols:\n path_val, settings_val = tr.get_uml_settings(node_key=node_type)\n if settings_val:\n if \"id\" in settings_val:\n settings_value = get_setting_node_name_from_df(\n df=df, column=settings_val.split(\"-\")[-1], node=node\n )\n settings.extend(\n [{path_val: value} for value in settings_value]\n )\n elif isinstance(settings_val, list) and any(\n \"id\" in item for item in settings_val\n ): # TODO: Test This\n id_calls = [\n id.split(\"-\")[-1]\n for id in filter(lambda x: \"id\" in x, settings_val)\n ]\n for col in id_calls:\n settings_value = get_setting_node_name_from_df(\n df=df, column=col, node=node\n )\n settings.extend(\n [{path_val: [value]} for value in settings_value]\n )\n else:\n settings.append({path_val: settings_val})\n else:\n settings = []\n\n type_setting_dict = {\n \"settings\": settings,\n \"node_types\": list(node_types),\n \"attributes\": node_attr_dict,\n }\n return type_setting_dict", "def celltypesref(pl):\n ppl = set(pl)\n refnd = dict(zip(ppl, range(len(ppl))))\n return refnd", "def _type_data(self, row, index):\n typed_row = []\n for i, (entry, entry_type) in enumerate(zip(row, self.column_types)):\n try:\n typed_row.append(entry_type(entry))\n except ValueError:\n if (entry_type == int or entry_type == float) and self.missing_to_zero:\n typed_row.append(entry_type(0))\n else:\n if [i + 1, index + 2, entry, entry_type] not in self.invalid_typed and self.print_warnings:\n self.invalid_typed.append([i + 1, index + 2, entry, entry_type])\n typed_row.append(entry)\n return typed_row", "def getTreePlot(self):\n try:\n Utils.plotTree(self.sc_annot)\n except:\n print(\"Please run getCellTypes first to get cell annotations. 
This step is needed for plotting.\")", "def baselineStats(cells, show=True):\n # All by genotype\n props, glist, clist = {'Rm': {}, 'Cm': {}, 'Ra' : {}}, [], []\n for c in cells.keys():\n gen_, typ_ = cells[c]['genotype'], cells[c]['type']\n glist.append(gen_)\n clist.append(typ_)\n for pr in props.keys():\n if gen_ not in props[pr].keys():\n props[pr][gen_] = {}\n if typ_ not in props[pr][gen_].keys():\n props[pr][gen_][typ_] = []\n \n # Now that the dict is prepared, add the info\n temp_ = simpleProp(cells[c], where=-1)\n for k in temp_.keys():\n try:\n props[k][gen_][typ_].append(float(temp_[k]))\n except:\n print(k, gen_, typ_)\n \n # Round out the dictionaries\n for p in props.keys():\n for g in glist:\n for c in clist:\n try:\n _ = props[p][g][c]\n except:\n props[p][g][c] = []\n \n # Some basic plotting\n if show:\n genoByCell(props)\n return props", "def flatten(self, single_layer=None, single_datatype=None, single_texttype=None):\n self.labels = self.get_labels()\n if single_layer is not None and single_datatype is not None:\n for lbl in self.labels:\n lbl.layer = single_layer\n lbl.texttype = single_texttype\n elif single_layer is not None:\n for lbl in self.labels:\n lbl.layer = single_layer\n elif single_datatype is not None:\n for lbl in self.labels:\n lbl.texttype = single_texttype\n self.polygons = self.get_polygonsets()\n self.paths = self.get_paths()\n if single_layer is not None and single_datatype is not None:\n for poly in self.polygons:\n poly.layers = [single_layer] * len(poly.polygons)\n poly.datatypes = [single_datatype] * len(poly.polygons)\n for path in self.paths:\n path.layers = [single_layer] * path.n\n path.datatypes = [single_datatype] * path.n\n elif single_layer is not None:\n for poly in self.polygons:\n poly.layers = [single_layer] * len(poly.polygons)\n for path in self.paths:\n path.layers = [single_layer] * path.n\n elif single_datatype is not None:\n for poly in self.polygons:\n poly.datatypes = [single_datatype] * len(poly.polygons)\n for path in self.paths:\n path.datatypes = [single_datatype] * path.n\n self.references = []\n return self", "def df_to_cell_types(df, cell_types):\n df_cum_sums = df[cell_types].cumsum(axis=1)\n\n df_c = df.copy()\n\n for i in df_cum_sums.columns:\n df_c[i] = df_cum_sums[i]\n\n cell_types_mapped = defaultdict(list)\n for i_index, i in enumerate(cell_types):\n for j_index, j in df_c.iterrows():\n start_ind = 0 if i_index == 0 else j[cell_types[i_index - 1]]\n end_ind = j[i]\n cell_types_mapped[i].extend(j['centroids'][start_ind:end_ind].tolist())\n return cell_types_mapped", "def _inferred_type_levels(self) -> list[str]:\n return [i.inferred_type for i in self.levels]", "def map_to_per_etype(self, ids): # -> None:\n ...", "def print_itype_tree(node, one_handers=None, two_handers=None, current_depth=0):\n if not node:\n return\n\n output_str = \" \" * (4 * current_depth) + node.code\n\n if one_handers and node.code in one_handers:\n output_str = Fore.GREEN + output_str + \" <-- 1h\" + Fore.RESET\n elif two_handers and node.code in two_handers:\n output_str = Fore.CYAN + output_str + \" <-- 2h\" + Fore.RESET\n\n print(output_str)\n\n for child in node.children:\n print_itype_tree(child, one_handers, two_handers, current_depth + 1)", "def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 
'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'", "def convert_type(self):\r\n tree = self.substitute.tree_\r\n classes = self.substitute.classes_\r\n node_list = []\r\n for i in range(tree.capacity):\r\n if tree.feature[i] == -2:\r\n node_list.append(Node(label=classes[np.argmax(tree.value[i, 0, :])]))\r\n else:\r\n node_list.append(Node(tree.feature[i], tree.threshold[i]))\r\n for i in range(tree.capacity):\r\n if tree.children_left[i] != -1:\r\n node_list[i].left = node_list[tree.children_left[i]]\r\n node_list[tree.children_left[i]].parents.append(node_list[i]) if node_list[i] not in node_list[tree.children_left[i]].parents else node_list[tree.children_left[i]].parents\r\n if tree.children_right[i] != -1:\r\n node_list[i].right = node_list[tree.children_right[i]]\r\n node_list[tree.children_right[i]].parents.append(node_list[i]) if node_list[i] not in node_list[tree.children_right[i]].parents else node_list[tree.children_right[i]].parents\r\n return node_list[0]", "def cellannotation2ID(self, annotation_type):\n annotations = list(self.adata.obs[annotation_type])\n annotations_set = sorted(set(annotations))\n \n mapping = {a:idx for idx,a in enumerate(annotations_set)}\n \n truth_labels = [mapping[a] for a in annotations]\n self.adata.obs['label'] = pd.Categorical(values=truth_labels)\n #18m-unannotated\n # \n return mapping", "def _get_annotation_class_attr(self, index, el):\r\n\r\n attr = {}\r\n cls = ['annotatable-span', 'highlight']\r\n highlight_key = 'highlight'\r\n color = el.get(highlight_key)\r\n\r\n if color is not None:\r\n if color in self.highlight_colors:\r\n cls.append('highlight-' + color)\r\n attr['_delete'] = highlight_key\r\n attr['value'] = ' '.join(cls)\r\n\r\n return {'class': attr}", "def map_to_per_ntype(self, ids): # -> None:\n ...", "def index_rtypes(self):\n metric = self.index_metric\n out = {}\n for fnode in self.nodes.values():\n # only consider outgoing relationships because looping over\n # all object anyways, so will cover everything\n\n for (rtype, dest) in fnode.outgoing_relations:\n dnode = self.nodes[dest]\n\n # merge outgoing and attributes - distinction should not change\n # how vectors are formed\n a1 = fnode.rtypes | fnode.atypes\n b1 = dnode.rtypes | dnode.atypes\n c1 = a1 - b1\n d1 = b1 - a1\n e1 = b1 & a1\n f1 = b1 ^ a1\n g1 = b1 | a1\n\n # merge outgoing and attributes - distinction should not change\n # how vectors are formed\n #a2 = {b for a,b in fnode.outgoing_relations} | {b for a,b in fnode.attributes}\n #b2 = {b for a,b in dnode.outgoing_relations} | {b for a,b in dnode.attributes}\n #c2 = a2 - b2\n #d2 = b2 - a2\n #e2 = b2 & a2\n #f2 = b2 ^ a2\n #g2 = b2 | a2\n\n rval = out.setdefault(rtype, NULL_VEC())\n\n \"\"\"\n TODO: add similarity measure between node and prototype nodes\n\n Idea is to get a ground-truth value for the rtype by measuring\n how src --<rtype>--> dest compares to prototype transformations\n\n \n \n 
\"\"\"\n\n #types only\n score = np.array([metric(a1, b1),\n metric(a1, c1),#1\n metric(a1, e1),#2\n metric(a1, f1),#3\n metric(a1, g1),#4\n metric(b1, d1),#1\n metric(b1, e1),#2\n metric(b1, f1),#3\n metric(b1, g1),#4\n metric(c1, d1),\n metric(c1, f1),#5\n metric(d1, f1),#5\n metric(f1, g1),\n ],dtype=np.float)\n\n \n #types and objects\n #score = np.array([metric(a1, b1),\n # metric(a1, c1),\n # metric(a1, e1),\n # metric(a1, f1),\n # metric(a1, g1),\n # metric(b1, d1),\n # metric(b1, e1),\n # metric(b1, f1),\n # metric(b1, g1),\n # metric(c1, d1),\n # metric(c1, f1),\n # metric(c1, c2),\n # metric(c1, e2),\n # metric(d1, f1),\n # metric(d1, d2),\n # metric(d1, e2),\n # metric(f1, g1),\n # metric(a2, b2),\n # metric(a2, c2),\n # metric(a2, e2),\n # metric(a2, f2),\n # metric(a2, g2),\n # metric(b2, d2),\n # metric(b2, e2),\n # metric(b2, f2),\n # metric(b2, g2),\n # metric(c2, f2),\n # metric(d2, f2),\n # metric(f2, g2)],dtype=np.float)\n\n out[rtype] = rval + score\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n\n \n #with open(\"rrw.pkl\",\"wb+\") as f:\n # pickle.dump(out, f, -1)\n\n #normalize everything\n for r,v in out.items():\n #out[r] = v / max(v)\n out[r] = v / sqrt(v.dot(v))\n #out[r] = softmax(v/maxm)\n #out[r] = softmax(v/max(v))\n #out[r] = softmax((v-avg)/maxm)\n\n #for debugging purposes\n #np.save(\"utils/vectest.npy\",np.array(list(out.values())))\n \n\n '''\n rcount = self.usage_counts\n vs1 = {}\n for rtype, vec in out.items():\n vs1[rtype] = softmax(vec/rcount[rtype])\n\n data = np.array(list(vs1.values()))\n d2 = data - PCA(data, 1)#eliminate projection onto first principal component\n\n for i,v in enumerate(vs1):#iteration order is preserved\n #rescale output\n out[v] = softmax(d2[i]/rcount[v])\n '''\n return out", "def collect_level_info(annotation):\n iscrowd = 0 if annotation['legible'] else 1\n vertices = np.array(annotation['vertices'])\n polygon = Polygon(vertices)\n area = polygon.area\n min_x, min_y, max_x, max_y = polygon.bounds\n bbox = [min_x, min_y, max_x - min_x, max_y - min_y]\n segmentation = [i for j in vertices for i in j]\n anno = dict(\n iscrowd=iscrowd,\n category_id=1,\n bbox=bbox,\n area=area,\n segmentation=[segmentation])\n return anno", "def get_dtypes_for_group_annots(header: List, annot_types: List):\n group_dtypes = {}\n for annotation, annot_type in zip(header, annot_types):\n if annot_type != \"numeric\":\n group_dtypes[annotation] = np.str\n return group_dtypes", "def type_hint_annotations(self) -> list[FakeAnnotation]:\n result: list[FakeAnnotation] = []\n for child in self.children:\n if child.type_annotation.get_local_types():\n result.append(child.type_annotation)\n return result" ]
[ "0.53548366", "0.49283662", "0.49159172", "0.48532856", "0.4844311", "0.48404062", "0.48219582", "0.48180592", "0.47917625", "0.47327456", "0.47157717", "0.4695715", "0.46835762", "0.46732008", "0.46381748", "0.4630978", "0.45994526", "0.4596766", "0.4594033", "0.45796517", "0.45618016", "0.45547435", "0.45544714", "0.4552222", "0.45499066", "0.45406383", "0.45387602", "0.4536929", "0.45342925", "0.45319942" ]
0.6900952
0
Finds marker genes and adds the result to the object.
def getMarkerGenes(self): try: self.marker_genes = Utils.findMarkers(self.sc_annot, self.de_dict) except: print("Please run getCellTypes first to get cell annotations. This step is needed for marker gene finding.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allmarkers(repo):\n for markerdata in repo.obsstore:\n yield marker(repo, markerdata)", "def collectMarkers(self, ingeno):\n with open(ingeno,'r') as fin:\n for line in fin:\n if line.startswith('#'):\n l = line.strip('#').strip().split()\n for i,e in enumerate(l):\n self.mark[e] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(e)\n break\n else:\n l = line.strip().split()\n if self.ia == 3:\n for i in xrange(0,len(l[self.ic:])//2):\n self.mark[str(i)] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(str(i))\n else:\n for i,e in enumerate(l[self.ic:]):\n self.mark[str(i)] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(str(i))", "def writeMarkerGenes(self, location):\n try:\n writer = pd.ExcelWriter(location+\"marker_genes.xlsx\", engine=\"xlsxwriter\")\n for key in self.marker_genes:\n self.marker_genes.get(key).to_excel(writer, sheet_name=key)\n writer.save()\n except:\n print(\"Please run getMarkerGenes first to get marker genes. This step is needed to write them to excel.\")", "def vcf_add_gene(vrecs,gi_by_chr_map):\n def _overlap(gi,vr):\n return gi['end_i'] >= vr.start and gi['start_i'] <= vr.end\n def _genes_for_vr(vr,gi_by_chr_map):\n cm = gi_by_chr_map[vr.CHROM]\n genes = [ gi['gene'] for gi in cm if _overlap(gi,vr) ] \n return genes\n for vr in vrecs:\n vr.genes = _genes_for_vr(vr,gi_by_chr_map)", "def search_genres(self, needle):\n return self._genre_search.search(searchable(needle))", "def check_markers(de_genes, marker_genes):\n # create a dict for the results\n matches = dict()\n\n # loop over clusters\n for group in de_genes.columns:\n\n # add a new entry in the results dict\n matches[group] = dict()\n\n # extract the de genes for that cluster\n de_genes_group = de_genes[group].values\n\n # loop over cell types\n for key in marker_genes:\n\n genes_found = list()\n # loop over the markers for this key\n for gene in marker_genes[key]:\n regex = re.compile('^' + gene + '$', re.IGNORECASE)\n result = [l for l in de_genes_group for m in [regex.search(l)] if m]\n if result: genes_found.append(result[0])\n\n # save the matches in the dict\n if genes_found: matches[group][key] = genes_found\n\n return(matches)", "def return_markers(self):\r\n ent_file = join(self.filename, self._basename + '.ent')\r\n if not exists(ent_file):\r\n ent_file = join(self.filename, self._basename + '.ent.old')\r\n\r\n try:\r\n ent_notes = _read_ent(ent_file)\r\n\r\n except (FileNotFoundError, PermissionError):\r\n markers = []\r\n\r\n else:\r\n allnote = []\r\n for n in ent_notes:\r\n try:\r\n n['value'].keys()\r\n allnote.append(n['value'])\r\n except AttributeError:\r\n lg.debug('Note of length {} was not '\r\n 'converted to dict'.format(n['length']))\r\n\r\n s_freq = self._hdr['erd']['sample_freq']\r\n pcname = '0CFEBE72-DA20-4b3a-A8AC-CDD41BFE2F0D'\r\n note_time = []\r\n note_name = []\r\n note_note = []\r\n for n in allnote:\r\n if n['Text'] == 'Analyzed Data Note':\r\n continue\r\n if not n['Text']:\r\n continue\r\n if 'User' not in n['Data'].keys():\r\n continue\r\n user1 = n['Data']['User'] == 'Persyst'\r\n user2 = False # n['Data']['User'] == 'eeg'\r\n user3 = n['Data']['User'] == pcname\r\n user4 = n['Data']['User'] == 'XLSpike - Intracranial'\r\n user5 = n['Data']['User'] == 'XLEvent - Intracranial'\r\n if user1 or user2 or user3 or user4 or user5:\r\n continue\r\n if len(n['Data']['User']) == 0:\r\n note_name.append('-unknown-')\r\n else:\r\n note_name.append(n['Data']['User'].split()[0])\r\n 
note_time.append(n['Stamp'] / s_freq)\r\n note_note.append(n['Text'])\r\n\r\n markers = []\r\n for time, name, note in zip(note_time, note_name, note_note):\r\n m = {'name': note + ' (' + name + ')',\r\n 'start': time,\r\n 'end': time,\r\n 'chan': None,\r\n }\r\n markers.append(m)\r\n\r\n return markers", "def generateMarkers(self, *args, **kwargs): \n return 'var PloneMapMarkers = [' + \\\n ''.join([\"{'type': '%s','options': { 'position': new google.maps.LatLng( %s, %s ), 'title' : '%s', 'title_' : '%s' }},\" \n % (object.markerIcon, object.latitude, object.longitude, object.Title(), object.getId()) \n for object in self.context.objectValues() \n if hasattr(object, 'latitude') and len(object.latitude) > 0 ])[:-1] \\\n + '];'", "def simple_de_matching(adata, markers, n_genes=100):\n gene_groups = adata.uns['rank_genes_groups']\n de_genes = pd.DataFrame(data=gene_groups['names']).head(n_genes)\n #print(de_genes.head(10))\n\n matches = check_markers(de_genes, markers)\n for key, value in matches.items():\n print(f'-- cluster {key} --')\n print(value)\n\n return de_genes", "def set_neighbor_markers(self):\n marker_texture_path = self.PATHS[\"MINIMAP_BG_TEXTURE\"]\n marker_texture = self.loader.loadTexture(marker_texture_path)\n for location in self.locations:\n location_pos = location.get_position()\n for neighbor_id in location.get_neighbors():\n neighbor = next(self.find_location_by_id(neighbor_id))\n neighbor_pos = neighbor.get_position()\n neighbor_displaced = self.calculate_displacement(location_pos, neighbor_pos).tolist()\n neighbor_displaced_x, neighbor_displaced_y = neighbor_displaced\n reference_displaced = self.calculate_displacement(location_pos, self.reference_point).tolist()\n reference_displaced_x, reference_displaced_y = reference_displaced\n angle = self.calculate_angle(neighbor_displaced, reference_displaced)\n\n def reference_line(x_pos):\n slope = reference_displaced_y / reference_displaced_x\n return slope * x_pos\n\n if reference_line(neighbor_displaced_x) > neighbor_displaced_y:\n angle = 360-angle\n\n location.add_neighbor_marker(neighbor, angle, marker_texture)", "def map_and_save_gene_ids(hit_genes_location, all_detectable_genes_location=''):\n\n standardized_hits = [] # [primary_set]\n standardized_secondary_hits = [] # [secondary_set=None]\n\n if type(hit_genes_location) == str or isinstance(hit_genes_location, pathlib.PurePath):\n # log.info('codepath 1')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location)]\n standardized_secondary_hits = [None]\n\n if type(hit_genes_location) == tuple:\n # log.info('codepath 2')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location[0])]\n standardized_secondary_hits = [cast_external_refs_to_internal_ids(hit_genes_location[1])]\n\n if type(hit_genes_location) == list:\n # log.info('codepath 3')\n for sub_hit_genes_location in hit_genes_location:\n # log.info('codepath 3.0')\n if type(sub_hit_genes_location) == str or isinstance(sub_hit_genes_location, pathlib.PurePath):\n # log.info('codepath 3.1')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location)]\n standardized_secondary_hits += [None]\n if type(sub_hit_genes_location) == tuple:\n # log.info('codepath 3.2')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[0])]\n standardized_secondary_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[1])]\n\n log.debug('standardized primary hits:\\n\\t%s' % standardized_hits)\n log.debug('standardized 
secondary_hits:\\n\\t%s' % standardized_secondary_hits)\n\n dump_object(Dumps.analysis_set_bulbs_ids, (standardized_hits, standardized_secondary_hits))\n\n if all_detectable_genes_location:\n background_set = cast_external_refs_to_internal_ids(all_detectable_genes_location)\n # print(background_set)\n primary_set = [y for x in standardized_hits for y in x] # flattens the mapped ids list\n # print(primary_set)\n\n formatted_secondary_hits = [_l\n if _l is not None\n else []\n for _l in standardized_secondary_hits]\n\n sec_set = [y for x in formatted_secondary_hits for y in x]\n\n re_primary_set = set()\n for _id in primary_set:\n if type(_id) == str or type(_id) == int:\n re_primary_set.add(_id)\n else:\n re_primary_set.add(_id[0])\n\n primary_set = re_primary_set\n\n re_secondary_set = set()\n for _id in sec_set:\n if type(_id) == str or type(_id) == int:\n re_secondary_set.add(_id)\n else:\n re_secondary_set.add(_id[0])\n\n sec_set = re_primary_set\n\n if type(background_set[0]) == str or type(background_set[0]) == int: # unweighted\n background_set = list(set(background_set).union(primary_set).union(sec_set))\n\n else:\n bck_set = {_id[0] for _id in background_set}\n bck_set = list(bck_set)\n\n if not primary_set.issubset(bck_set):\n log.info('Nodes ids %s are missing in background set and are added with weight 0' %\n (primary_set - bck_set))\n background_set += [(_id, 0) for _id in (primary_set - bck_set)]\n\n if not sec_set.issubset(bck_set):\n log.info('Secondary set nodes ids %s are missing in background set and are added '\n 'with weight 0' % (sec_set - bck_set))\n background_set += [(_id, 0) for _id in (sec_set - bck_set)]\n\n else:\n background_set = []\n\n dump_object(Dumps.background_set_bulbs_ids, background_set)\n\n return standardized_hits, standardized_secondary_hits, background_set", "def mark_selected():\n (buffer, start, end) = get_selection_or_word()\n selection = buffer.get_chars(start, end)\n\n if selection != \"\":\n for m in buffer.file().search(selection, regexp=False):\n GPS.Locations.add(\"Local occurrences\",\n m.file(), m.line(), m.column(),\n selection,\n highlight=\"dynamic occurrences\",\n length=len(selection))", "def _get_obj_geneset(self, obj):\n obj_geneset = set(obj.input.get(\"mutations\", []))\n if not obj_geneset:\n # Geneset is given via geneset input:\n gs = self.resolwe.geneset.get(obj.input[\"geneset\"])\n obj_geneset = set(gs.genes)\n\n # Convert to gene symbols in case genes are given as feature ID's\n if gs.output[\"source\"] != \"UCSC\":\n qs = self.resolwe.feature.filter(feature_id__in=list(obj_geneset))\n id_2_name = {obj.feature_id: obj.name for obj in qs}\n obj_geneset = set([id_2_name[gene] for gene in obj_geneset])\n\n return obj_geneset", "def locus2gene(scaflist, gbeglist, gendlist, gdatalist=False, gff=dbpaths['gff'], comprehensive=True ):\n cuffgenes = {}\n\n for result in range(len(scaflist)):\n if result % 1000 == 0:\n print \"%d genes matched of %d\" % (result, len(scaflist))\n cur_scaf = scaflist[result]\n cur_gbeg = gbeglist[result]\n cur_gend = gendlist[result]\n if gdatalist:\n cur_gdata = gdatalist[result]\n else:\n cur_gdata = 0\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n if (cur_scaf, cur_gbeg) in cuffgenes:\n cuffgenes[(cur_scaf, cur_gbeg, 2)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n else:\n cuffgenes[(cur_scaf, cur_gbeg)] = 
(re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n if not comprehensive:\n break\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def find_aruco_markers(img, marker_size=6, total_markers=250, draw=True):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n key = getattr(aruco, f'DICT_{marker_size}X{marker_size}_{total_markers}')\n aruco_dict = aruco.Dictionary_get(key)\n aruco_param = aruco.DetectorParameters_create()\n bboxs, ids, rejected_bboxs = aruco.detectMarkers(img_gray, aruco_dict,\n parameters=aruco_param, cameraMatrix=camera_matrix, distCoeff=camera_distortion)\n\n # print(bboxs)\n if draw:\n aruco.drawDetectedMarkers(img, bboxs, ids)\n rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(\n corners=bboxs, markerLength=2, cameraMatrix=camera_matrix, distCoeffs=camera_distortion)\n for rvec, tvec in zip(rvecs, tvecs):\n aurco.drawAxis(img, cameraMatrix=camera_matrix,\n distCoeffs=camera_distortion, rvec=rvec, tvec=tvec, length=1)\n return bboxs, ids", "def markers (self):\n return self._markers", "def onLoadMarkersButton(self):\n start_time = time.time() \n fileName = self.seedsPath + self.fileNameSeedsLineEdit.text\n \n markupsNode = slicer.mrmlScene.GetFirstNodeByName(\"MarkupsFiducial\")\n if markupsNode == None:\n markupsNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLMarkupsFiducialNode\")\n \n markupsNode.RemoveAllMarkups()\n \n # name, point_ras, label\n markups = self.loadMarkupsFromSeedFile(fileName)\n \n for i in range(len(markups)):\n point_ras = markups[i][1]\n markupsNode.AddFiducial(point_ras[0], point_ras[1], point_ras[2])\n markupsNode.SetNthFiducialLabel(i, markups[i][0])\n markupsNode.SetNthControlPointDescription(i, str(markups[i][2]))\n markupsNode.SetNthMarkupLocked (i, False)\n\n loadTime = time.time() - start_time\n logging.info('Markers loaded from ' + fileName + ': ' + str(loadTime) + \" seconds\")", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # Seasonal 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # Seasonal 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # Seasonal 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order',\n 'ssn_AR_order', 'ssn_I_order', 'ssn_MA_order']", "def add_gene_ids(self, genes_list):\n orig_num_genes = len(self.genes)\n\n for g in list(set(genes_list)):\n if not self.genes.has_id(g):\n new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n if self.model:\n self.model.genes.append(new_gene)\n else:\n self.genes.append(new_gene)\n\n log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))", "def refine_location(markers_filtered, rf_pairs):\n for marker in markers_filtered:\n print(marker)\n Fork([marker], 0, rf_pairs)\n return", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = 
[x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def update_markers(self):\n self.markers.delete_array(marker_id_start=1001,\n pose_array=self.inspection['pose_list'\n ][self.data['x']\n ['current']])\n sleep(0.01)\n self.markers. 
\\\n small_sphere_pose_array(marker_id_start=1001,\n pose_array=self.inspection['pose_list']\n [self.data['x']['current']])\n sleep(0.01)\n self.markers.add_small_sphere_stamped(marker_id=1000,\n pose=self.data['poses'][\n 'marker'])\n return", "def gene_search(\n self,\n genes:list=[\"MYL2\"], \n ):\n try: \n assert isinstance(genes, list)\n except AssertionError as e:\n e.args += (\"[genes] argument needs to be type(list)\", )\n raise\n \n\n self.genes = genes\n\n self.requestURL = f\"https://www.ebi.ac.uk/proteins/api/proteins?offset=0&size=100&gene={'%2C%20'.join(genes)}&organism=human\"\n \n r = requests.get(self.requestURL, headers={ \"Accept\" : \"application/json\"})\n \n if not r.ok:\n r.raise_for_status()\n sys.exit()\n\n self.responseBody = r.text\n self.data = json.loads(self.responseBody)\n\n return self.responseBody", "def update (self) :\n for met in self.gene :\n met(self)", "def load_pose_markers(self, size=0.25):\n [p.removeBody(i) for i in self.marker_ids]\n self.marker_ids = []\n\n for color, pos in self.markers:\n vs_id = p.createVisualShape(p.GEOM_SPHERE, radius=size, rgbaColor=color)\n self.marker_ids.append(p.createMultiBody(basePosition=pos, baseCollisionShapeIndex=-1, baseVisualShapeIndex=vs_id))", "def get_genes(variant):\n genes = {}\n transcripts = []\n mongo_genes = []\n \n # Conversion from ensembl to refseq\n # ensembl_to_refseq is a dictionary with ensembl transcript id as keys and\n # a list of refseq ids as values\n ensembl_to_refseq = {}\n for gene_info in variant['info_dict'].get(\n 'Ensembl_transcript_to_refseq_transcript', []):\n splitted_gene = gene_info.split(':')\n transcript_info = splitted_gene[1]\n for transcript in transcript_info.split('|'):\n splitted_transcript = transcript.split('>')\n if len(splitted_transcript) > 1:\n ensembl_id = splitted_transcript[0]\n refseq_ids = splitted_transcript[1].split('/')\n ensembl_to_refseq[ensembl_id] = refseq_ids\n \n # A dictionary with clinical gene descriptions\n gene_descriptions = {}\n for gene_info in variant['info_dict'].get('Gene_description', []):\n splitted_gene = gene_info.split(':')\n hgnc_symbol = splitted_gene[0]\n description = splitted_gene[1]\n gene_descriptions[hgnc_symbol] = description\n \n # First we get all vep entrys that we find and put them under their \n # corresponding gene symbol in 'genes'\n for vep_entry in variant['vep_info'].get(variant['ALT'], []):\n transcript = get_transcript(vep_entry, ensembl_to_refseq)\n hgnc_symbol = transcript.hgnc_symbol\n if hgnc_symbol:\n if hgnc_symbol in genes:\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n else:\n genes[hgnc_symbol] = {}\n genes[hgnc_symbol]['transcripts'] = {}\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['omim_gene_id'] = None\n genes[hgnc_symbol]['phenotypic_terms'] = []\n genes[hgnc_symbol]['best_rank'] = 40\n genes[hgnc_symbol]['ensembl_id'] = transcript.ensembl_id\n \n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n 
genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n \n ######################################################################\n ## There are two types of OMIM terms, one is the OMIM gene entry ##\n ## and one is for the phenotypic terms. ##\n ## Each key in the 'omim_terms' dictionary reprecents a gene id. ##\n ## Values are a dictionary with 'omim_gene_id' = omim_gene_id and ##\n ## 'phenotypic_terms' = [list of OmimPhenotypeObjects] ##\n ######################################################################\n\n # Fill the omim gene id:s:\n for annotation in variant['info_dict'].get('OMIM_morbid', []):\n if annotation:\n splitted_record = annotation.split(':')\n try:\n hgnc_symbol = splitted_record[0]\n omim_term = splitted_record[1]\n genes[hgnc_symbol]['omim_gene_id'] = omim_term\n except (ValueError, KeyError):\n pass\n\n # Fill the omim phenotype terms:\n for gene_annotation in variant['info_dict'].get('Phenotypic_disease_model', []):\n if gene_annotation:\n splitted_gene = gene_annotation.split(':')\n hgnc_symbol = splitted_gene[0]\n for omim_entry in splitted_gene[1].split('|'):\n splitted_record = omim_entry.split('>')\n \n phenotype_id = splitted_record[0]\n inheritance_patterns = []\n if len(splitted_record) > 1:\n inheritance_patterns = splitted_record[1].split('/')\n \n disease_model = PhenotypeTerm(\n phenotype_id=phenotype_id,\n disease_models=inheritance_patterns\n )\n \n genes[hgnc_symbol]['phenotypic_terms'].append(disease_model)\n \n for hgnc_symbol in genes:\n gene_info = genes[hgnc_symbol]\n most_severe = gene_info['most_severe_transcript']\n # Create a mongo engine gene object for each gene found in the variant\n mongo_gene = Gene(hgnc_symbol=hgnc_symbol)\n mongo_gene.description = gene_descriptions.get(hgnc_symbol)\n mongo_gene.ensembl_gene_id = gene_info.get('ensembl_id', None)\n mongo_gene.omim_gene_entry = gene_info.get(\n 'omim_gene_id', \n None\n )\n\n mongo_gene.omim_phenotypes = gene_info.get(\n 'phenotypic_terms', \n []\n )\n\n # Add a list with the transcripts:\n mongo_gene.transcripts = []\n for transcript_id in gene_info['transcripts']:\n mongo_gene.transcripts.append(gene_info['transcripts'][transcript_id])\n\n try:\n mongo_gene.functional_annotation = gene_info['most_severe_function']\n except AttributeError:\n pass\n try:\n mongo_gene.region_annotation = SO_TERMS[mongo_gene.functional_annotation]['region']\n except AttributeError:\n pass\n try:\n mongo_gene.sift_prediction = most_severe.sift_prediction\n except AttributeError:\n pass\n try:\n mongo_gene.polyphen_prediction = most_severe.polyphen_prediction\n except AttributeError:\n pass\n # Add the mongo engine gene to the dictionary\n mongo_genes.append(mongo_gene)\n\n return mongo_genes", "def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order']", "def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" 
\")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record" ]
[ "0.5793127", "0.57150245", "0.5687851", "0.5483316", "0.5397726", "0.5339215", "0.5312121", "0.53074116", "0.5284343", "0.5267562", "0.51718825", "0.51595294", "0.51243556", "0.508369", "0.50633913", "0.5061443", "0.5056034", "0.5050732", "0.50467813", "0.5020721", "0.49919966", "0.49739155", "0.49693856", "0.49646357", "0.49635518", "0.4959208", "0.49576995", "0.49442667", "0.4944128", "0.493366" ]
0.7286202
0
Writes marker genes to an excel file. Each sheet indicates different levels.
def writeMarkerGenes(self, location): try: writer = pd.ExcelWriter(location+"marker_genes.xlsx", engine="xlsxwriter") for key in self.marker_genes: self.marker_genes.get(key).to_excel(writer, sheet_name=key) writer.save() except: print("Please run getMarkerGenes first to get marker genes. This step is needed to write them to excel.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_excel(self, filename):\n writer = pd.ExcelWriter(filename)\n self.df_avg.to_excel(writer, 'Simulation')\n self.manager_df.to_excel(writer, 'FleetManagers')\n self.customer_df.to_excel(writer, 'Customers')\n self.transport_df.to_excel(writer, 'Transports')\n writer.save()", "def save_per_gene(filename, tn_per_gene, reads_per_gene, aliases_designation):\n\n with open(filename, \"w\") as f:\n\n f.write(\"Gene name\\tNumber of transposons per gene\\tNumber of reads per gene\\n\")\n\n for gene in tn_per_gene:\n tnpergene = tn_per_gene[gene]\n readpergene = reads_per_gene[gene]\n if gene in aliases_designation:\n gene_alias = aliases_designation.get(gene)[0]\n else:\n gene_alias = gene\n f.write(gene_alias + \"\\t\" + str(tnpergene) + \"\\t\" + str(readpergene) + \"\\n\")", "def writeToExcel(self, filename = \"Interfaces.xlsx\", idx = None, prec = 4,\\\n verbose = 1):\n\n if idx is None:\n idx = np.arange(self.atoms.shape[0])\n elif type(idx) is int: \n idx = np.array([idx])\n else:\n idx = np.array(idx)\n \n\n dataDict = {\"Index\": idx, \"Original Rotation\": self.ang[idx],\\\n \"Length a\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 0], prec),\\\n \"Length b\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 1], prec),\\\n \"Angle a/b\": np.round(self.getBaseAngles(cell = 1)[idx], prec),\\\n \"Atoms\": self.atoms[idx],\\\n \"Area\": self.getAreas()[idx],\\\n \"Strain 11\": np.round(self.eps_11[idx], prec),\\\n \"Strain 22\": np.round(self.eps_22[idx], prec),\\\n \"Strain 12\": np.round(self.eps_12[idx], prec),\\\n \"Strain MAS\": np.round(self.eps_mas[idx], prec),\\\n \"Base 1 ax\": np.round(self.cell_1[idx, 0, 0], prec),\\\n \"Base 1 ay\": np.round(self.cell_1[idx, 1, 0], prec),\\\n \"Base 1 bx\": np.round(self.cell_1[idx, 0, 1], prec),\\\n \"Base 1 by\": np.round(self.cell_1[idx, 1, 1], prec),\\\n \"Base 2 ax\": np.round(self.cell_2[idx, 0, 0], prec),\\\n \"Base 2 ay\": np.round(self.cell_2[idx, 1, 0], prec),\\\n \"Base 2 bx\": np.round(self.cell_2[idx, 0, 1], prec),\\\n \"Base 2 by\": np.round(self.cell_2[idx, 1, 1], prec),\\\n \"Rep 1 ax\": np.round(self.rep_1[idx, 0, 0], prec),\\\n \"Rep 1 ay\": np.round(self.rep_1[idx, 1, 0], prec),\\\n \"Rep 1 bx\": np.round(self.rep_1[idx, 0, 1], prec),\\\n \"Rep 1 by\": np.round(self.rep_1[idx, 1, 1], prec),\\\n \"Rep 2 ax\": np.round(self.rep_2[idx, 0, 0], prec),\\\n \"Rep 2 ay\": np.round(self.rep_2[idx, 1, 0], prec),\\\n \"Rep 2 bx\": np.round(self.rep_2[idx, 0, 1], prec),\\\n \"Rep 2 by\": np.round(self.rep_2[idx, 1, 1], prec)}\n\n for i in range(self.e_int_c.shape[1]):\n key = \"E_int_c_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_c[idx, i], prec)\n\n for i in range(self.w_sep_c.shape[1]):\n key = \"W_sep_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_c[idx, i], prec)\n\n for i in range(self.w_seps_c.shape[1]):\n key = \"W_seps_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_c[idx, i], prec)\n\n for i in range(self.e_int_d.shape[1]):\n key = \"E_int_d_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_d[idx, i], prec)\n\n for i in range(self.w_sep_d.shape[1]):\n key = \"W_sep_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_d[idx, i], prec)\n\n for i in range(self.w_seps_d.shape[1]):\n key = \"W_seps_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_d[idx, i], prec)\n\n\n data = pd.DataFrame(dataDict)\n data.to_excel(filename)\n\n if verbose > 0:\n string = \"Data written to Excel file: %s\" % filename\n ut.infoPrint(string)", "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n 
F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def write(self, filename, delimiter = ','):\n # Names of individuals, plus mothers and fathers.\n nms = np.column_stack([self.names, self.mothers, self.fathers])\n # format genotype data as a strings\n output = self.geno.sum(2).astype('str')\n output[output == '-18'] = 'NA' # coerce missing data to NA\n\n output = np.concatenate([nms, output], axis=1)\n header = 'ID,mother,father,' + ','.join(self.markers)\n np.savetxt(filename, output, delimiter=delimiter, fmt=\"%s\", header=header, comments='')", "def render_sheet_to_file(self, file_name, sheet, **keywords):\n raise NotImplementedError(\"We are not writing to file\")", "def getMarkerGenes(self):\n try:\n self.marker_genes = Utils.findMarkers(self.sc_annot, self.de_dict)\n except:\n print(\"Please run getCellTypes first to get cell annotations. This step is needed for marker gene finding.\")", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = 
STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def write_xyz_file(allxyz):\n if SAVEXYZ:\n print('+> Saving riverbed topography file...', end='')\n if MODE == 1:\n np.savetxt('kinoshita_topo.xyz', allxyz, fmt='%.6e')\n elif MODE == 2:\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_topo.xyz', allxyz, fmt='%.6e')\n print(' [done]')", "def save(self, passedWriter, replicates=False):\n\n #saves replicates\n with passedWriter as writer:\n for i in range(len(self.cellData)):\n try:\n self[i].to_excel(writer, sheet_name=str(self.cellLines[i]))\n except IndexError:\n pass\n for i in range(len(self.cellReferenceIntersections)):\n try:\n self.cellReferenceIntersections[i].to_excel(writer, sheet_name=str(self.cellLines[i]) + \" to reference\")\n except AttributeError:\n print(\"ERROR: Replicate reference intersections not calculated\")\n break\n except IndexError:\n pass\n try:\n self.cellFullIntersection.to_excel(writer, sheet_name=\"Full Intersection\")\n except AttributeError:\n print(\"ERROR: Replicate full intersection not calculated\")\n pass\n except IndexError:\n pass", "def print_markers(filename, day_values, hour_values, behavior_markers, \\\n behavior_change, dm, day, hour, bm, pp):\n cf = config.Config()\n fullname = 
os.path.join(cf.datapath, filename)\n if pp == True:\n outfile = fullname + '.day'\n str_header = generate_day_header()\n np.savetxt(outfile, day_values, delimiter=',', header=str_header)\n outfile = fullname + '.hour'\n str_header = generate_hour_header()\n np.savetxt(outfile, hour_values, delimiter=',', header=str_header)\n outfile = fullname + '.bm'\n str_header = generate_behavior_header()\n np.savetxt(outfile, [behavior_markers], delimiter=',', header=str_header)\n outfile = fullname + '.bcd'\n str_header = generate_bcd_header()\n np.savetxt(outfile, behavior_change, delimiter=',', header=str_header)\n else:\n outfile = fullname + '.bm'\n np.savetxt(outfile, [behavior_markers], delimiter=',')\n outfile = fullname + '.bcd'\n np.savetxt(outfile, behavior_change, delimiter=',')", "def save(self, filename):\n writer = pd.ExcelWriter(filename+\".xlsx\")\n for tab in self.group.df_assignment_merge.keys():\n self.group.df_assignment_merge[tab].to_excel(writer,tab)\n\n writer.save()\n\n #df.to_excel(\"filename.xlsx\")", "def write_to_file(\n user_lookup, user, level, bought_items, recommended_items, recommended_items_new\n):\n\n user_key = str(list(user_lookup[\"customer\"][user_lookup[\"user_id\"] == user])[0])\n df_info = pd.DataFrame(\n [[user_key], [level]], index=[\"User\", \"level\"], columns=[\"Value\"]\n )\n\n filename = \"scratchpad/Results/Recommendations_output_\" + str(user) + \".xlsx\"\n bought_df = pd.DataFrame(bought_items)\n with pd.ExcelWriter(filename) as writer:\n df_info.to_excel(writer, sheet_name=\"info\")\n bought_df.to_excel(writer, sheet_name=\"bought_items\")\n recommended_items.to_excel(writer, sheet_name=\"recommended_items\")\n recommended_items_new.to_excel(writer, sheet_name=\"recommended_items_new\")", "def writeMarkerFile(self, markerFileName):\n markerFileVol = vol(12, len(self._ProjectionList._list), len(self._Markers))\n markerFileVol.setAll(0.)\n for (imark, Marker) in enumerate(self._Markers):\n for (itilt, proj) in enumerate(self._ProjectionList._list):\n markerFileVol.setV(Marker.get_xProj(itilt), 1, itilt, imark)\n markerFileVol.setV(Marker.get_yProj(itilt), 2, itilt, imark)\n if imark == 0:\n markerFileVol.setV(int(round(proj._tiltAngle)), 0, int(itilt), int(imark))\n markerFileVol.write(markerFileName)", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def write_drugs_name(self):\r\n for elem in range(len(self.output_zakladki)):\r\n n, first_row = 0, 3\r\n\r\n sheet = self.output_zakladki[elem]\r\n sh = self.output_file.get_sheet_by_name(sheet)\r\n no_of_row = self.liczba_pelnych_linii\r\n\r\n if self.niepelna_liniia:\r\n no_of_row += 1\r\n\r\n while no_of_row != 0:\r\n for lek in self.output_leki[elem]:\r\n sh['A' + str(first_row + n)] = lek\r\n n += 1\r\n first_row += 2\r\n no_of_row -= 1", "def saveAll(self):\r\n path = saveFile(ftype='xlsx')\r\n writer = pd.ExcelWriter(path)\r\n df = pd.DataFrame(self.saveAll)\r\n df.to_excel(writer, header=False, index=False)\r\n writer.save()\r\n \r\n #Format the excel file\r\n try:\r\n import openpyxl\r\n from openpyxl.styles import Alignment, Font, Border, Side\r\n #Load the workbook and worksheet\r\n wb = openpyxl.load_workbook(filename=path)\r\n ws = wb.get_sheet_by_name(\"Sheet1\")\r\n cells = ['E1','H1','K1','N1','Q1','T1','W1','Z1']\r\n ws.merge_cells('E1:G1')\r\n ws.merge_cells('H1:J1')\r\n ws.merge_cells('K1:M1')\r\n 
ws.merge_cells('N1:P1')\r\n ws.merge_cells('Q1:S1')\r\n ws.merge_cells('T1:V1')\r\n ws.merge_cells('W1:Y1')\r\n ws.merge_cells('Z1:AB1')\r\n #Bold and center the headers\r\n ft = Font(bold=True)\r\n for cell in cells:\r\n ws[cell].alignment = Alignment(horizontal=\"center\")\r\n ws[cell].font = ft\r\n #Add borders\r\n rows,_ = self.saveAll.shape\r\n for i in range(rows):\r\n for cell in cells:\r\n c = cell[0]+str(i+1)\r\n ws[c].border = Border(left=Side(style='thin'))\r\n\r\n \r\n \r\n wb.save(path)\r\n \r\n except ImportError:\r\n pass", "def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')", "def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. 
Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def write_excel(self, sheet_number):\n for i in range(len(self.arrays[sheet_number])):\n self.formats[sheet_number].append([])\n for j in range(len(self.arrays[sheet_number][0])):\n cell_format = self.workbook.add_format(self.base_format)\n\n if type(self.arrays[sheet_number][i][j]).__name__ == 'int' and self.arrays[sheet_number][i][j] > 0:\n cell_format.set_bg_color('#ffd5d5')\n self.formats[sheet_number][i].append(cell_format)\n self.worksheets[sheet_number].write(i, j, self.arrays[sheet_number][i][j], self.formats[sheet_number][i][j])", "def dfs_tabs(df_list, sheet_list, file_name):\n\n writer = pd.ExcelWriter(file_name,engine='xlsxwriter') \n for dataframe, sheet in zip(df_list, sheet_list):\n dataframe.to_excel(writer, sheet_name=sheet, startrow=0 , startcol=0, index=False) \n writer.save()", "def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n 
else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = 
UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = 
col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def _save_chromosome_at_index(self, index, file_name):\n how_to_open = 'w' if index == 0 else 'a'\n 
with open(file_name, how_to_open) as out_file:\n for category in self.population[index].get_genes():\n out_file.write(''.join(category) + '\\t')\n out_file.write(\n '\\n{}\\n'.format(self.population[index].get_fitness())\n )", "def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)", "def _write2excel(self, sheet: object, data: list, start_row: int, start_col: int):\n for r in range(0,len(data)):\n for c in range(0,len(data[0])):\n sheet.cell(r+start_row,c+start_col).value=data[r][c]", "def start_output(self):\r\n self.create_output_file()\r\n\r\n for elem in range(len(self.output_zakladki)):\r\n self.output_file.create_sheet(self.output_zakladki[elem], elem)\r\n\r\n self.remowe_first_sheet()", "def to_xyz(self, extended_xyz: bool = True,\n print_stds: bool = False,\n print_forces : bool = False,\n print_max_stds: bool = False,\n write_file: str = '')->str:\n species_list = [Z_to_element(x) for x in self.coded_species]\n xyz_str = ''\n xyz_str += f'{len(self.coded_species)} \\n'\n\n # Add header line with info about lattice and properties if extended\n # xyz option is called.\n if extended_xyz:\n cell = self.cell\n\n xyz_str += f'Lattice=\"{cell[0,0]} {cell[0,1]} {cell[0,2]}'\n xyz_str += f' {cell[1,0]} {cell[1,1]} {cell[1,2]}'\n xyz_str += f' {cell[2,0]} {cell[2,1]} {cell[2,2]}\"'\n xyz_str += f' Proprties=\"species:S:1:pos:R:3'\n\n if print_stds:\n xyz_str += ':stds:R:3'\n stds = self.stds\n if print_forces:\n xyz_str += ':forces:R:3'\n forces = self.forces\n if print_max_stds:\n xyz_str += ':max_std:R:1'\n xyz_str += '\\n'\n else:\n xyz_str += '\\n'\n\n for i, pos in enumerate(self.positions):\n # Write positions\n xyz_str += f\"{species_list[i]} {pos[0]} {pos[1]} {pos[2]}\"\n\n # If extended XYZ: Add in extra information\n if print_stds and extended_xyz:\n xyz_str += f\" {stds[i,0]} {stds[i,1]} {stds[i,2]}\"\n if print_forces and extended_xyz:\n xyz_str += f\" {forces[i,0]} {forces[i,1]} {forces[i,2]}\"\n if print_max_stds and extended_xyz:\n xyz_str += f\" {np.max(stds[i,:])} \"\n xyz_str += '\\n'\n\n # Write to file, optionally\n if write_file:\n with open(write_file, 'w') as f:\n f.write(xyz_str)\n\n return xyz_str", "def excel_out(employees_dict, path):\n # Create workbook and worksheet\n try:\n workbook = xlsxwriter.Workbook(path)\n except:\n return False\n worksheet = workbook.add_worksheet(name='Прокуратура')\n # Add format to workbook\n format_headers_po = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 14,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFCA28',\n 'border': 2})\n format_headers_department = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 13,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFD54F',\n 'border': 2})\n format_headers_division = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFE082',\n 'border': 2})\n format_header = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFF59D',\n 'border': 2})\n employee_format_b = workbook.add_format( {'align': 'left',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'bold': 
True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n employee_format = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n format_attribute = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 10,\n 'font_name': 'Times New Roman',\n 'border': 1})\n\n # Set width of columns and height of rows\n worksheet.set_default_row(40, False)\n worksheet.set_column(0, 0, 5)\n worksheet.set_column(1, 1, 25)\n worksheet.set_column(2, 2, 21)\n worksheet.set_column(3, 3, 21)\n worksheet.set_column(4, 4, 21)\n\n # Begin from row\n row = 0\n\n # Parser for employees dictionary\n for po in employees_dict:\n # Прокуратура\n worksheet.merge_range(row, 0, row, 4, data=po.name, cell_format=format_headers_po)\n row += 1\n # Атрибуты Прокуратуры\n row = add_attribute(po, worksheet, row, format_attribute)\n # Header\n row = add_header(worksheet, row, format_header)\n # Работники Прокуратуры\n if 'employees' in employees_dict[po]:\n for num, employee in enumerate(employees_dict[po]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Управление\n if 'departments' in employees_dict[po]:\n for department in employees_dict[po]['departments']:\n worksheet.merge_range(row, 0, row, 4, data=department.name, cell_format=format_headers_department)\n row += 1\n # Атрибуты Управления\n row = add_attribute(department, worksheet, row, format_attribute)\n # Работники Управления\n if 'employees' in employees_dict[po]['departments'][department]:\n for num, employee in enumerate(employees_dict[po]['departments'][department]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n # Отдел Управления\n if 'divisions' in employees_dict[po]['departments'][department]:\n for division in employees_dict[po]['departments'][department]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['departments'][department]['divisions'][division], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Отдел Прокуратуры\n if 'divisions' in employees_dict[po]:\n for division in employees_dict[po]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['divisions'][division], 1):\n row += add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n try:\n workbook.close()\n except:\n return False\n return True" ]
[ "0.5928262", "0.58982015", "0.58421063", "0.5730327", "0.5662606", "0.5617152", "0.557254", "0.5518996", "0.5494513", "0.5493021", "0.5490403", "0.5479139", "0.5447389", "0.5395281", "0.53894603", "0.5379124", "0.53154147", "0.5310389", "0.52733517", "0.5262296", "0.52478814", "0.5244832", "0.5242181", "0.52315885", "0.52227587", "0.5222531", "0.52128744", "0.5212625", "0.520826", "0.520747" ]
0.81966275
0
Plots tree representation of single cell annotations.
def getTreePlot(self): try: Utils.plotTree(self.sc_annot) except: print("Please run getCellTypes first to get cell annotations. This step is needed for plotting.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotTree(self):\n t = self.make(self.tree)\n t.draw()", "def plot_tree(tree, name):\n graph = pydot.Dot(graph_type='graph')\n tree_graph = pydot.Cluster(\n graph_name=\"Learner Tree\",\n label=\"Learner Tree\",\n fontsize=\"15\",\n )\n graphlegend = pydot.Cluster(\n graph_name=\"legend\",\n label=\"Legend\",\n fontsize=\"15\",\n rankdir=\"LR\")\n legends = []\n for NODE_KEY in list(NODE_TYPES.keys()):\n legend = pydot.Node(\n NODE_KEY,\n style=\"filled\",\n fillcolor=NODE_COLOR_DICT[NODE_KEY],\n rank=\"same\"\n )\n graphlegend.add_node(legend)\n legends.append(legend)\n\n walk_dictionary(tree_graph, tree)\n graph.add_subgraph(tree_graph)\n graph.add_subgraph(graphlegend)\n for legend_index in range(1, len(legends)):\n graph.add_edge(pydot.Edge(legends[legend_index - 1], legends[legend_index], style=\"invis\"))\n graph.write_png('./' + name + '.png')", "def draw_tree(self):\n nx.draw(self.diffusion_tree, with_labels=True)", "def drawtree(self):\r\n\r\n Phylo.draw(self.tree)", "def plot(df, ax, myself, names):\n\n df = df.sort_values(by=\"time\", ascending=True)\n offset = df.iloc[0][\"time\"]\n\n nodes = {}\n for name in names:\n nodes[name] = {\n \"master\": [],\n \"observer\": []\n }\n\n for (_id, row) in df[df[\"type\"] != \"R\"].iterrows():\n if row[\"type\"] == \"M\":\n time = row[\"time\"]\n target = row[\"args\"]\n for (name, blocks) in nodes.items():\n if name == target:\n close_block(blocks[\"observer\"], time)\n open_block(blocks[\"master\"], time)\n else:\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"T\":\n time = row[\"time\"]\n target = row[\"args\"]\n blocks = nodes[target]\n close_block(blocks[\"master\"], time)\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"F\":\n time = row[\"time\"]\n for blocks in nodes.values():\n close_block(blocks[\"master\"], time)\n close_block(blocks[\"observer\"], time)\n\n for (index, blocks) in enumerate(nodes.values()):\n plot_blocks(ax, index, blocks[\"master\"], offset, \"tab:blue\")\n plot_blocks(ax, index, blocks[\"observer\"], offset, \"tab:orange\")\n\n x_ticks = range(0, 10)\n y_ticks = [10, 20, 30, 40, 50]\n\n ax.title.set_text(\"View of node: {0}\".format(myself))\n ax.set_xlabel(\"seconds since start\")\n ax.set_xticks(x_ticks)\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(names)\n ax.grid(True)\n\n # Add annotations:\n\n index = list(nodes.keys()).index(myself)\n for (_id, row) in df[df[\"type\"] == \"R\"].iterrows():\n x = (row[\"time\"] - offset).total_seconds()\n y = y_ticks[index]\n ax.annotate(\n \"Round {0}\".format(row[\"args\"]),\n xycoords=\"data\",\n xy=(x, y),\n xytext=(x, y + 5),\n arrowprops=dict(\n facecolor=\"black\",\n shrink=0.05\n )\n )", "def draw_tree(self, agent, color='b'):\n for edge in self.all_edges[agent]:\n parent, child = edge\n for cords in self.xy_cords:\n plt.plot([parent.state[cords[0]], child.state[cords[0]]],\n [parent.state[cords[1]], child.state[cords[1]]], c=color)\n plt.xlim(self.Xi[0])\n plt.ylim(self.Xi[1])\n plt.show()", "def show(self):\n if self._tree is None:\n raise RuntimeError(\"Estimator not fitted, call `fit` first\")\n\n import tree_plotter\n tree_plotter.createPlot(self._tree)", "def visualize(self, A):\n G = nx.from_numpy_matrix(np.array(A))\n nx.draw(G, with_labels=True)\n plt.show()\n plt.clf()\n exit(0)", "def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n 
IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))", "def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))", "def plot_tiles(self):\n \n #TODO: adjust plot, border and text_box sizes\n \n ordered_projections = []\n flat_clusters = []\n colors_2D = []\n\n for cluster, nodes in clusters.items():\n for n in nodes:\n ordered_projections.append(projection_2D[n])\n\n for n in nodes:\n flat_clusters.append(n)\n\n for i, n in enumerate(G.nodes):\n if n in nodes:\n colors_2D.append(colors[i])\n\n grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))\n\n if len(ordered_projections) <= (grid_cols**2 - grid_cols):\n grid_rows = grid_cols - 1\n else:\n grid_rows = grid_cols\n\n #assuming images are same size, get shape\n l, w = ordered_projections[0].shape\n\n #add blank images to pack in grid\n while len(ordered_projections) < grid_rows*grid_cols:\n ordered_projections.append(np.zeros((l, w)))\n colors_2D.append((0., 0., 0.))\n flat_clusters.append('')\n\n f = Figure()\n\n grid = ImageGrid(f, 111, #similar to subplot(111)\n nrows_ncols=(grid_rows, grid_cols), #creates grid of axes\n axes_pad=0.05) #pad between axes in inch\n \n lw = 1.75\n text_box_size = 5 \n props = dict(boxstyle='round', facecolor='white')\n \n for i, (ax, im) in enumerate(zip(grid, ordered_projections)):\n ax.imshow(im, cmap='gray')\n\n for side, spine in ax.spines.items():\n spine.set_color(colors_2D[i])\n spine.set_linewidth(lw)\n\n ax.get_yaxis().set_ticks([])\n ax.get_xaxis().set_ticks([])\n\n text = str(flat_clusters[i])\n ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)\n \n newWindow = tk.Toplevel()\n newWindow.grid_rowconfigure(0, weight=1)\n newWindow.grid_columnconfigure(0, weight=1)\n \n #PLOT FRAME\n plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)\n plotFrame.grid(row=0, column=0, sticky='nsew')\n \n canvas = FigureCanvasTkAgg(f, plotFrame)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas.figure.tight_layout()\n \n\n #TOOLBAR FRAME\n toolbarFrame = ttk.Frame(newWindow, width=600, height=100)\n toolbarFrame.grid(row=1, column=0, sticky='nsew')\n toolbarFrame.grid_propagate(0)\n \n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)\n toolbar.update()", "def tree(self):\n return self.to_geom()", "def to_cell_coordinates(self):\n self.plotter.to_cell_coordinates(self.ax)\n self.plotter.replot(self.ax)\n self.plotter.cells.draw(self.ax)\n self.x_label.set_text(self.plotter.plot_xlabel)\n self.fig.canvas.draw()", "def __repr__(self: 'BarTree') -> str:\n return 'BarTree({}, {})'.format(repr(self.children[0]), \n repr(self.children[1]))", "def _plot_ftree(self, rows, lines, labels):\n g = Graphics()\n for i in range(len(rows)):\n cur = rows[i]\n for j in range(len(cur)):\n e, f, k = cur[j]\n if e is not None:\n if ZZ(e).is_irreducible():\n c = (1, 0, 0)\n else:\n c = (0, 0, .4)\n g += text(\"$%s$\" % latex(e), (j * 2 - len(cur), -i), rgbcolor=c, **labels)\n if k is not None and f is not None:\n g += line([(j * 2 - len(cur), -i), (k * 2 - len(rows[i - 1]), -i + 1)], axes=False, **lines)\n return g", "def visualize(tree, depth=0):\r\n\r\n if depth == 0:\r\n 
print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1}]'.format(split_criterion[0], split_criterion[1]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n visualize(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def draw_tree(self, ax: plt.Axes = None, tree_depth: int = None, exclude_empty: bool = False,\n line_width: int = 1, edge_color='red', plot_nodes: bool = False, plot_points: bool = False):\n\n manager = self.root.manager\n manager._finalize_data()\n\n root_quad = self.root\n norm = matplotlib.colors.Normalize(vmin=root_quad.settings['min_depth'], vmax=root_quad.settings['max_depth'])\n cmap = matplotlib.cm.rainbow\n\n if ax is None:\n ax = plt.subplots(figsize=[11, 7], dpi=150)[1]\n\n if tree_depth is None or tree_depth == 0:\n if exclude_empty and not self.index:\n pass\n else:\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n if self.quad_index != -1:\n try:\n idx = self.quad_index[0], self.quad_index[1]\n except:\n idx = self.quad_index\n quad_z = manager.node_data['z'][idx].compute()\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=0.5, lw=line_width, ec=edge_color, fc=cmap(norm(quad_z)))\n if plot_nodes:\n quad_x = manager.node_data['x'][idx].compute()\n quad_y = manager.node_data['y'][idx].compute()\n ax.scatter(quad_x, quad_y, s=5)\n if plot_points:\n ax.scatter(manager.data['x'][self.index].compute(),\n manager.data['y'][self.index].compute(), s=2)\n else: # no depth for the quad\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=1, lw=line_width, ec=edge_color, fc='None')\n ax.add_patch(rect)\n\n if tree_depth is None:\n for child in self.children:\n child.draw_tree(ax, tree_depth=None, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n elif tree_depth > 0:\n for child in self.children:\n child.draw_tree(ax, tree_depth=tree_depth - 1, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n\n if (self.tree_depth == 0) or (tree_depth is None and self.tree_depth == 0):\n xsize = self.maxs[0] - self.mins[0]\n ysize = self.maxs[1] - self.mins[1]\n ax.set_ylim(self.mins[1] - ysize / 10, self.maxs[1] + ysize / 10)\n ax.set_xlim(self.mins[0] - xsize / 10, self.maxs[0] + xsize / 10)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n plt.gcf().colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Depth (+down, meters)')\n\n return ax", "def plot(decisionTree):\n def toString(decisionTree, indent=''):\n if decisionTree.results != None: # leaf node\n return str(decisionTree.results)\n else:\n if isinstance(decisionTree.value, int) or isinstance(decisionTree.value, float):\n decision = 'Column %s: x >= %s?' % (decisionTree.col, decisionTree.value)\n else:\n decision = 'Column %s: x == %s?' 
% (decisionTree.col, decisionTree.value)\n trueBranch = indent + 'yes -> ' + toString(decisionTree.trueBranch, indent + '\\t\\t')\n falseBranch = indent + 'no -> ' + toString(decisionTree.falseBranch, indent + '\\t\\t')\n return (decision + '\\n' + trueBranch + '\\n' + falseBranch)\n\n print(toString(decisionTree))", "def plotInternalNodes( self ):\n\n \n for node_id in self.mTree.chain.keys():\n\n node = self.mTree.node( node_id )\n if node.succ == []: continue\n \n x = self.mNodeWidthsEnd[node_id]\n y = self.mNodeHeights[node_id]\n\n e = self.mDecoratorInternalNodes.getElements( node_id,\n self.getHeaderWidth() + x,\n self.getHeaderHeight() + y )\n \n self.addElements(e)", "def plot_graph(self) -> None:", "def plot_edges(self, node_list):\n tree = MarkerArray()\n id = 1\n for node in self.node_list:\n if node.parent:\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = id\n id += 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n\n path.color.r = 1.0\n path.color.g = 0.7\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n p1 = Point()\n p1.x = node.parent.x\n p1.y = node.parent.y\n p1.z = 0.02\n path.points.append(p1)\n\n p2 = Point()\n p2.x = node.x\n p2.y = node.y\n p2.z = 0.02\n path.points.append(p2)\n \n tree.markers.append(path)\n\n self.pub_edges.publish(tree)", "def plotInterface(self, background = \"both\", annotate = True,\\\n idx = 0, verbose = 1, align_base = \"cell_1\", scale = False,\\\n save = False, format = \"pdf\", dpi = 100,\\\n col = None, row = None, N = None, handle = False):\n\n if verbose > 0: self.printInterfaces(idx = idx)\n\n if not handle:\n hFig = plt.figure()\n col, row, N = (1, 1, 1)\n hAx = plt.subplot(row, col, N)\n\n lw = 1.4\n\n mat = self.cell_1[idx, :, :]\n top = self.cell_2[idx, :, :]\n rot_1 = 0\n rot_2 = self.ang[idx]\n aDeg = 0\n\n if align_base.lower() == \"cell_1\":\n mat, aRad = ut.align(mat, align_to = [1, 0], verbose = verbose - 1)\n top = ut.rotate(top, aRad, verbose = verbose - 1)\n rot_1 = rot_1 + np.rad2deg(aRad)\n rot_2 = rot_2 + np.rad2deg(aRad)\n elif align_base.lower() == \"both\":\n mat, aRad = ut.align(mat, align_to = [1, 0], verbose = verbose - 1)\n top, bRad = ut.align(top, align_to = [1, 0], verbose = verbose - 1)\n rot_1 = rot_1 + np.rad2deg(aRad)\n rot_2 = rot_2 + np.rad2deg(bRad)\n elif align_base.lower() == \"center\":\n mat, aRad = ut.align(mat, align_to = [1, 0], verbose = verbose - 1)\n top, bRad = ut.center(mat, top, verbose = verbose - 1)\n rot_1 = rot_1 + np.rad2deg(aRad)\n rot_2 = rot_2 + np.rad2deg(bRad)\n\n if background.lower() == \"cell_1\" or background.lower() == \"both\":\n ut.overlayLattice(lat = self.base_1, latRep = self.rep_1[idx, :, :],\\\n rot = rot_1, hAx = hAx, ls = '-')\n\n if background.lower() == \"cell_2\" or background.lower() == \"both\":\n ut.overlayLattice(lat = self.base_2, latRep = self.rep_2[idx, :, :],\\\n rot = rot_2, hAx = hAx, ls = '--')\n\n \"\"\"Origo to a\"\"\"\n vec_ax = np.array([0, mat[0, 0]])\n vec_ay = np.array([0, mat[1, 0]])\n hAx.plot(vec_ax, vec_ay, linewidth = lw, color = 'b', label = r\"$\\vec a_1$\")\n\n \"\"\"Origo to b\"\"\"\n vec_bx = np.array([0, mat[0, 1]])\n vec_by = np.array([0, mat[1, 1]])\n hAx.plot(vec_bx, vec_by, linewidth = lw, color = 'r', label = r\"$\\vec a_2$\")\n\n \"\"\"a to a + b\"\"\"\n vec_abx = np.array([vec_ax[1], vec_ax[1] + vec_bx[1]])\n vec_aby 
= np.array([vec_ay[1], vec_ay[1] + vec_by[1]])\n hAx.plot(vec_abx, vec_aby, linewidth = lw, color = 'k', ls = '-')\n\n \"\"\"b to b + a\"\"\"\n vec_bax = np.array([vec_bx[1], vec_bx[1] + vec_ax[1]])\n vec_bay = np.array([vec_by[1], vec_by[1] + vec_ay[1]])\n hAx.plot(vec_bax, vec_bay, linewidth = lw, color = 'k', ls = '-')\n\n \"\"\"Annotate with original B lattice vectors, if selected\"\"\"\n if annotate:\n vec_ax_an = np.array([0, top[0, 0]])\n vec_ay_an = np.array([0, top[1, 0]])\n hAx.plot(vec_ax_an, vec_ay_an, linewidth = lw, color = 'b',\\\n label = r\"$\\vec b_1$\", ls = '--')\n\n vec_bx_an = np.array([0, top[0, 1]])\n vec_by_an = np.array([0, top[1, 1]])\n hAx.plot(vec_bx_an, vec_by_an, linewidth = lw, color = 'r',\\\n label = r\"$\\vec b_2$\", ls = '--')\n\n vec_abx_an = np.array([vec_ax_an[1], vec_ax_an[1] + vec_bx_an[1]])\n vec_aby_an = np.array([vec_ay_an[1], vec_ay_an[1] + vec_by_an[1]])\n hAx.plot(vec_abx_an, vec_aby_an, linewidth = 1, color = 'k', ls = '--')\n\n vec_bax_an = np.array([vec_bx_an[1], vec_bx_an[1] + vec_ax_an[1]])\n vec_bay_an = np.array([vec_by_an[1], vec_by_an[1] + vec_ay_an[1]])\n hAx.plot(vec_bax_an, vec_bay_an, linewidth = 1, color = 'k', ls = '--')\n\n if scale:\n box = self.getBox(pad = 3, cell = np.hstack((mat, top)))\n else:\n box = self.getBox(pad = 3, cell = np.hstack((mat, top)), equal = False)\n\n hAx.set_xlim(left = box[0], right = box[1])\n hAx.set_ylim(bottom = box[2], top = box[3])\n\n hAx.set_title(\"Interface %s\" % idx)\n if not handle:\n hAx.set_ylabel(r\"y, ($\\AA$)\")\n hAx.set_xlabel(r\"x, ($\\AA$)\")\n\n hAx.legend(framealpha = 1)\n else:\n if np.isin(N, range(1, row*col + 1, col)):\n hAx.set_ylabel(\"y, ($\\AA$)\")\n if np.isin(N, range((row - 1) * col + 1, row * col + 1)):\n hAx.set_xlabel(\"x, ($\\AA$)\")\n\n if handle: return\n\n plt.tight_layout()\n if save:\n if save is True:\n ut.save_fig(filename = \"interface_%s.%s\" % (idx, format),\\\n format = format, dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n plt.show()", "def visualize_tree(tree, feature_names):\n with open(\"dt.dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", \"dt.dot\", \"-o\", \"dt.png\"]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")", "def mark_treegrid(self):\n self.mark_xml = xml.SubElement(\n self.context.parent, \"g\", \n id=self.context.get_id(self.mark),\n attrib={\"class\": \"toytree-mark-Grid\"},\n )", "def __repr__(self):\n return self.displayTree(0)", "def plot_rna(data_merged, id_cell, framesize=(7, 7), path_output=None,\n ext=\"png\"):\n # TODO Sanity check of the dataframe\n\n # get cloud points\n cyto = data_merged.loc[id_cell, \"pos_cell\"]\n cyto = np.array(cyto)\n rna = data_merged.loc[id_cell, \"RNA_pos\"]\n rna = np.array(rna)\n\n # plot\n plt.figure(figsize=framesize)\n plt.plot(cyto[:, 1], cyto[:, 0], c=\"black\", linewidth=2)\n plt.scatter(rna[:, 1], rna[:, 0], c=\"firebrick\", s=50, marker=\"x\")\n plt.title(\"Cell id: {}\".format(id_cell), fontweight=\"bold\", fontsize=15)\n plt.tight_layout()\n save_plot(path_output, ext)\n plt.show()\n\n return", "def tree():\n nobv.visual_tree()", "def visualize_tree(tree, feature_names, save_dir='./'):\n with open(save_dir+'/'+\"dt.dot\", 'w') as f:\n export_graphviz(tree, out_file=f,\n feature_names=feature_names)\n\n command = [\"dot\", \"-Tpng\", 
save_dir+\"/dt.dot\", \"-o\", save_dir+\"/dt.png\"]\n try:\n subprocess.check_call(command)\n except:\n exit(\"Could not run dot, ie graphviz, to \"\n \"produce visualization\")", "def plot_mcc_tree():\n t = ete2.Tree(\"mcct.nex\")\n ts = ete2.treeview.TreeStyle()\n ts.show_scale = False\n ts.show_leaf_name = False\n ts.show_branch_support = False\n ts.scale = 500\n margin = 10\n ts.margin_top = margin\n ts.margin_bottom = margin\n ts.margin_left = margin\n ts.margin_right = margin\n\n germ_style = ete2.NodeStyle()\n germ_style[\"bgcolor\"] = \"LightSteelBlue\"\n proto_germ = t.get_common_ancestor(\"Danish\", \"Norwegian\",\"Icelandic\",\"Swedish\", \"Dutch\", \"German\", \"English\")\n proto_germ.set_style(germ_style)\n\n bs_style = ete2.NodeStyle()\n bs_style[\"bgcolor\"] = \"Moccasin\"\n proto_bs = t.get_common_ancestor(\"Bulgarian\", \"Czech\",\"Polish\",\"Russian\")\n proto_bs.set_style(bs_style)\n\n ital_style = ete2.NodeStyle()\n ital_style[\"bgcolor\"] = \"DarkSeaGreen\"\n proto_ital = t.get_common_ancestor(\"French\", \"Romanian\", \"Italian\", \"Portuguese\", \"Spanish\")\n proto_ital.set_style(ital_style)\n\n t.render(\"mcct.eps\", style_func, tree_style=ts, dpi=600, units=\"px\", w=2250)", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()" ]
[ "0.68197376", "0.59498745", "0.5782144", "0.5766384", "0.57036364", "0.5702009", "0.5632257", "0.5629917", "0.5601988", "0.5601988", "0.55770046", "0.5539725", "0.55021167", "0.54975194", "0.54721516", "0.5458151", "0.5425423", "0.5405625", "0.5379783", "0.53536665", "0.5349854", "0.5343429", "0.5326852", "0.53179973", "0.5310282", "0.52891123", "0.5283237", "0.5282764", "0.52817523", "0.5281596" ]
0.7746578
0
Prints Query Results For httpErrors Query
def printError(queryResults): print (queryResults[1]) # For loop created for the httpErrors array for results in queryResults[0]: print ( results[0], "-", str(results[1]) + "% errors")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_errors():\r\n conn, cur = connect()\r\n query3 = (\"select * from errors where error >1\")\r\n cur.execute(query3)\r\n res3 = cur.fetchall()\r\n conn.close()\r\n print(\"\\nDays with more than 1% of requests lead to errors:\\n\")\r\n for i in range(0, len(res3), 1):\r\n print(str(res3[i][0]) + \" --> \" + str(round(res3[i][1], 2))+\" %errors\")", "def search_bad_query(error):\n current_app.logger.debug(str(error))\n return render_template(\"search.html\", query=error.query, error=error)", "def search_err(error):\n\n url = \"https://api.stackexchange.com/\" + \"/2.2/search?page=2&order=desc&sort=activity&tagged=python&intitle={}&site=stackoverflow\".format(error)\n \n resp = requests.get(url)\n return resp.json()", "def print_errors():\n\n create_view_total_requests = (\n \"CREATE VIEW total_requests AS \" +\n \"SELECT date(time), count(status) as count \" +\n \"FROM log GROUP BY date;\")\n create_view_error_requests = (\n \"CREATE VIEW error_requests AS \" +\n \"SELECT date(time), count(status) as count \" +\n \"FROM log WHERE status LIKE '404%' GROUP BY date;\")\n calculate_error_percentage = (\n \"SELECT total_requests.date, ROUND(\" +\n \"(CAST(error_requests.count as decimal)/\" +\n \"total_requests.count*100.00),2) as percent \" +\n \"FROM total_requests, error_requests \" +\n \"WHERE total_requests.date=error_requests.date AND \" +\n \"(CAST(error_requests.count as decimal)/\" +\n \"total_requests.count*100.00)>1 ORDER BY percent DESC;\")\n\n print(\"\\nRunning Task: \" + print_errors.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_total_requests)\n cur.execute(create_view_error_requests)\n cur.execute(calculate_error_percentage)\n results = cur.fetchall()\n\n for result in results:\n print('\\t{0:%B %d, %Y} - {1}% errors'.format(result[0], result[1]))\n\n disconnect(conn, cur)", "def print_error_data(error_data):\n\n print('\\nDays when there were more than 1% errors in HTTP :\\n')\n for day in error_data:\n print(str(day[0]) + '\\t-\\t' + str(day[1]) + '% \\n')\n print('-------------------------------------------------------\\n')", "def display_results_for_errors(result):\n i = 0\n for r in result:\n print('\\t'+str(result[i][0])+' ---> '+str(result[i][1])+' %\\n')\n i = i + 1", "def execute_error_query():\n db = psycopg2.connect(database=\"news\")\n c = db.cursor()\n c.execute(error_query)\n content = c.fetchall()\n db.close()\n print(error_query_question)\n for date, error in content:\n print('%s -- %.2f%% errors' % (date.strftime(\"%B %d, %Y\"), error))\n print('\\n')", "def search_invalid_parameters(error):\n current_app.logger.info(str(error))\n return render_template(\"search.html\", query=error.query, error=error), 400", "def get_errorData_query():\n\n query = '''select total_requests.days, errors*100/total_requests as percentage\n from error_requests, total_requests\n where error_requests.days = total_requests.days\n and (errors*100/total_requests > 1);'''\n\n return query", "def print_query_response(response):\n if response.text is not None:\n print(json.loads(response.text))\n else:\n logger.warning('Response not valid.')", "def error_report():\n db, c = connect(DBNAME)\n c.execute(\"select to_char(time,'FMMonth DD, YYYY') as date, \"\n \"round((sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100,2) \"\n \"as percent_error from log group by date \"\n \"having (sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100 > 1\")\n error_table = c.fetchall()\n db.close()\n print 
\"\\nDates on Which Over 1% of Requests Led to Errors:\"\n for error in error_table:\n if __name__ == '__main__':\n print str(error[0]) + \" - \" + str(error[1]) + \"%\"", "def _filter_return_errors(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def print_requests_httperror(cls, class_name, response):\n print(\n f\"{cls.ERROR_PREFIX} {cls.REQUESTS_PACKAGE_HTTPERROR_MESSAGE} {class_name}/{response.json()['message']}\"\n )", "def print_http_error(error):\n if hasattr(error, \"msg\"):\n print \"%s msg '%s'.\" % (ERROR, error.msg)\n if hasattr(error, \"reason\"):\n print \"%s reason '%s'.\" % (ERROR, error.reason)\n if getattr(error, \"message\"):\n print \"%s message '%s'.\" % (ERROR, error.message)\n if hasattr(error, \"code\"):\n print \"%s error code '%d'.\" % (ERROR, error.code)", "def pp_query(query):\n print(format_query(query))", "def print_response(responses):\n connection_error, empty_results = False, False\n\n # the \"global\" keyword tells python that these variables are defined\n # *outside* our print_response() function\n global query_type\n global verbose\n # you should mostly avoid global variables, but they are sometimes handy\n\n # First, check if we have gotten any errors when connecting to the api\n # enumerate() returns each item in a list along with the item's index\n for index, response in enumerate(responses):\n # an http status code is a number sent from the web server\n # everyone knows the dreaded \"404\" (not found)\n # there is also 200 (ok), 503 (service unavailable), 418 (i'm a teapot -- not joking!)\n # and dozens of others\n if response.status_code != requests.codes.OK:\n connection_error = True\n del responses[index]\n # we also check if the response is empty\n # (that means the api found no words matching our query)\n elif response.json() == []:\n empty_results = True\n del responses[index]\n\n # this is because Windows doesn't understand ANSI color codes >:(\n # e.g. 
\\033[0;36m means \"turn the text after me blue\" -- but windows is like \"??\"\n # so the colorama library translates the ANSI codes\n colorama_init()\n\n if responses == [] and connection_error == True:\n print(\"\\033[0;36mUnable to reach API.\\033[0m Check your internet connection or try again with more feeling.\")\n sys.exit(1)\n elif responses == [] and empty_results == True:\n # if the user has the BSD 'fortune' program installed, use it\n try:\n fortune = call(['fortune','-s'])\n except FileNotFoundError:\n # otherwise, get a fortune from the web\n fortune = fortune_cookie()\n if fortune:\n print(\"\\033[0;36mNo results found!\\033[0m Have a fortune cookie:\")\n print(fortune)\n else:\n print(\"\\033[0;36mNo results found!\\033[0m Try a paper dictionary instead?\")\n sys.exit(1)\n\n # quick note about JSON before we dive in further\n # json is a method of representing abitrarily complex objects\n # it comes from javascript (JavaScript Object Notation)\n # like most js stuff it is excellently useful and a touch unholy\n # together with xml, yaml, and cvs, it is the commonest way of\n # making text data machine-readable\n # to help you understand, here are some examples of json objects\n #\n # [ {'type': 'noun', 'definition': 'available money; cash.', 'example': None},\n # {'type': 'adjective', 'definition': 'willing or eager to do something.',\n # 'example': 'she is ready to die for her political convictions'} ]\n # a list containing two dictionaries\n # each dictionary contains keys of 'type', 'definition', and 'example'\n #\n # [ {'word': 'ready', 'score': 2147483647, 'tags': ['query'],\n # 'defs': ['n\\tpoised for action', 'v\\tprepare for eating by applying heat'] } ]\n # a list containing one dictionary with keys 'word','score','tags', and 'defs'\n # notice that the value of 'tags' and 'defs' are both lists!\n #\n # [ {'word': 'devil', 'score': 2147483647,\n # 'tags': ['query', 'pron:D EH1 V AH0 L ', 'ipa_pron:dˈɛvʌɫ'] } ]\n # a list containing one dictionary with keys 'word', 'score', and 'tags'\n #\n # [ {'word': 'coleslaw', 'score': 26424, 'tags': ['n']},\n # {'word': 'dressing', 'score': 26424, 'tags': ['n']},\n # {'word': 'greens', 'score': 26424, 'tags': ['n'] } ]\n # you can read this one by yourself :)\n\n if query_type == \"DEF\":\n for response in responses:\n # print out helpful info if the user asked for it\n if verbose > 1: print(response.url) # What we asked the remote server\n if verbose > 2: print(response.text) # The raw return JSON\n # check if this is the datamuse API or the owlbot API\n if re.search(r'datamuse',response.url):\n api = \"datamuse\"\n # the json() function turns the raw response (bytes of data)\n # into python lists, dictionaries, etc (like demonstrated above)\n # we take the first item in the list [0] because a dictionary query\n # only has one entry (the word and its definition)\n payload = response.json()[0]\n word = payload[\"word\"]\n # since 'defs' is a list, let's join it together into a string for printing\n definition = '\\n'.join(payload['defs'])\n lines = []\n for entry in payload['defs']:\n # get the word type and its definition out of the string\n # yes, you can have two (or more!) 
return values from a function in python\n # groups() returns a tuple of all the capture groups in the regex (see below)\n # notice that _def not def (b/c def is a keyword)\n type,_def = re.match(r'([^\\\\]*)\\t(.*)',entry).groups()\n # put the type and def back into a string :)\n # ljust(11) is left justify by 11 spaces (neat formatted columns!)\n line = f\"{type.ljust(11)} {_def}\"\n # put that line into a list\n lines.append(line)\n # go back up and get another ^\n # now join all the lines together with a new line character (\\n) between them\n definition = '\\n'.join(lines)\n # regex explained: ([^\\\\]*)\\t(.*)\n # () capturing group -- what we find in here, we keep, lol\n # [] character set -- match any of the characters in here\n # [^ ] negation -- do not match any of the characters in here\n # \\\\ *one* literal backslash -- b/c \\ is special in regex \\\\ means \\\n # * the previous thing, zero or more times\n # \\t literal tab character\n # . any character at all ever -- even ones you weren't thinking about when you typed it :D\n # all together: anything which is not a \\, followed by a \\t, followed by anything\n # capture the first bit (type), forget the \\t, caputre the second bit (_def)\n else:\n api = \"owlbot\"\n payload = response.json()\n word = re.search(r'dictionary/(.*)$',response.url).groups()[0]\n # regex explained: $ means \"end of the line\"\n # it's not a character like \\n or \\r\n # it is an anchor (^ means \"start of the line\")\n lines = []\n for entry in payload:\n line = f\"{entry['type'].ljust(11)} {entry['definition']}\"\n # ' ' * 12 means insert 12 spaces\n if entry['example']: line += f\"\\n{' ' * 12}Example:{entry['example']}\"\n lines.append(line)\n definition = '\\n'.join(lines)\n # lots of work, but now we print it! \\o/\n print(f\"\\033[0;36m{api}\\033[0m says word \\033[0;32m{word}\\033[0m means\")\n print(definition)\n if query_type == \"PRO\":\n # print out helpful info if the user asked for it\n if verbose > 1: print(\"The answer came from: \",responses[0].url)\n if verbose > 2: print(\"The raw JSON response was: \",responses[0].text)\n # no for loop and only one response (responses[0])\n # (b/c we use only one API for everything except dictionary lookups)\n payload = responses[0].json()[0]\n word = payload[\"word\"]\n for tag in payload['tags']:\n if re.match(r'pron:',tag):\n pron = re.match(r'pron:(.*)',tag).groups()[0]\n elif re.match(r'ipa_pron:',tag):\n ipa = re.match(r'ipa_pron:(.*)',tag).groups()[0]\n pronunciation = f\"\\033[0;32m{pron}\\033[0m (\\033[0;32m{ipa}\\033[0m)\"\n print(f\"\\033[0;36mdatamuse\\033[0m says word \\033[0;32m{word}\\033[0m is pronounced like {pronunciation}\")\n else:\n # print out helpful info if the user asked for it\n if verbose > 1: print(\"The answer came from: \",responses[0].url)\n if verbose > 2: print(\"The raw JSON response was: \",responses[0].text)\n payload = responses[0].json()\n # this will be fun to explain but. . .\n # 1. go through each entry. if it has tags (a list), turn the list into a string\n for entry in payload:\n entry['tags'] = ', '.join(entry['tags']) if 'tags' in entry else ''\n # 2. create a function which takes one argument (entry -- a dictionary)\n # and returns a formatted string with justification and coloring\n fentry = lambda entry: (f\"\\033[0;32m{entry['word'].rjust(13)}\\033[0m \"\n f\"\\033[0;36m{entry['tags'].rjust(13)}\\033[0m \")\n # 3. 
for each entry in the payload list, run fentry(entry)*\n # (all the entries are now formatted as strings)\n entries = list(map(fentry, payload))\n # 4. starting at 0, go up to len(entries)-1 in steps of 3 (0,3,6,9. . .)\n # for each step *i*, take a slice of entries from i to i+3\n # join them together\n # this creates a single string containing three list entries\n # store all the strings in a list in the variable lines\n lines = (''.join(entries[i:i+3]) for i in range(0,len(entries),3))\n print(\"\\033[0;36mdatamuse thinks these words may help!\\033[0m\".rjust(94))\n # 5. join the lines together with \\n in between each\n print('\\n'.join(lines))\n\n # * extra note here about map()\n # since you are interested in data stuff :3\n # there's two very common data operations\n # one is \"for every datum, do something to it\"\n # another is \"keep some data, get rid of others\"\n # the first is usually called map\n # the second is called filter\n # python has functions for both of them (helpfully called map() and fliter(), tada!)\n # both take two arguments: a function and a list (or tuple or dictionary)\n # eg. filter(my_function,my_list)\n # with map, the function should take one argument, transform it, and return it\n # eg. def my_function(x):\n # return x + 3\n # (or my_function = lambda x: x + 3)\n # that function adds three but you can do any kind of (very complex) transforms\n # with filter, the function should take one argument, return true if it should be kept,\n # or false if not\n # eg. def my_function(x):\n # if x > 34.99: return True\n # else: return False\n # (or my_function = lambda x: True if x > 34.99 else False )\n #\n # the tricky bit is that neither map() or filter() return your data (huh?)\n # they return iterators\n # what's an iterator, sam?\n # an iterator is like a soda vending machine\n # it has all the cans of pop inside,\n # but you stick your quarters in and get them out one by one\n # for example:\n # >>> lst = [1,2,3,4,5]\n # >>> map(lambda x: x + 3, lst)\n # <map object at 0x7f1c78673b38> <-- this is the iterator\n # . . . and here's the loop that \"iterates\" over it:\n # >>> for item_plus_three in map(lambda x: x + 3, lst):\n # ... print(item_plus_three)\n # ... \n # 4\n # 5\n # 6\n # 7\n # 8", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))", "def print_errors(self,result,cause=False,detail=False):\n errors = result.get_errors()\n if errors:\n print ('=== ERRORS '+('='*59))\n for error in errors:\n print (error.id)\n if cause:\n print (' ',error.get_cause())\n if detail:\n for key in (k for k in error.keys() if k not in [Result.START_TIME,\n Result.END_TIME,\n Result.CAUSE]):\n print ('-' * 70)\n print ('%s:' % key)\n print (as_utf8(error[key]))", "def showerrors():\n errorMessages = middleware.ixn.showErrorMessage(silentMode=True)\n if errorMessages:\n print(errorMessages)\n print()", "def _find_errors_in_page(self, response):\n if response.status_code == 403:\n return \"Could not check for errors, as response was a 403 response\\\n forbidden. 
User asking for this url did not have permission.\"\n \n \n errors = re.search('<ul class=\"errorlist\">(.*)</ul>', \n response.content, \n re.IGNORECASE)\n\n if errors: \n #show a little around the actual error to scan for variables that\n # might have caused it\n span = errors.span()\n wide_start = max(span[0]-200,0)\n wide_end = min(span[1]+200,len(response.content)) \n wide_error = response.content[wide_start:wide_end]\n return wide_error\n \n return \"\"", "def send_query(self, query_str):\n failed_query_message = f\"The following query failed:\\n{query_str}\"\n try:\n response = client.query_workspace(\n workspace_id = ws_id,\n query = query_str,\n timespan = (start_time, end_time)\n )\n if response.status == LogsQueryStatus.PARTIAL:\n self.fail(f\"Got partial response for the following query:\\n{query_str}\")\n elif response.status == LogsQueryStatus.FAILURE:\n self.fail(failed_query_message)\n elif response.tables == None or len(response.tables) == 0:\n self.fail(\"No data tables were returned in the response for the query\")\n else:\n return response\n except HttpResponseError as err:\n self.fail(failed_query_message)", "def test_AlgorithmsHandler_GETMalformedQuery(self):\n response = self.testapp.get('/algorithms/?qqry=algorithm', expect_errors=True)\n self.assertEqual(400, response.status_int)\n self.assertIsNotNone(response.charset)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding=response.charset))\n self.assertEqual('application/json', response.content_type)", "def query3():\n\n print(\"3. On which days did more than 1% of requests lead to errors?\\n\")\n\n query = \"\"\"\n SELECT view_daily_requests.date,\n CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) AS pc\n FROM view_daily_requests\n JOIN view_daily_errors\n ON view_daily_requests.date = view_daily_errors.date\n WHERE CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) >= 0.01\n ORDER BY pc DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"pc\" as percentage,\n # format date '31 December 2018'. Print output.\n j = list(j)\n j[0] = j[0].strftime(\"%d %B %Y\")\n j[1] = str(format(j[1], '%'))\n print(\" Date: {} - {} errors\".format(*j))", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]", "def printDaysWithErrors():\n cursor = connection.cursor()\n query = \"\"\"\n SELECT * FROM\n (SELECT daily_error_view.day,\n (daily_error_view.errors * 100.0)\n /\n (daily_traffic_view.views * 100.0)\n AS error_rate\n FROM daily_error_view JOIN daily_traffic_view\n ON daily_error_view.day = daily_traffic_view.day)\n AS daily_error_rate\n WHERE daily_error_rate.error_rate > 0.01;\n \"\"\"\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nDays with greater than 1 percent error rate:\")\n for result in results:\n print(\"{:%B %d, %Y} - {:.2%} errors\".format(result[0], result[1]))", "def days_with_request():\n\n # To print information\n information_string = '3. 
Days with more than ' \\\n '1% of request that lead to an error:\\n'\n\n # Query string\n query = \"\"\"select * from (select date(time),\n round(100.0*sum(case log.status\n when '200 OK' then 0 else 1 end)/count(log.status),3)\n as error from log group by date(time)\n order by error desc) as subq where error > 1;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t{0:%B %d, %Y} - {1}%'.format(result[0], result[1]))\n\n print(\"\\n\")", "def httperror( status_code=500, message=b'' ):", "def test_results_failed(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser_id\n\n address = \"1215 Brookview Ave, Kettering, Ohio 45409\"\n\n resp = c.get(f\"/results/{address}\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\n '<h1 class=\"mt-3 display-2 text-center\"><b>Maybe try living somewhere closer to civilization, testuser</b></h1>',\n html,\n )\n self.assertIn(\n '<i class=\"fas fa-sad-cry fa-8x amber-text\"></i>', html,\n )", "def error_dates():\n\n results = query_database(QUERIES[2])\n print('\\nOn which days did more than 1% of requests lead to errors?\\n')\n for date, rate in results:\n print(' * {} -- {:.2%}'.format(date, rate))", "def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))" ]
[ "0.6736217", "0.64593387", "0.632152", "0.63167804", "0.63110185", "0.6229107", "0.6191037", "0.6138806", "0.613624", "0.6113323", "0.6022598", "0.5978065", "0.5972042", "0.5964749", "0.59426546", "0.58535814", "0.5853011", "0.5849017", "0.5840018", "0.5825729", "0.5808999", "0.5800331", "0.5785445", "0.5770634", "0.57492065", "0.57311994", "0.5726179", "0.56583095", "0.56558955", "0.5641752" ]
0.84260356
0
Override process_record with your mapper
def process_record(self, record): raise NotImplementedError('Process record needs to be customized')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mapper(record):\n personA = record[0]\n personB = record[1]\n mr.emit_intermediate(personA, personB)", "def handle_record(self, record):\n raise NotImplementedError", "def Transform(self, record):\n pass", "def transform_record(self, pid, record, links_factory=None, **kwargs):\n context = kwargs.get(\"marshmallow_context\", {})\n context.setdefault(\"pid\", pid)\n context.setdefault(\"record\", record)\n return self.dump(\n self.preprocess_record(pid, record, links_factory=links_factory, **kwargs),\n context,\n )", "def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None:\n pass", "def emit(self, record):\r\n try:\r\n self.enqueue(self.prepare(record))\r\n except Exception:\r\n self.handleError(record)", "def _do_mapping(self):\n pass", "def parse_record(self, record):\n raise NotImplementedError()", "def emit(self, record):\n pass", "def _get_mapping_record(self):\n return self.__mapping_record", "def _get_map_record(self):\n return self.mapper.map_record(self.binding_record)", "def post_processor(self):", "def process_match_result(self, match):\n raise NotImplementedError()", "def mapped(self, *args, **kwargs): # real signature unknown\r\n pass", "def emit(self, record: LogRecord):\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)", "def applyMapping(self):\n pass", "def map(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if happy.flow.isIterable(self.aggkey):\n outkey = ''\n for ak in self.aggkey:\n if record.has_key(ak):\n outkey = outkey + record[ak] + \":\"\n task.collect(outkey, json) \n elif record.has_key(self.aggkey):\n if (record[self.aggkey]):\n task.collect(record[self.aggkey], json)", "def post_instrument_class(self, mapper):\n pass", "def emit(self, record):\n if self.collection is not None:\n try:\n getattr(self.collection, write_method)(self.format(record))\n except Exception:\n if not self.fail_silently:\n self.handleError(record)", "def create_row_processor(self, context, path, reduced_path, \n mapper, row, adapter):\n return None, None, None", "def parsing_processor(self, change):\n self.processor(simplejson.loads(change))", "def parse_records(self, handle, do_features=...): # -> Generator[SeqRecord, None, None]:\n ...", "def mapper(record):\n matrix, row, col, value = record\n if matrix == A_MATRIX:\n # For all A(i,j) emit key (j, k) for k=1 to number of columns in B\n for k in range(0, B_COLS):\n mr.emit_intermediate((row, k), [matrix, col, value])\n else:\n # For all B(j, k) emit key (j, i) for i=1 to number of rows in B\n for i in range(0, A_ROWS):\n mr.emit_intermediate((i, col), [matrix, row, value])", "def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):\n record = hide_confidential_fields(record, hide)\n record = unserialize_fields(record, hide)\n\n convert_float_timestamp2str(record)\n\n return record", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def create_row_processor(self, context, path, reduced_path, mapper, \n row, adapter):\n\n return None, None, None", "def map(query, fh, skip_header_row, default_obj={}):\n\n # First, try the JsonRecordReader; then attempt the csv record reader\n reader = MetaRecordReader(default_obj)\n\n # Hack: append an 'else []' to queries that lack an else clause\n if \" if \" in query and not \" else \" in query:\n query = query + \" else []\"\n\n compiled_query = compile(query, 'STRING', 'eval')\n\n it = iter(fh)\n if skip_header_row:\n next(it)\n\n 
for line in it:\n obj, env = reader.get_record(line)\n obj_out = eval(compiled_query, env)\n if isinstance(obj_out, list):\n # Lists are treated as flatmap\n yield from obj_out\n else:\n yield obj_out", "def mapLogRecord(self, record):\n newrec = record.__dict__\n for p in self.params:\n newrec[p] = self.params[p]\n maxParamLength = 4000\n # truncate and clean the message from non-UTF-8 characters\n try:\n newrec['msg'] = newrec['msg'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n try:\n newrec['message'] = newrec['message'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n return newrec", "def emit(self, record):\n self.buffer.append(record.__dict__)", "def emit(self, record):\n self.buffer.append(record.__dict__)" ]
[ "0.69456196", "0.6940001", "0.68277967", "0.63850415", "0.6172935", "0.6161542", "0.6121411", "0.61127484", "0.5994616", "0.5944488", "0.5828723", "0.57904565", "0.5777248", "0.5687219", "0.5642455", "0.5636709", "0.5632619", "0.56293315", "0.5624392", "0.5621543", "0.5617855", "0.559401", "0.55388516", "0.55352193", "0.5513791", "0.5495455", "0.5477768", "0.54627496", "0.54607254", "0.54607254" ]
0.7899032
0
Copy of qrencode.encode_scaled The original method does not support the standard 4 unit pixel border
def qrcode_scale(qrcode, pixel_size): version, src_size, im = qrcode qr_image_size = src_size * pixel_size padding_size = 4 * pixel_size final_image_size = (src_size * pixel_size) + (2 * padding_size) new_img = Image.new("L", (final_image_size, final_image_size), 255) new_img.paste(im.resize((qr_image_size, qr_image_size), Image.NEAREST), (padding_size, padding_size)) return (version, final_image_size, new_img)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_scaled(data, size, version=0, level=QR_ECLEVEL_L, hint=QR_MODE_8,\n case_sensitive=True):\n version, src_size, im = encode(data, version, level, hint, case_sensitive)\n if size < src_size:\n size = src_size\n qr_size = (size / src_size) * src_size\n im = im.resize((qr_size, qr_size), Image.NEAREST)\n pad = (size - qr_size) / 2\n ret = Image.new(\"L\", (size, size), 255)\n ret.paste(im, (pad, pad))\n\n return (version, size, ret)", "def scale4x(self) -> 'BaseImage':\n return self.scale2x().scale2x()", "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "def scale_invert(self):", "def scale(self):", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale_image(image=np.zeros((100,100)), \n new_width=100,\n ascii_block_size=(2,3)):\n \n original_width, original_height = image.shape\n aspect_ratio = original_height / float(original_width)\n w,h = ascii_block_size\n new_height = int(h/w * aspect_ratio * new_width)\n\n return skimage.transform.resize(image, (new_width, new_height))", "def scale(input_img, size):\n width, height = size\n old_height, old_width = input_img.shape\n x_scale = float(height) / old_height\n y_scale = float(width) / old_width\n\n output_img = np.zeros((height, width), dtype=np.uint8)\n for xidx in xrange(height):\n old_x = float(xidx) / x_scale\n for yidx in xrange(width):\n old_y = float(yidx) / y_scale\n if old_x.is_integer() or old_y.is_integer():\n output_img[xidx, yidx] = input_img[int(old_x), int(old_y)]\n else: # use bilinear interpolation\n x1 = int(np.floor(old_x))\n x2 = int(np.ceil(old_x)) if int(np.ceil(old_x)) < old_height else old_height - 1\n y1 = int(np.floor(old_y))\n y2 = int(np.ceil(old_y)) if int(np.ceil(old_y)) < old_width else old_width - 1\n\n q11 = input_img[x1, y1]\n q12 = input_img[x1, y2]\n q21 = input_img[x2, y1]\n q22 = input_img[x2, y2]\n\n output_img[xidx, yidx] = (q11 * (x2 - old_x) * (y2 - old_y)\n + q21 * (old_x - x1) * (y2 - old_y)\n + q12 * (x2 - old_x) * (old_y - y1)\n + q22 * (old_x - x1) * (old_y - y1)) \\\n / ((x2 - x1) * (y2 - y1) + 1e-10)\n\n return output_img", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def rescale(mat, nbit=16, minmax=None):\n if nbit != 8 and nbit != 16:\n raise ValueError(\"Only two options for nbit: 8 or 16 !!!\")\n if minmax is None:\n gmin, gmax = np.min(mat), np.max(mat)\n else:\n (gmin, gmax) = minmax\n mat = np.clip(mat, gmin, gmax)\n mat = (mat - gmin) / (gmax - gmin)\n if nbit == 8:\n mat = np.uint8(np.clip(mat * 255, 0, 255))\n else:\n mat = np.uint16(np.clip(mat * 65535, 0, 65535))\n return mat", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n 
newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def requantize(image, level_num=8):\n M, N = image.shape\n level_space = np.linspace(0, 255, level_num)\n out_img = np.zeros([M, N], dtype='uint8')\n for i in range(M):\n for j in range(N):\n out_img[i, j] = min(level_space, key=lambda x: abs(x - image[i, j]))\n\n return out_img.astype('uint8')", "def _scale_to_mbs_frame(self : \"animation\",\n img : \"np.ndarray\"\n ) -> \"np.ndarray\":\n xnew = img.shape[0] + self._mbs - img.shape[0]%self._mbs\n ynew = img.shape[1] + self._mbs - img.shape[1]%self._mbs\n return (255*resize(img, (xnew, ynew))).astype(np.uint8)", "def scaling(self):\n \n if self.colindex == self.rowsize: # last chart in row\n self.colindex = 0\n self.rowindex += 1 \n xorigin = self.indent + (self.colindex * self.xscale) \n yorigin = self.rowindex * self.yscale\n xscale = self.xscale # to fulfil % formatting below\n yscale = self.yscale \n self.colindex += 1\n\n res = \"origin(%(xorigin)s%%, %(yorigin)s%%), scale(%(xscale)s%%, %(yscale)s%%)\" % locals()\n return res", "def draw_scale(img,scale,width=2,head_enlarge_rate=1,fix_head_size=0):\n scale_img = copy.deepcopy(img)\n for i in range(scale.shape[0]):\n if fix_head_size > 0:\n head_size = fix_head_size\n else:\n head_size = scale[i][2] * head_enlarge_rate\n p1 = (int(round(scale[i][0] - head_size / 2)),\n int(round(scale[i][1] - head_size / 2)))\n p2 = (int(round(scale[i][0] + head_size / 2)),\n int(round(scale[i][1] + head_size / 2)))\n cv2.rectangle(scale_img, p1, p2, (0, 0, 255), width)\n\n return scale_img", "def export_to_scale(\n paper_size: tuple[float, float] = (8.5, 11),\n origin: tuple[float, float] = (0, 0),\n scale: float = 1,\n dpi: int = 300,\n):\n doc = make_doc(offset=(1, 2), size=(6.5, 8))\n msp = doc.modelspace()\n msp.add_mtext(\n f\"scale = 1:{scale}\\n\"\n f\"paper size = {paper_size[0]:.1f} inch x {paper_size[1]:.1f} inch \",\n dxfattribs={\"style\": \"OpenSans\", \"char_height\": 0.25},\n ).set_location(\n (0.2, 0.2), attachment_point=MTextEntityAlignment.BOTTOM_LEFT\n )\n\n ctx = RenderContext(doc)\n fig: plt.Figure = plt.figure(dpi=dpi)\n ax: plt.Axes = fig.add_axes([0, 0, 1, 1])\n\n # disable all margins\n ax.margins(0)\n\n # get the final render limits in drawing units:\n min_x, min_y, max_x, max_y = render_limits(\n origin, paper_size, scale\n )\n\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(min_y, max_y)\n\n out = MatplotlibBackend(ax)\n # finalizing invokes auto-scaling by default!\n Frontend(ctx, out).draw_layout(msp, finalize=False)\n\n # set output size in inches:\n fig.set_size_inches(paper_size[0], paper_size[1], forward=True)\n\n fig.savefig(CWD / f\"image_scale_1_{scale}.pdf\", dpi=dpi)\n plt.close(fig)", "def rescale_toa(arr, dtype=np.float32):\n # First look at raw value dists 
along bands\n\n arr_trans = np.subtract(arr, arr.min(axis=(1, 2))[:, np.newaxis, np.newaxis])\n arr_rs = np.divide(arr_trans, arr_trans.max(axis=(1, 2))[:, np.newaxis, np.newaxis])\n if dtype == np.uint8:\n arr_rs = np.array(arr_rs*255, dtype=np.uint8)\n return arr_rs", "def scaleClipl(x):\n x = 0 if x < 0 else x\n x = 1 if x > 1 else x\n return int(round(x*255.))", "def ConvertToScaledBitmap(self, size, window=None):\n size = wx.Size(*size)\n if window:\n size.width = int(size.width * window.GetContentScaleFactor())\n size.height = int(size.height * window.GetContentScaleFactor())\n\n # We can only have one overall scale factor for both dimensions with\n # this rasterization method, so chose either the minimum of width or\n # height to help ensure it fits both ways within the specified size.\n sx = size.width / self.width\n sy = size.height / self.height\n scale = min(sx, sy)\n return self.ConvertToBitmap(scale=scale, width=size.width, height=size.height)", "def test_replace_namespaced_scale_scale(self):\n pass", "def __resize_512p(input_data):\n rate = 1\n test_size=512\n if input_data['img'].shape[0] > input_data['img'].shape[1]:\n if True: # input_data['img'].shape[1] < 512:\n rate = test_size / input_data['img'].shape[1]\n seq = iaa.Sequential([\n iaa.Scale({'height': \"keep-aspect-ratio\", 'width': test_size}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n else:\n if True: # input_data['img'].shape[0] < 512:\n rate = test_size / input_data['img'].shape[0]\n seq = iaa.Sequential([\n iaa.Scale({'height': test_size, 'width': \"keep-aspect-ratio\"}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n\n if DataAugmentor._is_synthtext(input_data):\n input_data['contour'] = [[np.cast['int32'](contour * rate) for contour in contours] for contours in\n input_data['contour']]\n else:\n input_data['contour'] = [np.cast['int32'](contour * rate) for contour in input_data['contour']]\n input_data['center_point'] = [(np.cast['int32'](point[0] * rate),\n np.cast['int32'](point[1] * rate)) for point in input_data['center_point']]\n return input_data", "def encode(self, data, scaling=True):\n\n encoded = encode_data(data,\n self._codec['datas'],\n self._codec['formats'],\n scaling)\n encoded |= (0x80 << (8 * self._length))\n encoded = hex(encoded)[4:].rstrip('L')\n\n return binascii.unhexlify(encoded)[:self._length]", "def normalize(image):\r\n return image / 127.5 - 1.", "def encode_dim(self):\n raise NotImplementedError", "def setSurfaceColorScale(low,high):\n dislin.zscale(low,high)", "def scaled_raw_noise_4d(loBound, hiBound, x, y, z, w):\n return raw_noise_4d(x, y, z, w)*(hiBound - loBound)/2 + (hiBound + loBound)/2", "def pixel_scale(self):\n return np.abs(float(self.header[\"CDELT1\"]))", "def test_patch_namespaced_scale_scale(self):\n pass", "def scale_pos(q_unsc):\n q_sc = (q_unsc - 0.125)/0.125\n return q_sc", "def convertToSpectroGram(self):" ]
[ "0.6628178", "0.5901082", "0.5880577", "0.5856074", "0.58090234", "0.56154186", "0.5561109", "0.5520228", "0.55200243", "0.54664236", "0.54620045", "0.544472", "0.539391", "0.53541255", "0.53466356", "0.53360176", "0.5328329", "0.5323346", "0.52971715", "0.52537584", "0.5236238", "0.522988", "0.5210341", "0.520378", "0.5197487", "0.51958954", "0.5176812", "0.5158592", "0.51511985", "0.5149774" ]
0.59873855
1
Iterate through all the documents.
def iter_documents(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for this_document in self.documents:\n yield this_document", "def __iter__(self):\n for document in self.query:\n yield self._to_document(document)", "def __iter__(self):\n return self.iter_documents()", "def __iter__(self):\n return self.docs.__iter__()", "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def __iter__(self):\n for hit in self._evaluate()['hits']['hits']:\n yield self._to_document(hit)", "def __iter__(self):\n if self.documents is None:\n documents = []\n self.corpus = self.corpus.reset_index()\n for index, row in self.corpus.iterrows():\n tokens = self.tokenizer.tokenize(to_unicode(row['body']))\n documents.append(TaggedDocument(self.transformer(tokens), [index, row['stock'], row['doc_tag']]))\n self.documents = documents\n\n return self.documents.__iter__()", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "async def iterate_all(\n self,\n *,\n projection: Optional[Dict[str, Any]] = DEFAULT_PROJECTION,\n limit: int = DEFAULT_LIMIT,\n offset: int = DEFAULT_OFFSET,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> AsyncGenerator[Dict[str, Any], None]:\n async for document in self._database.iterate_all(\n self.name,\n projection=projection,\n limit=limit,\n offset=offset,\n session=session,\n **kwargs,\n ):\n yield document", "def iter_docids(self):\n return iter(self.client.smembers(self.dbprefix + 'docs'))", "def action_gen():\n for n, doc in enumerate(cursor):\n # print fields\n did = doc.pop('_id')\n if doc == {}:\n print \"Empty document, skipping\"\n continue\n op_dict = {\n '_index': db.lower(),\n '_type': collection,\n '_id': int('0x' + str(did), 16),\n '_source': doc\n }\n #op_dict['doc'] = doc\n yield op_dict", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def es_iterate_all_documents(es, index, pagesize=250, scroll_timeout=\"3m\", **kwargs):\n is_first = True\n while True:\n # Scroll next\n if is_first: # Initialize scroll\n result = es.search(index=index, scroll=\"1m\", **kwargs, body={\n \"size\": pagesize\n })\n is_first = False\n else:\n result = es.scroll(body={\n \"scroll_id\": scroll_id,\n \"scroll\": scroll_timeout\n })\n scroll_id = result[\"_scroll_id\"]\n hits = result[\"hits\"][\"hits\"]\n # Stop after no more docs\n if not hits:\n break\n # Yield each entry\n yield from (hit['_source'] for hit in hits)", "def __iter__(self):\n for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)", "def indexable_objects_iter(docs, es_index, es_doctype):\n\n for doc in docs:\n insert_doc = {\n '_index': es_index,\n '_type': es_doctype,\n '_id': \"%s-%s\" % (doc['state'], doc['id']),\n '_source': doc\n }\n yield insert_doc", "def 
documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def clean_all_documents(cls):\n for index, text in enumerate(cls.documents):\n text_processed = cls.clean_document(text)\n cls.processed_documents.append(text_processed)", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)", "def __iter__(self):\r\n doc_reader = self.searcher.doc_reader\r\n for docnum in self.scored_list:\r\n yield doc_reader[docnum]", "def iterate_over_all_documents_in_collection(session, collection: str, document_ids=None, consider_tag=False,\n consider_sections=False, consider_classification=False):\n if not collection:\n raise ValueError(\"Document collection must be specified and cannot be None\")\n\n doc_query = session.query(Document)\n doc_query = doc_query.filter(Document.collection == collection)\n\n if document_ids:\n document_ids = sorted(list(document_ids))\n doc_query = doc_query.filter(Document.id.in_(document_ids))\n\n doc_query = doc_query.order_by(Document.id)\n doc_query = doc_query.yield_per(BULK_QUERY_CURSOR_COUNT_DEFAULT)\n\n if consider_tag:\n tag_query = session.query(Tag)\n tag_query = tag_query.filter(Tag.document_collection == collection)\n\n if document_ids:\n tag_query = tag_query.filter(Tag.document_id.in_(document_ids))\n\n tag_query = tag_query.order_by(Tag.document_id)\n tag_query = tag_query.yield_per(BULK_QUERY_CURSOR_COUNT_DEFAULT)\n tag_query = iter(tag_query)\n current_tag = next(tag_query, None)\n\n if consider_classification:\n class_query = session.query(DocumentClassification)\n class_query = class_query.filter(DocumentClassification.document_collection == collection)\n\n if document_ids:\n class_query = class_query.filter(DocumentClassification.document_id.in_(document_ids))\n\n class_query = class_query.order_by(DocumentClassification.document_id)\n class_query = class_query.yield_per(BULK_QUERY_CURSOR_COUNT_DEFAULT)\n\n class_query = iter(class_query)\n current_class = next(class_query, None)\n\n if consider_sections:\n sec_query = session.query(DocumentSection)\n sec_query = sec_query.filter(DocumentSection.document_collection == collection)\n\n if document_ids:\n sec_query = sec_query.filter(DocumentSection.document_id.in_(document_ids))\n\n sec_query = sec_query.order_by(DocumentSection.document_id)\n sec_query = sec_query.yield_per(BULK_QUERY_CURSOR_COUNT_DEFAULT)\n sec_query = iter(sec_query)\n current_sec = next(sec_query, None)\n\n for res in doc_query:\n t_doc = TaggedDocument(id=res.id, title=res.title,\n abstract=res.abstract)\n\n if consider_tag:\n while current_tag and t_doc.id == current_tag.document_id:\n t_doc.tags.append(TaggedEntity(document=current_tag.document_id,\n start=current_tag.start,\n end=current_tag.end,\n ent_id=current_tag.ent_id,\n ent_type=current_tag.ent_type,\n text=current_tag.ent_str))\n current_tag = next(tag_query, None)\n\n if consider_classification:\n while current_class and t_doc.id == current_class.document_id:\n t_doc.classification.update({current_class.classification: current_class.explanation})\n current_class = 
next(class_query, None)\n\n if consider_sections:\n while current_sec and t_doc.id == current_sec.document_id:\n t_doc.sections.append(\n kgextractiontoolbox.document.document.DocumentSection(position=current_sec.position,\n title=current_sec.title,\n text=current_sec.text))\n current_sec = next(sec_query, None)\n\n t_doc.remove_duplicates_and_sort_tags()\n yield t_doc", "def ingest_all(self, docs):\n for doc in docs:\n self.ingest(doc)", "def iterate(self):\n raise NotImplementedError()", "def iterate(self):", "def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def process_all_documents(self,\n n_jobs: Optional[int] = None,\n ) -> List[Optional[Document]]:\n return self.process_documents(self.doc_ids, n_jobs)", "def getDocumentAll(self, query = {}, keys = []):\n query = query or {}\n if \"include_docs\" not in query:\n query[\"include_docs\"] = True\n\n if not keys:\n return self.client.get(self.name +\"/_all_docs\", query)\n else:\n return self.client.post(self.name +\"/_all_docs\", query,\n {\"keys\": keys}).getBodyData()", "def documents(self):\r\n return doc.Documents(self)" ]
[ "0.756131", "0.7519196", "0.73756146", "0.7028327", "0.6934227", "0.67785066", "0.6757482", "0.6694352", "0.6692733", "0.664955", "0.6616363", "0.65842587", "0.65538126", "0.6528225", "0.6503124", "0.6484031", "0.64438236", "0.640956", "0.64088947", "0.64065766", "0.6398948", "0.63646215", "0.63438964", "0.6316858", "0.62937003", "0.62927383", "0.6287122", "0.62840074", "0.6275239", "0.62326306" ]
0.8500158
0
Create a basic search for the value supplied. If fieldname is also supplied, search only for the value in that field.
def query(self, value, fieldname=None, *args, **kwargs): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(field='name', method=IS, value=None):\n def searchPred(db):\n return Result((k, v) for k, v in db.items()\n if field in v and method(v[field], value))\n\n return searchPred", "def set_search_filter(\n query: BaseQuery,\n obj_model: Model,\n search_field: str = None,\n search_value=None,\n *args,\n **kwargs,\n) -> BaseQuery:\n if search_field is not None and search_value is not None:\n column = next(\n (c for c in inspect(obj_model).columns if c.name == search_field), None\n )\n if column is not None:\n query = query.filter(column.like(f\"%{search_value}%\"))\n\n return query", "def search(self, value):\n pass", "def search_field(self, field, query, index=None, doc_type=None):\r\n return self.search({\r\n 'query': {\r\n 'fuzzy_like_this_field': {\r\n field: {\r\n 'like_text': query\r\n ,'max_query_terms': 250\r\n }\r\n }\r\n }\r\n }, index=index, doc_type=doc_type, size=25)", "def test_search_in_field(\n self,\n es_with_collector,\n model_field,\n model_value,\n search_term,\n match_found,\n ):\n CompanyFactory()\n CompanyFactory(\n **{\n 'name': 'test_company',\n model_field: model_value,\n },\n )\n es_with_collector.flush_and_refresh()\n\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': search_term,\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n if match_found:\n assert response.data['count'] == 1\n assert response.data['results'][0]['name'] == 'test_company'\n else:\n assert response.data['count'] == 0", "def search_for(self, indexname, field, term, outfield=None):\n url = \"{url_home}/{index}/{function}\".format(url_home=self.url_elastic, index=indexname, function=\"_search\")\n match = {field: dict(query=term)}\n data = dict(\n query=match\n )\n res = rw.get(url, headers=self.headers, data=json.dumps(data))\n return res", "def abstract_search(self, model, params):\n domain = []\n\n for key, value in params.items():\n self.check_field_existence(model, key)\n\n # we change the operator according to the field type or name\n if key == 'name':\n domain.append((key, 'ilike', value))\n elif type(value) is list:\n domain.append((key, 'in', value))\n elif key == 'active' and value == False:\n domain.append((key, '!=', True))\n else:\n domain.append((key, '=', value))\n\n return self.env[model].sudo().search(domain)", "def search(self,\n collection_name: str,\n vector: List,\n field: List,\n\tfilters: List=[],\n approx: int = 0,\n sum_fields: bool = True,\n metric: str = \"cosine\",\n min_score=None,\n page: int = 1,\n page_size: int = 10,\n include_vector:bool=False,\n include_count:bool=True,\n asc:bool=False,\n **kwargs\n ):\n\n search_fields ={}\n if isinstance(field, str):\n advanced_search_query = {\n field.replace('_vector_', ''): {'vector': vector, 'fields': [field]}\n }\n else:\n advanced_search_query = {\n field[0].replace('_vector_', ''): {'vector': vector, 'fields': field}\n }\n return self.advanced_search(\n collection_name=collection_name,\n multivector_query=advanced_search_query,\n\t filters=filters,\n approx=approx,\n sum_fields=sum_fields,\n metric=metric,\n min_score=min_score,\n page=page,\n page_size=page_size,\n include_vector=include_vector,\n include_count=include_count,\n asc=asc,\n **kwargs\n )", "def build_field_query(fields_to_search, search):\n query = Q()\n for field in fields_to_search:\n query |= Q(**{\"{0}__icontains\".format(field): search})\n return query", "def find_tasks_by_field(field, field_value):\n if field == 'date':\n tasks = 
Task.select().where(Task.date == field_value)\n return tasks\n elif field == 'employee_name':\n tasks = Task.select().where(Task.employee_name == field_value)\n return tasks\n elif field == 'time_spent':\n tasks = Task.select().where(Task.time_spent == field_value)\n return tasks\n elif field == 'search_str':\n tasks = Task.select().where((Task.title.contains(field_value)) |\n (Task.notes.contains(field_value)))\n return tasks", "def get_doc_by_keyword(self,collection,field_name,search_key,like=True):\n if like:\n # This finds the records in which the field just \"contains\" the search_key\n res = self.client['rephie'][collection].find(({field_name : {'$regex' : \".*\"+search_key+\".*\"}}))\n else:\n # This finds the records in which the field is equal to the search_key\n res = self.client['rephie'][collection].find({field_name : search_key})\n\n return self._make_result_list(res)", "def search(self, term):", "def searchByField(database):\n field=str(input(\"What is his field name :\"))\n usrs,find=getByField(database,field)\n for usr in usrs:\n print(usr)", "def search(self, find_val):\n return False", "def query(self, fieldname, value, *args, **kwargs):\n qg = self.schema.query_generator(fieldname)\n return qg(value, *args, **kwargs).connect(self)", "def simple_search(self, pattern):\n query = Q()\n for ptn in pattern.split():\n for field in SEARCH_FIELDS:\n query |= Q(**{'%s__icontains' % field: ptn})\n return self.get_queryset().filter(query)", "def search(query_string):", "def search(self, *args, **kwargs):\n # comparison = f\"__{kwargs.get('comparison')}__\" if kwargs.get('comparison') else '__eq__'\n comparison = '__{comparison}__'.format(comparison=kwargs.get('comparison')) if kwargs.get('comparison') else '__eq__'\n try:\n key, value = args[0], args[1]\n except IndexError:\n for key in kwargs.keys():\n if '__' in key:\n # comparison = f'__{key.split(\"__\")[1]}__'\n comparison = '__{comparison}__'.format(comparison=key.split(\"__\")[1])\n key, value = key.split(\"__\")[0], kwargs[key]\n return SearchableList(list(filter(lambda x: try_compare(x, key, comparison, value), self)))", "def test_search_table_with_field(self) -> None:\n responses.add(responses.GET, local_app.config['SEARCHSERVICE_BASE'] + SEARCH_ENDPOINT,\n json={}, status=HTTPStatus.OK)\n\n with local_app.test_client() as test:\n response = test.get('/api/search/field/'\n 'tag_names/field_val/test', query_string=dict(query_term='test',\n page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.OK)", "def give_field(self,\r\n fieldname):\r\n\r\n return [a_temp for a_temp in self.default_dict['field']\r\n if self.default_dict['field'][a_temp] == fieldname]", "def netsuite_search(\n self,\n type_name: str,\n search_value: str,\n operator: str = \"contains\",\n page_size: int = 5,\n ) -> PaginatedSearch:\n # pylint: disable=E1101\n record_type_search_field = self.client.SearchStringField(\n searchValue=search_value, operator=operator\n )\n basic_search = self.client.basic_search_factory(\n type_name, recordType=record_type_search_field\n )\n paginated_search = PaginatedSearch(\n client=self.client,\n type_name=type_name,\n basic_search=basic_search,\n pageSize=page_size,\n )\n return paginated_search", "def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):\n if context is None: \n context={}\n ids= []\n if len(name) >= 2:\n ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)\n if not ids:\n ids = self.search(cr,uid,[('name',operator,name)] + 
args, limit=limit, context=context)\n return self.name_get(cr,uid,ids,context=context)", "def full_search(pw, *arg, **kw):\n return pw.search(*arg, **kw)", "def search_with_filters(self,\n collection_name: str,\n vector: List,\n field: List,\n filters: List=[],\n approx: int = 0,\n sum_fields: bool = True,\n metric: str = \"cosine\",\n min_score=None,\n page: int = 1,\n page_size: int = 10,\n include_vector:bool=False,\n include_count:bool=True,\n asc:bool=False,\n **kwargs\n ):\n search_fields ={}\n if isinstance(field, str):\n advanced_search_query = {\n field.replace('_vector_', ''): {'vector': vector, 'fields': [field]}\n }\n else:\n advanced_search_query = {\n field[0].replace('_vector_', ''): {'vector': vector, 'fields': field}\n }\n return self.advanced_search(\n collection_name=collection_name,\n multivector_query=advanced_search_query,\n approx=approx,\n sum_fields=sum_fields,\n filters=filters,\n metric=metric,\n min_score=min_score,\n page=page,\n page_size=page_size,\n include_vector=include_vector,\n include_count=include_count,\n asc=asc,\n **kwargs\n )", "def search(self, name=None, first_name=None, last_name=None, email=None,\r\n phone=None, company=None, twitter=None, labels=None,\r\n case_id=None, subject=None, description=None,\r\n status=None, priority=None, assigned_group=None,\r\n assigned_user=None, channels=None, notes=None, attachments=None,\r\n created=None, updated=None, since_created_at=None,\r\n max_created_at=None, since_updated_at=None, max_updated_at=None,\r\n since_id=None, max_id=None, per_page=None, page=None,\r\n embed=None, fields=None, **case_custom_fields):\r\n store = locals()\r\n store.update(store.pop('case_custom_fields'))\r\n\r\n params = base.get_params(None, store)\r\n url = '{0}/{1}'.format(self.get_url(), 'search')\r\n return http.Request('GET', url, params), parsers.parse_json", "def search(self, *args, **kwargs):", "def get_field_schema(self, value, schemas, **kwargs):\n keys = kwargs.get(\"keys\", GET_SCHEMA_KEYS)\n search = value.lower().strip()\n\n for schema in schemas:\n if not schema.get(\"selectable\"):\n continue\n\n for key in keys:\n if search == schema[key].lower():\n return schema\n\n msg = \"No field found where any of {} equals {!r}, valid fields: \\n{}\"\n msg = msg.format(keys, value, \"\\n\".join(self._prettify_schemas(schemas=schemas)))\n raise NotFoundError(msg)", "def make_filtered_field(ds, fieldname, filter_fields = [], tolerance = tol):\n def _filtered_field(field, data):\n x = data[('gas',fieldname)]\n\n select = data[filter_fields[0]] < 0\n for f in filter_fields:\n select = select + (data[f] < tolerance)\n x[select] = np.nan\n\n return x\n\n ds.add_field(('gas',fieldname + '_filtered'), function = _filtered_field, units = \"\")\n return", "def search(self, **kwargs):\n return keyword_search(self._rq_list, **kwargs)", "def searchRecords(self, filterChoice, keyword):\r\n session = wx.GetApp().session\r\n model = getattr(db, self.modelName)\r\n\r\n result = None\r\n if filterChoice == \"Person\":\r\n qry = session.query(model)\r\n logging.debug(qry)\r\n result = qry.filter(db.Person.full_name.contains('%s' % keyword))\r\n\r\n result = result.all()\r\n\r\n logging.debug(result)\r\n return result" ]
[ "0.666969", "0.64892364", "0.6434192", "0.6009932", "0.5974208", "0.5703867", "0.56232536", "0.56202215", "0.5617161", "0.5593979", "0.55536413", "0.552709", "0.5507973", "0.5505467", "0.54794776", "0.5471659", "0.5467763", "0.5461998", "0.5449711", "0.54352003", "0.542504", "0.54006326", "0.53983676", "0.5389576", "0.53875995", "0.5352595", "0.53460413", "0.532529", "0.53105676", "0.5303212" ]
0.66154534
1
Perform a search. The search should be an instance of multisearch.queries.Search.
def search(self, search): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(self, query):", "def search(self, search_params):\n\n search_params = search_params._get_params() if isinstance(search_params, SearchParams) else search_params\n\n r = self._create_operation_request(self._url_full, \"search\", method=\"POST\", data=search_params)\n\n return send_session_request(self._session, r).json()", "def search(self, search_params):\n if self.db.is_data_set():\n return self.db.search(search_params)\n else:\n self.crawler.initialize()\n # return self.db.search(search_params)", "async def search(self, *args, **kwargs):\n pass", "def _search(self, query):\n return self._request(query)", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def search(self, *args, **kwargs):", "def search(self, q, *args, **kwargs):\n\t\treturn self.__model.objects.search(q, *args, **kwargs)", "def search(self, query):\n logger.debug('Performing search for: '+query)\n write_textfield('queryString', query+\"\\n\", check=False)\n self.waitForLoaderToDisappear()", "def do_search(self, **criteria):\n return self.app.get(url(controller='dex_search',\n action='move_search',\n **criteria))", "def __search(self):\n query = self.__query.query()\n self.__engine.search(query)", "def search(self, **kwargs):\n return keyword_search(self._rq_list, **kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def Search(self, request, global_params=None):\n config = self.GetMethodConfig('Search')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Search(self, request, global_params=None):\n config = self.GetMethodConfig('Search')\n return self._RunMethod(\n config, request, global_params=global_params)", "def search(self, query=None):\n\n self.visual.log(\"Starting search\")\n if self.search_invoke_counter > 0:\n # step to the starting history to search everything\n self.reset_history()\n search_done = False\n just_began_search = True\n query_supplied = bool(query)\n\n ttr = TimedThreadRunner(self.search_for_entry, \"\")\n # ttr.set_delay(1, self.visual.log, \"delaying search execution...\")\n\n while True:\n # get new search object, if it's a continued search OR no pre-given query\n if not just_began_search or (just_began_search and not query_supplied):\n search_done, new_query = self.visual.receive_search()\n self.visual.log(\"Got: [{}] [{}]\".format(search_done, new_query))\n if search_done is None:\n # pressed ESC\n self.visual.message(\"Aborting search\")\n return\n if new_query == \"\" and search_done:\n # pressed enter\n self.visual.message(\"Concluded search\")\n break\n # got an actual query item\n # if query content is updated, reset the timer\n query = new_query\n\n query = query.lower().strip()\n # ttr.reset_time(query)\n # self.visual.log(\"Got query: {}\".format(query))\n # ttr.update_args(query)\n # ttr.start()\n # 
ttr.stop()\n # results_ids = ttr.get_result()\n results_ids = self.search_for_entry(query)\n # results_ids = []\n just_began_search = False\n self.search_invoke_counter += 1\n if not self.visual.does_incremental_search:\n break\n\n if not query:\n # no search was performed\n return\n # push the reflist modification to history\n self.change_history(results_ids, \"search:\\\"{}\\\"\".format(query))", "def search(self, query, **kwargs):\n query = dict(query)\n if self._query:\n query = {'$and': [self._query, query]}\n cat = type(self)(\n metadatastore_db=self._metadatastore_db,\n asset_registry_db=self._asset_registry_db,\n query=query,\n find_kwargs=kwargs,\n handler_registry=self._handler_registry,\n transforms=self._transforms,\n root_map=self._root_map,\n filler_class=self._filler_class,\n name='search results',\n getenv=self.getenv,\n getshell=self.getshell,\n auth=self.auth,\n metadata=(self.metadata or {}).copy(),\n storage_options=self.storage_options)\n return cat", "def search(self, query_id, query_str):\n pass", "def search(self, query):\n return self._search_provider.search(self._normalize_query(query))", "def search(self, **kwargs):\n clone = self._clone()\n\n if 'q' in kwargs:\n clone.q = kwargs['q']\n del kwargs['q']\n clone.params.update(kwargs)\n\n return clone.solr.search(q=clone.q, **clone.params)", "def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)", "def search(\n self,\n query,\n fields=None,\n operator=None,\n order_by_relevance=True,\n partial_match=None, # RemovedInWagtail60Warning\n backend=\"default\",\n ):\n search_backend = get_search_backend(backend)\n return search_backend.search(\n query,\n self,\n fields=fields,\n operator=operator,\n order_by_relevance=order_by_relevance,\n partial_match=partial_match, # RemovedInWagtail60Warning\n )", "def search(self, q):\n self.__query = q\n self.scrape_page()", "def search(self, query, maxhits=100):" ]
[ "0.726705", "0.72350997", "0.7127986", "0.71041954", "0.69433963", "0.69397825", "0.6918522", "0.68597406", "0.6766558", "0.6742335", "0.67419297", "0.6725602", "0.67140645", "0.67140645", "0.67140645", "0.67140645", "0.67140645", "0.67140645", "0.67140645", "0.6712671", "0.6712671", "0.66915", "0.66772974", "0.66678315", "0.6662745", "0.663146", "0.6608676", "0.6605863", "0.66050684", "0.6581347" ]
0.747983
0
Create a runner for a script with the contents from a string. Useful for testing short scripts that fit comfortably as an inline string in the test itself, without having to create a separate file for it.
def script_from_string(self, script: str) -> LocalScriptRunner: hasher = hashlib.md5(bytes(script, "utf-8")) script_name = hasher.hexdigest() path = pathlib.Path(self.tmp_script_dir.name, script_name) aligned_script = textwrap.dedent(script) path.write_text(aligned_script) return LocalScriptRunner(str(path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runner():\n return CliRunner()", "def script_from_filename(self, script_path: str) -> LocalScriptRunner:\n return LocalScriptRunner(str(self.dir_path / script_path))", "def runner() -> CliRunner:\n return CliRunner()", "def runner() -> CliRunner:\n return CliRunner()", "def ez_run(cls, program_string):\n res = Cpu('test')\n res.load(program_string.split('\\n'))\n res.run()\n return res", "def fixture_runner():\n return CliRunner()", "def runfile(self, s):\n return self.shell.ex(load_wrap(s, attach=False))", "def make_eval(s, filename=\"unknown\"):\n return _Eval(s.strip(), filename)", "def test_putStrin_with_Stringlit2(self):\n input = \"\"\"\n \n void main () {\n putStringLn(\"Tam nay thi ket thuc testcase 100 cho roi\");\n }\n \"\"\"\n expect = \"Tam nay thi ket thuc testcase 100 cho roi\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,600))", "def load_and_run(runner):\r\n\r\n #**************************************************************************************************************************\r\n #Cache commands to be executed.\r\n #If using -object spoofs, -enable option needs to be added as either a standalone command or an option in another command.\r\n #Note: -enable is not required for named spoofs (-id).\r\n #Example command: runner.AddCommand('spoof_main \"-enable -object CHwOrders -data acRPM -value 0.33 -enable\"\\n','->',False)\r\n #Templates:\r\n #runner.AddCommand('spoof_main \"-enable\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-id -var1 -var2 \"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-enable -object -data -value \"\\n','->',False)\r\n #runner.AddCommand('<enter any telnet command here>\\n','->',False)\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #Execute cached commands, then clear cached commands to get runner ready for next set of commands.\r\n #Optional pause here with additional details if necessary (i.e. instructions, timing, etc.).\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #raw_input(\"<Prompt to present to operator>\");\r\n #**************************************************************************************************************************\r\n runner.AddCommand(\"Log_Test_Info(\\\"Running spoof script \" + scriptName + \"\\\")\\n\", \"->\", False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n \r\n \r\n runner.AddCommand('spoof_main\"-object CHwStates -data red -value 995\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-object CHwStates -data green -value 950\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-object ProcRun_IntegratedPlateletYield -data data -value 400000000000\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-enable\"\\n','->',False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n\r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Pause here with additional details if necessary (i.e. 
instructions, timing, etc.).\r\n #time.sleep for no operator prompt, raw_input for prompt.\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #time.sleep(30)\r\n #raw_input(\"<Prompt to present to operator>, press ENTER to continue.\");\r\n #**************************************************************************************************************************\r\n\r\n \r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Next set of commands to be executed.\r\n #If more sets of commands are needed copies of this section and the \"Execute\" section below may be inserted after the \"Execute\" section below.\r\n #If data needs to be unspoofed prior to next spoof use command below.\r\n #runner.AddCommand('spoof_main \"-unspoof -object <Spoofed Object Here> -data <Spoofed Data Here>\"\\n','->',False)\r\n #Example command: runner.AddCommand('spoof_main \"-enable -object CHwOrders -data acRPM -value 0.33 -enable\"\\n','->',False)\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Execute next set of cached commands.\r\n #Optional pause here with additional details if necessary (i.e. instructions, timing, etc.).\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #raw_input(\"<Prompt to operator>\");\r\n #runner.Run()\r\n #runner.ResetCommands()\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #If desired, add a -disable -end or -unspoof command to disable the spoofer or simply unspoof spoofed data. 
\r\n #runner.AddCommand('spoof_main \"-unspoof -object <Spoofed Object Here> -data <Spoofed Data Here>\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-disable\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-end\"\\n','->',False)\r\n #**************************************************************************************************************************\r\n runner.AddCommand(\"Log_Test_Info(\\\"Exiting spoof script \" + scriptName + \"\\\")\\n\", \"->\", False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n\r\n #**************************************************************************************************************************\r\n #Optional prompt to notify operator that script and all spoofs have been execute successfully.\r\n #raw_input(\"Script execution complete, press ENTER to close this window.\");\r\n #**************************************************************************************************************************\r", "def runner() -> CliRunner:\n return click.testing.CliRunner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n\n return app.test_cli_runner()", "def test_putStrin_with_Stringlit(self):\n input = \"\"\"\n \n void main () {\n putStringLn(\"Tam nay thi ket thuc testcase 100 cho roi\");\n }\n \"\"\"\n expect = \"Tam nay thi ket thuc testcase 100 cho roi\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,599))\n ##### dat ten trung nen chi chay ra 98 testcase", "def create_test_function(source, output, lang):\n with open(source) as f:\n snippet = f.read()\n with open(output) as f:\n res = f.read()\n\n def tst_func(slf):\n slf.do(snippet, res, lang=lang)\n\n return tst_func", "def cli_runner():\n runner = CliRunner()\n\n def cli_main(*cli_args):\n \"\"\"Run cookiecutter cli main with the given args.\"\"\"\n return runner.invoke(main, cli_args)\n\n return cli_main", "def get_test_text(project_name):\r\n \r\n return \"\"\"\r\nfrom nose.tools import *\r\nimport %s\r\n\r\ndef setup():\r\n print \"SETUP!\"\r\n\r\ndef teardown():\r\n print \"TEAR DOWN!\"\r\n\r\ndef test_basic():\r\n print \"I RAN!\"\r\n \"\"\" % project_name", "def build_runner(config):\n if not isinstance(config, dict) or 'runner_type' not in config:\n raise ValueError('`runner_type` is missing from configuration!')\n\n runner_type = config['runner_type']\n if runner_type not in _RUNNERS:\n raise ValueError(f'Invalid runner type: `{runner_type}`!\\n'\n f'Types allowed: {list(_RUNNERS)}.')\n return _RUNNERS[runner_type](config)", "def test_cli_string():\n cmd = get_cli_string()\n assert \"pytest\" in cmd", "def test_string():", "def construct_case(filename, name):\n\n def make_test(test_name, definition, i):\n def m(self):\n if name in SKIP_TESTS.get(self.es_version, ()) or name in SKIP_TESTS.get(\n \"*\", ()\n ):\n raise SkipTest()\n self.run_code(definition)\n\n m.__doc__ = \"%s:%s.test_from_yaml_%d (%s): %s\" % (\n __name__,\n name,\n i,\n \"/\".join(filename.split(\"/\")[-2:]),\n test_name,\n )\n m.__name__ = \"test_from_yaml_%d\" % i\n return m\n\n with open(filename) as f:\n tests = list(yaml.load_all(f))\n\n attrs = {\"_yaml_file\": filename}\n i = 0\n for test in tests:\n for test_name, definition in test.items():\n if test_name in (\"setup\", \"teardown\"):\n attrs[\"_%s_code\" % test_name] = definition\n continue\n\n attrs[\"test_from_yaml_%d\" % i] = make_test(test_name, definition, i)\n i += 1\n\n return type(name, (YamlTestCase,), attrs)", 
"def make_shell_test(name):\n test = Test(name)\n test.add_step(\"run\", step_run, checks=[\n check_retcode_zero,\n create_check_reference_output(name+\".ref\"),\n ], allow_retries=3)\n return test", "def test_script(self) -> None:\n main()", "def test_string():\n pass", "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def simulate_block():\n return '''\n```sh\n# In build directory\n./simulate\n```'''", "def cli_runner(request: SubRequest) -> CliRunner:\n init_kwargs = {}\n marker = request.node.get_closest_marker(\"runner_setup\")\n if marker:\n init_kwargs = marker.kwargs\n return CliRunner(**init_kwargs)", "def test_script(self):\n self.script(\"# script.py\\n\"\n \"a = 2\\n\"\n \"# other\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n self.assertEqual(script.type, \"script\")\n self.assertEqual(script.mode, \"w\")\n self.assertEqual(script.first_char_line, 1)\n self.assertEqual(script.first_char_column, 0)\n self.assertEqual(script.last_char_line, 3)\n self.assertEqual(script.last_char_column, 7)\n self.assertEqual(script.container_id, -1)\n\n script_block = self.metascript.code_blocks_store[script.id]\n self.assertEqual(script_block.code, \"# script.py\\na = 2\\n# other\")\n self.assertEqual(script_block.docstring, \"\")\n self.assertTrue(bool(script_block.code_hash))" ]
[ "0.6015849", "0.5901293", "0.5896176", "0.5896176", "0.57842547", "0.5726609", "0.5675143", "0.56670874", "0.56511617", "0.5644335", "0.564341", "0.5618902", "0.5618902", "0.5618902", "0.5581154", "0.55600226", "0.55423313", "0.5522817", "0.5454394", "0.5397374", "0.53546125", "0.5337112", "0.53328186", "0.53154606", "0.53078467", "0.5287171", "0.52773964", "0.52725023", "0.52568376", "0.5238527" ]
0.6885351
0
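Illustrative note on the record above: the positive document hashes a script string to get a stable filename, dedents it, writes it into a temporary directory, and wraps the resulting path in a LocalScriptRunner. The short sketch below reproduces only that write-to-temp-dir pattern with standard-library calls; the runner class and any surrounding test fixture are not reproduced, and every name here is an illustrative assumption rather than a verified API.

# Sketch (assumed names): hash the script text for a stable filename,
# dedent it, and write it into a temporary directory.
import hashlib
import pathlib
import tempfile
import textwrap

script = """
    import sys
    print(sys.version)
"""

with tempfile.TemporaryDirectory() as tmp_dir:
    script_name = hashlib.md5(script.encode("utf-8")).hexdigest()
    script_path = pathlib.Path(tmp_dir, script_name)
    script_path.write_text(textwrap.dedent(script))
    print(script_path)  # this path is what a runner object would receive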
Convert camera state to internal camera state.
def to_camera_state(internal_camera_state):
    def to_temperature(internal_temperature):
        return CameraState.Temperature(
            dmd=internal_temperature.dmd.value,
            general=internal_temperature.general.value,
            led=internal_temperature.led.value,
            lens=internal_temperature.lens.value,
            pcb=internal_temperature.pcb.value,
        )

    return CameraState(
        available=internal_camera_state.available.value,
        connected=internal_camera_state.connected.value,
        live=internal_camera_state.live.value,
        temperature=to_temperature(internal_camera_state.temperature),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def _state_convert(self, raw_state):\n variables_dict = dict()\n variables_dict[\"s_t\"] = np.hstack((0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.0, 0.0, 1.0))\n variables_dict[\"v_t\"] = np.hstack((0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n variables_dict[\"add_s_t\"] = np.hstack((0.8, 0.8))\n variables_dict[\"add_v_t\"] = np.hstack((0.0, 0.0))\n variables_dict[\"flag_t\"] = 0.0\n variables_dict[\"add_dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"ego_lane\"] = raw_state[16]\n variables_dict[\"lane_ids\"] = raw_state[18]\n variables_dict[\"ego_lane\"] = variables_dict[\"lane_ids\"].index(variables_dict[\"ego_lane\"])\n if variables_dict[\"ego_lane\"] == 0 or variables_dict[\"ego_lane\"] == 2:\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"]] = 1.0\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"] + 3] = 1.0\n variables_dict[\"flag_t\"] = 1 if variables_dict[\"ego_lane\"] == 0 else -1\n\n variables_dict[\"ego_raw_speed\"] = raw_state[3]\n variables_dict[\"filter_speed\"] = (variables_dict[\"ego_raw_speed\"]\n if variables_dict[\"ego_raw_speed\"] >= 10.0 else 10.0)\n variables_dict[\"s_t\"][6] = variables_dict[\"ego_raw_speed\"] / SPEED_RANGE\n objects = raw_state[-1]\n # print(\"ego_speed\",ego_raw_speed,\"ego_lane\",ego_lane)\n if objects[0] is not None:\n # for i in range(len(objects)):\n for i, _object in enumerate(objects):\n lane_id = objects[i][0]\n dist = abs(objects[i][1]) * np.sign(objects[i][1])\n speed = objects[i][2]\n pre_post = np.sign(dist)\n flag = 0 if pre_post == 1.0 else 1\n\n if abs(dist) < VIEW_RANGE:\n for j in range(3):\n adjacent_lane = variables_dict[\"ego_lane\"] - 1 + j\n dist_index = j + flag * 3\n if (lane_id == adjacent_lane and abs(dist) < variables_dict[\"dist_min\"][dist_index]):\n self.min_dist(\n variables_dict[\"v_t\"],\n variables_dict[\"s_t\"],\n dist_index,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n variables_dict[\"dist_min\"][dist_index] = abs(dist)\n\n if abs(dist) < variables_dict[\"add_dist_min\"][flag]:\n if (variables_dict[\"ego_lane\"] == 0 and lane_id == variables_dict[\"ego_lane\"] + 2\n or variables_dict[\"ego_lane\"] == len(variables_dict[\"lane_ids\"]) - 1\n and lane_id == variables_dict[\"ego_lane\"] - 2):\n self.min_dist(\n variables_dict[\"add_v_t\"],\n variables_dict[\"add_s_t\"],\n flag,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n\n state = np.hstack((\n variables_dict[\"s_t\"],\n variables_dict[\"v_t\"],\n variables_dict[\"add_s_t\"],\n variables_dict[\"add_v_t\"],\n variables_dict[\"flag_t\"],\n ))\n return state", "def camera_image_to_state(self, rgb, depth, mask, mask_background, im_ef_pose, step, agg=True, vis=False):\n if vis: \n fig = plt.figure(figsize=(14.4, 4.8))\n ax = fig.add_subplot(1, 3, 1)\n plt.imshow(rgb[:, :, (2, 1, 0)])\n ax = fig.add_subplot(1, 3, 2)\n plt.imshow(depth[...,0])\n ax = fig.add_subplot(1, 3, 3)\n plt.imshow(mask)\n plt.show()\n\n mask_target = np.zeros_like(mask)\n mask_target[mask == self.target_obj_id] = 1\n mask_state = 1 - mask_target[...,None]\n image_state = np.concatenate([rgb, depth, mask_state], axis=-1)\n image_state = image_state.T\n \n # depth to camera, all the points on foreground objects\n # 
backproject depth\n depth_cuda = torch.from_numpy(depth).cuda()\n fx = INTRINSICS[0, 0]\n fy = INTRINSICS[1, 1]\n px = INTRINSICS[0, 2]\n py = INTRINSICS[1, 2]\n im_pcloud = posecnn_cuda.backproject_forward(fx, fy, px, py, depth_cuda)[0].cpu().numpy()\n\n # select points\n valid = (depth[...,0] != 0) * (mask > 0)\n point_xyz = im_pcloud[valid, :].reshape(-1, 3)\n label = mask[valid][...,None]\n point_state = np.concatenate((point_xyz, label), axis=1).T\n # point_state = backproject_camera_target_realworld_clutter(depth, INTRINSICS, mask)\n print('%d foreground points' % point_state.shape[1])\n \n # filter depth\n index = point_state[2, :] < self.depth_threshold\n point_state = point_state[:, index]\n\n # camera to hand\n point_state = se3_transform_pc(EXTRINSICS, point_state)\n\n # background points\n valid = (depth[...,0] != 0) * (mask_background > 0)\n point_background = im_pcloud[valid, :].reshape(-1, 3)\n index = point_background[:, 2] < self.depth_threshold\n point_background = point_background[index, :]\n if point_background.shape[0] > 0:\n point_background = regularize_pc_point_count(point_background, 1024, use_farthest_point=False)\n point_background = se3_transform_pc(EXTRINSICS, point_background.T)\n\n # accumate points in base, and transform to hand again\n point_state = self.process_pointcloud(point_state, im_ef_pose, step, agg)\n obs = (point_state, image_state)\n return obs, point_background", "def get_camera_state(self, parameter):\n return self.opt.getParameter(parameter)", "def sim_to_state(self):\n # As mentioned earlier, we only need z-axis variables.\n # This will keep our state nice and small.\n return [\n self.sim.pose[Z_AXIS],\n self.sim.v[Z_AXIS],\n self.sim.linear_accel[Z_AXIS]\n ]", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def camera(self):\n self.spectrum = self.spectrum", "def state2img(self):\n reward = 0.0\n done = False\n\n cur_stage, cur_arm, cur_step = self.state\n\n # Initial state\n if cur_stage == 0:\n img = Image.new('RGB', (32, 32), color='purple') # purple init\n # First stage\n elif cur_stage == 1:\n # Picture viewing states\n if cur_step == 0:\n rng = np.random.default_rng()\n rand_idx = rng.integers(len(self.ds_dict['choice_1']))\n if cur_arm == 1:\n img = self.ds_dict['choice_1'][rand_idx][0]\n else:\n img = self.ds_dict['choice_2'][rand_idx][0]\n # Pre-action waiting / distraction states\n else:\n img = Image.new('RGB', (32, 32), color='black')\n # TODO: change this to distracting birds?\n # Second stage\n else:\n # At decision point state\n if cur_step == 0:\n img = Image.new('RGB', (32, 32), color='blue')\n # Delay reward hallway states\n elif cur_step <= self.reward_delay_len:\n rng = np.random.default_rng()\n rand_idx = rng.integers(len(self.ds_dict['corridor']))\n img = self.ds_dict['corridor'][rand_idx][0]\n # Pre-terminal state\n elif cur_step == (self.reward_delay_len + 1):\n img = Image.new('RGB', (32, 32), color='green') # green final\n if (cur_arm == 2) and (not self.final_obs_aliased):\n img = Image.new('RGB', (32, 32), color='red') # red final\n # Terminal state\n else:\n img = self.prev_img\n done = True\n noise = np.random.default_rng().normal(0, self.reward_stdev)\n if cur_arm == 1:\n reward = 1.0 + noise\n else:\n reward = -1.0 + noise\n\n # Make things fully observable by having a diff image for each state\n if self.fully_observable and (not((cur_stage == 1)\n and (cur_step == 0))):\n # NOTE: hacky, TODO in the future should use hashing\n state_tup_cp = (self.state[0], self.state[1], 
self.state[2])\n fulobs_idx = self.state_tuple_2_idx(state_tup_cp)\n img = self.ds_dict['corridor'][fulobs_idx][0]\n\n # Construct info (for algo evaluation only)\n info = {\n 'stage_num': self.state[0],\n 'arm_num': self.state[1],\n 'step_num': self.state[2],\n }\n\n return img, reward, done, info", "def get_state(self) -> np.array:\n return self.rstate.render_frame(self.rsimulator, self.grayscale)", "def convert_to(self, dst, rt_mat=None):\n from .box_3d_mode import Box3DMode\n return Box3DMode.convert(\n box=self, src=Box3DMode.CAM, dst=dst, rt_mat=rt_mat)", "def state_tensor_convert(self,state):\n return torch.Tensor(state)", "def update_state(self, boxes: Dict[Sensor, List[BoundBox]]):\n new_state = self._light_state(boxes[Sensor.LargeFOVCameraRGB])\n if new_state[0] == TrafficLightState.NO_TL:\n new_state = self._light_state(boxes[Sensor.MediumFOVCameraRGB])\n if new_state[0] == TrafficLightState.NO_TL:\n new_state = self._light_state(boxes[Sensor.NarrowFOVCameraRGB])\n\n if self._state is None:\n self._state = new_state\n\n if new_state[0] == self.get_state()[0] or new_state[0] != self._new_state[0]:\n self._state_counter = 0\n elif new_state[0] == self._new_state[0]:\n self._state_counter += 1\n\n if self._state_counter >= self.MIN_STATE_FRAMES:\n self._state = new_state\n\n self._new_state = new_state\n return self.get_state()", "def update(self, target):\n self.state = self.camera_func(self.state, target.rect)", "def get_state(self):\n return dict(\n rotation = glm_dumps(glm.quat(self.rotation)), \n position = glm_dumps(glm.vec3(self.position)),\n anchor = glm_dumps(glm.vec3(self.anchor)), \n scale0 = glm_dumps(glm.vec3(self.scale0)), \n )", "def transform(self, state):\n robot_state_tensor = torch.Tensor([state.robot_state.to_tuple()]).to(self.device)\n human_states_tensor = torch.Tensor([human_state.to_tuple() for human_state in state.human_states]). 
\\\n to(self.device)\n\n return robot_state_tensor, human_states_tensor", "def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()", "def normalize_state(self):\n self.state = 2 * (self.state - 0.5)", "def fromState(state):", "def copy_state_to_network(self):\n state = self.rigid_body_state\n\n state.position = self.transform.world_position.copy()\n state.orientation = self.transform.world_orientation.copy()\n state.angular = self.physics.world_angular.copy()\n state.velocity = self.physics.world_velocity.copy()\n # state.collision_group = self.physics.collision_group\n # state.collision_mask = self.physics.collision_mask\n self.rigid_body_time = WorldInfo.elapsed", "def stp_to_camera_transform(self):\n # setup variables\n camera_xyz_w = self.cam_pos\n camera_rot_w = self.cam_rot\n camera_int_pt_w = self.cam_interest_pt\n camera_xyz_obj_p = camera_xyz_w - camera_int_pt_w\n \n # get the distance from the camera to the world\n camera_dist_xy = np.linalg.norm(camera_xyz_w[:2])\n z = [0,0,np.linalg.norm(camera_xyz_w[:3])]\n\n # form the rotations about the x and z axis for the object on the tabletop\n theta = camera_rot_w[0] * np.pi / 180.0\n phi = -camera_rot_w[2] * np.pi / 180.0 + np.pi / 2.0\n camera_rot_obj_p_z = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n\n camera_rot_obj_p_x = np.array([[1, 0, 0],\n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])\n \n # form the full rotation matrix, swapping axes to match maya\n camera_md = np.array([[0, 1, 0],\n [1, 0, 0],\n [0, 0, -1]])\n camera_rot_obj_p = camera_md.dot(camera_rot_obj_p_z.dot(camera_rot_obj_p_x))\n camera_rot_obj_p = camera_rot_obj_p.T\n \n # form the full object to camera transform\n R_stp_camera = camera_rot_obj_p\n t_stp_camera = np.array(z)\n return RigidTransform(rotation=R_stp_camera,\n translation=t_stp_camera,\n from_frame='stp', to_frame='camera')", "def node_to_state(self, node):\n xd = node[0]\n yd = node[1]\n state = self.origin + np.array([self.resolution[0]*(cos(self.rotation)*xd - sin(self.rotation)*yd),\n self.resolution[1]*(sin(self.rotation)*xd + cos(self.rotation)*yd)])\n return state", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def reverse_state(self):\n state = np.array(self.get_reverse_state_arr())\n om = utils.build_occupancy_maps(utils.build_humans(state))\n # print(\"OM: \", om.size())\n # We only have a batch of one so just get the first element of\n # transform and rotate\n state = utils.transform_and_rotate(state.reshape((1, -1)))[0]\n # print(\"State: \", state.size())\n return torch.cat((state, om), dim=1).unsqueeze(0)", "def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)", "def get_state(self):\n return self.get_pose()", "def process_state(state):\n grid = state.grid\n pos = state.pos\n reshaped_grid = np.reshape(grid,(1, grid_size*grid_size)) # Only use squared for square matrices\n reshaped_grid = reshaped_grid[0]\n processed_state = np.concatenate((pos, reshaped_grid))\n processed_state = np.array([processed_state])\n # processed_state.reshape(1, 1, grid_size*grid_size+2, 1)\n #print(processed_state.shape)\n\n return processed_state", "def set_state(self,params):\n self.update_emccd_bias(params['emccd_bias'])\n self.update_num_images(params['num_images'])\n self.make_rois_from_lists(params['roi_coords'],params['thresholds'])\n try: # add things here that don't exist in 
old state files (different try/except for each)\n self.copy_im_threshs = params['copy_im_threshs']\n except KeyError:\n self.copy_im_threshs = [None for _ in range(self.num_images)]", "def _state_from_obs(self, obs: np.ndarray) -> np.ndarray:\n obs_ = obs.copy()\n if self.state_mask is not None:\n obs_ = obs_[self.state_mask]\n return obs_", "def camera_cb(self, msg):\n #rospy.loginfo(\"Received new image\")\n\n try:\n image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n except CvBridgeError as e:\n rospy.logerr(e)\n return\n\n self.image = cv2.flip(image, -1)", "def transform_state(state):\n # TODO: automate n_enemies calculation -> only valid fot n_enemies = n_friends\n n_agents = len(state.agents)\n n_enemies = n_agents // 2 # TODO: improve this\n states_v = torch.zeros(n_agents, 5 + n_enemies) # 5 = x, y, alive, ammo, aim, enemy visible ? (x n_enemies)\n for agent_idx, agent in enumerate(state.agents):\n states_v[agent_idx, 0] = state.position[agent][0] # x\n states_v[agent_idx, 1] = state.position[agent][1] # y\n states_v[agent_idx, 2] = state.alive[agent]\n states_v[agent_idx, 3] = state.ammo[agent] / 5 # args.ammo\n states_v[agent_idx, 4] = -1 if state.aim[agent] is None else state.aim[agent].id\n idx = 5\n for other in state.agents:\n if (agent, other) in state.visible:\n states_v[agent_idx, idx] = int(state.visible[(agent, other)])\n idx += 1\n return states_v" ]
[ "0.5980134", "0.58976054", "0.58470523", "0.5826162", "0.5741425", "0.5722831", "0.57148314", "0.56743157", "0.56138504", "0.5612726", "0.5576117", "0.5559552", "0.5549885", "0.5537195", "0.5533225", "0.5515739", "0.5493909", "0.5471352", "0.5414366", "0.5413517", "0.53990895", "0.538847", "0.53719264", "0.5359854", "0.53576964", "0.53574425", "0.53469497", "0.53433704", "0.5337379", "0.5324848" ]
0.76187414
0
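Illustrative note on the record above: the positive document unwraps `.value` fields from an internal, SDK-side state object into plain CameraState / Temperature containers. The sketch below builds minimal stand-in types so that conversion can actually be run; every class, field wrapper, and sample value here is an assumed reconstruction for illustration, not the real camera SDK API, and the nested CameraState.Temperature of the original is flattened into a separate Temperature class for brevity.

# Stand-in types (assumed) mirroring the shapes the conversion expects.
from dataclasses import dataclass

@dataclass
class _Field:
    value: object  # mimics the internal wrapper exposing `.value`

@dataclass
class Temperature:
    dmd: float
    general: float
    led: float
    lens: float
    pcb: float

@dataclass
class CameraState:
    available: bool
    connected: bool
    live: bool
    temperature: Temperature

@dataclass
class _InternalTemperature:
    dmd: _Field
    general: _Field
    led: _Field
    lens: _Field
    pcb: _Field

@dataclass
class _InternalCameraState:
    available: _Field
    connected: _Field
    live: _Field
    temperature: _InternalTemperature

def to_camera_state_sketch(internal):
    # Same .value unwrapping as the positive document, against the stand-in types.
    return CameraState(
        available=internal.available.value,
        connected=internal.connected.value,
        live=internal.live.value,
        temperature=Temperature(
            dmd=internal.temperature.dmd.value,
            general=internal.temperature.general.value,
            led=internal.temperature.led.value,
            lens=internal.temperature.lens.value,
            pcb=internal.temperature.pcb.value,
        ),
    )

internal = _InternalCameraState(
    available=_Field(True),
    connected=_Field(True),
    live=_Field(False),
    temperature=_InternalTemperature(
        dmd=_Field(35.0), general=_Field(33.0), led=_Field(40.0),
        lens=_Field(32.0), pcb=_Field(36.0),
    ),
)
print(to_camera_state_sketch(internal))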
Function to apply a fixed constant pressure boundary condition; it returns a modified transmissibility matrix and a source/sink matrix.
def fixed_constant_pressure(self, general_transmissibility):
    #print('Setting boundary conditions of local problem {}'.format(self.coarse_volume))
    correct_volumes_group_1 = np.array([0,1,2,3])
    correct_volumes_group_2 = np.array([12,13,14,15])
    transmissibility = copy.deepcopy(general_transmissibility)
    volumes_group_1 = correct_volumes_group_1
    volumes_group_2 = correct_volumes_group_2
    transmissibility[volumes_group_1] = 0
    transmissibility[volumes_group_2] = 0
    transmissibility[volumes_group_1, volumes_group_1] = 1
    transmissibility[volumes_group_2, volumes_group_2] = 1
    source = lil_matrix((int(self.number_volumes_local_problem), 1), dtype = 'float')
    source[volumes_group_1] = 1
    source[volumes_group_2] = 0
    #print('Fixed constant pressure boundary condition applied')
    return transmissibility, source, correct_volumes_group_1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBCProjWF_simple(self, discretization='CC'):\n\n if discretization is not 'CC':\n raise NotImplementedError('Boundary conditions only implemented'\n 'for CC discretization.')\n\n def projBC(n):\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n vals[0] = 1\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n def projDirichlet(n, bc):\n bc = checkBC(bc)\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n if(bc[0] == 'dirichlet'):\n vals[0] = -1\n if(bc[1] == 'dirichlet'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n BC = [['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'],\n ['dirichlet', 'dirichlet']]\n n = self.vnC\n indF = self.faceBoundaryInd\n\n if(self.dim == 1):\n Pbc = projDirichlet(n[0], BC[0])\n B = projBC(n[0])\n indF = indF[0] | indF[1]\n Pbc = Pbc*sdiag(self.area[indF])\n\n elif(self.dim == 2):\n Pbc1 = sp.kron(speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = sp.kron(projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2), format=\"csr\")\n B1 = sp.kron(speye(n[1]), projBC(n[0]))\n B2 = sp.kron(projBC(n[1]), speye(n[0]))\n B = sp.block_diag((B1, B2), format=\"csr\")\n indF = np.r_[(indF[0] | indF[1]), (indF[2] | indF[3])]\n Pbc = Pbc*sdiag(self.area[indF])\n\n elif(self.dim == 3):\n Pbc1 = kron3(speye(n[2]), speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = kron3(speye(n[2]), projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc3 = kron3(projDirichlet(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2, Pbc3), format=\"csr\")\n B1 = kron3(speye(n[2]), speye(n[1]), projBC(n[0]))\n B2 = kron3(speye(n[2]), projBC(n[1]), speye(n[0]))\n B3 = kron3(projBC(n[2]), speye(n[1]), speye(n[0]))\n B = sp.block_diag((B1, B2, B3), format=\"csr\")\n indF = np.r_[\n (indF[0] | indF[1]),\n (indF[2] | indF[3]),\n (indF[4] | indF[5])\n ]\n Pbc = Pbc*sdiag(self.area[indF])\n\n return Pbc, B.T", "def getBCProjWF(self, BC, discretization='CC'):\n\n if discretization is not 'CC':\n raise NotImplementedError('Boundary conditions only implemented'\n 'for CC discretization.')\n\n if isinstance(BC, string_types):\n BC = [BC for _ in self.vnC] # Repeat the str self.dim times\n elif isinstance(BC, list):\n assert len(BC) == self.dim, 'BC list must be the size of your mesh'\n else:\n raise Exception(\"BC must be a str or a list.\")\n\n for i, bc_i in enumerate(BC):\n BC[i] = checkBC(bc_i)\n\n def projDirichlet(n, bc):\n bc = checkBC(bc)\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n if(bc[0] == 'dirichlet'):\n vals[0] = -1\n if(bc[1] == 'dirichlet'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n def projNeumannIn(n, bc):\n bc = checkBC(bc)\n P = sp.identity(n+1).tocsr()\n if(bc[0] == 'neumann'):\n P = P[1:, :]\n if(bc[1] == 'neumann'):\n P = P[:-1, :]\n return P\n\n def projNeumannOut(n, bc):\n bc = checkBC(bc)\n ij = ([0, 1], [0, n])\n vals = [0, 0]\n if(bc[0] == 'neumann'):\n vals[0] = 1\n if(bc[1] == 'neumann'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(2, n+1))\n\n n = self.vnC\n indF = self.faceBoundaryInd\n if(self.dim == 1):\n Pbc = projDirichlet(n[0], BC[0])\n indF = indF[0] | indF[1]\n Pbc = Pbc*sdiag(self.area[indF])\n\n Pin = projNeumannIn(n[0], BC[0])\n\n Pout = projNeumannOut(n[0], BC[0])\n\n elif(self.dim == 2):\n Pbc1 = sp.kron(speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = sp.kron(projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2), format=\"csr\")\n indF = np.r_[(indF[0] | indF[1]), (indF[2] | indF[3])]\n Pbc = Pbc*sdiag(self.area[indF])\n\n P1 = 
sp.kron(speye(n[1]), projNeumannIn(n[0], BC[0]))\n P2 = sp.kron(projNeumannIn(n[1], BC[1]), speye(n[0]))\n Pin = sp.block_diag((P1, P2), format=\"csr\")\n\n P1 = sp.kron(speye(n[1]), projNeumannOut(n[0], BC[0]))\n P2 = sp.kron(projNeumannOut(n[1], BC[1]), speye(n[0]))\n Pout = sp.block_diag((P1, P2), format=\"csr\")\n\n elif(self.dim == 3):\n Pbc1 = kron3(speye(n[2]), speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = kron3(speye(n[2]), projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc3 = kron3(projDirichlet(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2, Pbc3), format=\"csr\")\n indF = np.r_[\n (indF[0] | indF[1]),\n (indF[2] | indF[3]),\n (indF[4] | indF[5])\n ]\n Pbc = Pbc*sdiag(self.area[indF])\n\n P1 = kron3(speye(n[2]), speye(n[1]), projNeumannIn(n[0], BC[0]))\n P2 = kron3(speye(n[2]), projNeumannIn(n[1], BC[1]), speye(n[0]))\n P3 = kron3(projNeumannIn(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pin = sp.block_diag((P1, P2, P3), format=\"csr\")\n\n P1 = kron3(speye(n[2]), speye(n[1]), projNeumannOut(n[0], BC[0]))\n P2 = kron3(speye(n[2]), projNeumannOut(n[1], BC[1]), speye(n[0]))\n P3 = kron3(projNeumannOut(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pout = sp.block_diag((P1, P2, P3), format=\"csr\")\n\n return Pbc, Pin, Pout", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def basiscond(self):\n nrmbasis_ = ctypes.c_double()\n nrminvbasis_ = ctypes.c_double()\n res = __library__.MSK_XX_basiscond(self.__nativep,ctypes.byref(nrmbasis_),ctypes.byref(nrminvbasis_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nrmbasis_ = nrmbasis_.value\n _nrmbasis_return_value = nrmbasis_\n nrminvbasis_ = nrminvbasis_.value\n _nrminvbasis_return_value = nrminvbasis_\n return (_nrmbasis_return_value,_nrminvbasis_return_value)", "def weak_repulsion_boundary(Cents,a,k, CV_matrix,n_c,n_C):\n CCW = np.dstack((roll_reverse(Cents[:,:,0]),roll_reverse(Cents[:,:,1])))#np.column_stack((Cents[:,1:3],Cents[:,0].reshape(-1,1,2)))\n CCW_displacement = Cents - CCW\n rij = np.sqrt(CCW_displacement[:,:,0]**2 + CCW_displacement[:,:,1]**2)\n norm_disp = (CCW_displacement.T/rij.T).T\n V_soft_mag = -k*(rij - 2*a)*(rij<2*a)\n V_soft_CCW = (V_soft_mag.T*norm_disp.T).T\n V_soft_CW = -(roll_forward(V_soft_mag).T*norm_disp.T).T\n 
V_soft = V_soft_CW + V_soft_CCW\n F_soft = np.zeros((n_c, 2))\n for i in range(3):\n F_soft += np.asfortranarray(CV_matrix[:, :, i])@np.asfortranarray(V_soft[:, i])\n F_soft[n_C:] = 0\n return F_soft", "def basiscond(self): # 3\n res,resargs = self.__obj.basiscond()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nrmbasis_return_value,_nrminvbasis_return_value = resargs\n return _nrmbasis_return_value,_nrminvbasis_return_value", "def _apply_boundary_conditions(self,t,dt):\n # update coloured noise generator\n self.noise_gen.update(dt)\n # extract four corner values for each of u and v fields as component\n # mean plus current noise generator output\n corner_mean_index = int(scipy.floor(t/self.wind_update_period))\n added_noise = self.noise_gen.output\n (u_tl, u_tr, u_bl, u_br, v_tl, v_tr, v_bl, v_br) = \\\n added_noise + self._corner_means[corner_mean_index,:]\n # linearly interpolate along edges\n self._u[:, 0] = u_tl + self._rx * (u_tr - u_tl) # u top edge\n self._u[:, -1] = u_bl + self._rx * (u_br - u_bl) # u bottom edge\n self._u[0, :] = u_tl + self._ry * (u_bl - u_tl) # u left edge\n self._u[-1, :] = u_tr + self._ry * (u_br - u_tr) # u right edge\n self._v[:, 0] = v_tl + self._rx * (v_tr - v_tl) # v top edge\n self._v[:, -1] = v_bl + self._rx * (v_br - v_bl) # v bottom edge\n self._v[0, :] = v_tl + self._ry * (v_bl - v_tl) # v left edge\n self._v[-1, :] = v_tr + self._ry * (v_br - v_tr) # v right edge", "def apply_pressure_boundary_from_function(self, \n boundary_marker, \n p_function):\n self.mfd.apply_dirichlet_from_function(boundary_marker, \n p_function)", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def compute_dynamical_zp_broadening(self):\n self.distribute_workload()\n self.zero_point_broadening = (\n self.sum_qpt_function('get_zpb_dynamical'))\n self.broadening_is_dynamical = True", "def piecewise_constant(x, boundaries, values, name=None):\n \n with ops.op_scope([x, boundaries, values, name],\n name, 'PiecewiseConstant') as name:\n x = ops.convert_to_tensor(x)\n # Avoid explicit conversion to x's dtype. 
This could result in faulty\n # comparisons, for example if floats are converted to integers.\n boundaries = ops.convert_n_to_tensor(boundaries)\n if not all(b.dtype == x.dtype for b in boundaries):\n raise ValueError('boundaries must have the same dtype as x.')\n # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.\n values = ops.convert_n_to_tensor(values)\n if not all(v.dtype == values[0].dtype for v in values):\n raise ValueError('values must have elements all with the same dtype.')\n \n pred_fn_pairs = {}\n pred_fn_pairs[x <= boundaries[0]] = lambda: values[0]\n pred_fn_pairs[x > boundaries[-1]] = lambda: values[-1]\n for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):\n # Need to bind v here; can do this with lambda v=v: ...\n pred = (x > low) & (x <= high)\n pred_fn_pairs[pred] = lambda v=v: v\n \n # The default isn't needed here because our conditions are mutually\n # exclusive and exhaustive, but tf.case requires it.\n default = lambda: values[0]\n return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, 
self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save", "def impose_boundary_conditions(self) -> _ImposeBoundaryConditionsResults:\n\n stiffness = self.get_stiffness_matrix()\n force_vector = self.get_force_vector()\n\n restrained_dofs = self.get_supported_dofs()\n\n for axis in range(2):\n stiffness = np.delete(\n stiffness,\n [dof for dof in restrained_dofs],\n axis=axis,\n )\n\n force_vector = np.delete(\n force_vector,\n [dof for dof in restrained_dofs],\n axis=0,\n )\n\n return _ImposeBoundaryConditionsResults(\n stiffness=stiffness,\n force=force_vector,\n )", "def PGD(Params, relaxationVars, fixedBs, fixedTs, data):\n Tol = Params[\"tol\"]\n TolCD = Params[\"tolCD\"]\n Lambda0 = Params[\"Lambda\"]\n Lambda1 = Params[\"alpha\"] * Lambda0\n M = Params[\"M\"]\n y = data.ycentered # data.y - data.ybar\n\n Bindices = relaxationVars.BActive.copy() # list\n Tindices = relaxationVars.TActive.copy() # list of tuples (i,j)\n currentB, currentT = relaxationVars.initialSol.ToArray(Bindices, Tindices)\n fixedB = fixedBs.copy() # Dict. key = index, value = 0 or 1 (no index if not fixed)\n fixedT = fixedTs.copy() # Dict. key = (i,j), value = 0 or 1 (no index if not fixed)\n DualInitial = relaxationVars.useDual\n\n # Store the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # Next: Some sanity checks (those can be removed if we're carful about the\n # inputs)\n\n # Make sure if B_i is fixed to 0 then all T_{ij}'s (in Tindices) are also\n # fixed to zero\n for i, val in fixedB.items():\n if val == 0:\n for l, j in Tmap:\n if l < j and (l == i or j == i):\n fixedT[(l, j)] = 0\n\n # Make sure if T_{ij} is fixed to 1 then both B_i and B_j are fixed to 1\n for key, val in fixedT.items():\n if val == 1:\n i, j = key\n fixedB[i] = 1\n fixedB[j] = 1\n\n # Delete from Bindices and Tindices all the indices s.t. 
z_i = 0 / z_{ij}\n # = 0\n Bzeros = []\n for i, val in fixedB.items():\n if val == 0:\n Bzeros.append(Bmap[i])\n for i in sorted(Bzeros, reverse=True):\n del Bindices[i]\n currentB = np.delete(currentB, Bzeros)\n\n Tzeros = []\n for key, val in fixedT.items():\n if val == 0:\n Tzeros.append(Tmap[key])\n for i in sorted(Tzeros, reverse=True):\n del Tindices[i]\n currentT = np.delete(currentT, Tzeros)\n\n # Update the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # End of sanity checks\n\n # Retrive the matrices of the optimization variables\n # Later: We can store the centered columns (but this will require twice\n # the memory)\n XB, XT = data.Retrieve(Bindices, Tindices)\n XBMean = XB.mean(axis=0)\n XB = XB - XBMean\n XTMean = XT.mean(axis=0)\n XT = XT - XTMean\n\n Bfree = [i for i in Bindices if i not in fixedB]\n Tfree = [(i, j) for i, j in Tmap if i < j and (i, j) not in fixedT]\n TfreeIndices = [Tmap[(i, j)]\n for i, j in Tmap if i < j and (i, j) not in fixedT]\n lenFixedB = len(Bindices) - len(Bfree)\n lenFixedT = len([key for key in fixedT if fixedT[key] == 1])\n\n # (Dual) Block CD Variables\n u = defaultdict(float)\n w = defaultdict(dict)\n if not DualInitial:\n for i in Bindices:\n u[i] = 0\n for pair in Tmap:\n i, j = pair\n w[i][j] = 0\n else:\n for i in Bindices:\n if i in relaxationVars.u and i not in fixedB:\n u[i] = relaxationVars.u[i]\n else:\n u[i] = 0\n for i, j in Tmap:\n if j in relaxationVars.w[i] and (min(i, j), max(\n i, j)) not in fixedT and i not in fixedB and j not in fixedB:\n w[i][j] = relaxationVars.w[i][j]\n else:\n # Important: we need w[i][j] = 0 if T_{ij} if fixed (this is\n # due to the thresholding function)\n w[i][j] = 0\n\n sortedIndices = {i: sorted(w[i]) for i in w}\n sortedIndices = defaultdict(list, sortedIndices)\n\n # Prepare all the fixed matrices/vectors required for grad evaluation\n # later.\n XBty = np.dot(XB.T, y)\n XBtXB = np.dot(XB.T, XB)\n XTty = np.dot(XT.T, y)\n XTtXT = np.dot(XT.T, XT)\n XBtXT = np.dot(XB.T, XT)\n\n # Compute the lipschitz constant of the grad.\n Xfull = np.hstack((XB, XT))\n if Xfull.shape[1] != 0:\n eigvals, v = np.linalg.eig(np.dot(Xfull.T, Xfull))\n L = np.max(np.real(eigvals))\n else:\n L = 1 # any value here should suffice - it's not used.\n\n # Compute the lipschitz constants for BCD.\n LCD = {}\n for i in Bindices:\n LCD[i] = (len(w[i]) + 1) * ((Lambda0**2) / (L * M**2))\n\n # Define the thresholding constants\n frac = Lambda0 / (M * L)\n Mpfrac = M + frac\n frac1 = Lambda1 / (M * L)\n Mpfrac1 = M + frac1\n fracsqL = frac * frac * L\n LambdaovM = Lambda0 / M\n Lambda1ovM = Lambda1 / M\n Lambda1ovLambda0 = Lambda1 / Lambda0\n\n start = time.time()\n\n oldObj = math.inf\n for it in range(5000):\n grad_B = - XBty + np.dot(XBtXB, currentB) + np.dot(XBtXT, currentT)\n grad_T = - XTty + np.dot(XTtXT, currentT) + np.dot(XBtXT.T, currentB)\n Bstar = currentB - grad_B / L\n Tstar = currentT - grad_T / L\n # Iterate over the blocks, running dual BCD.\n # We employ dual warm starts by using the same (u,w) across the PGD updates.\n CDPrevObj = -math.inf\n LCDCurrent = copy(LCD)\n useZeroSuffCondition = True\n if useZeroSuffCondition:\n # Perform proximal screening below.\n zeroGroups = set()\n for i in Bfree:\n zeroSufficient = False\n cumsum = 0\n for j in w[i]:\n thrshld = 
max(\n (abs(Tstar[Tmap[(i, j)]]) / frac - Lambda1ovLambda0), 0)\n # Do feature level screening below.\n if thrshld == 0:\n # The initialization below ensures that \\theta_{ij} is\n # never updated by BCA.\n w[i][j] = 0\n w[j][i] = 0\n else:\n cumsum += thrshld\n\n if cumsum <= 1 - abs(Bstar[Bmap[i]]) / frac:\n zeroSufficient = True\n if zeroSufficient:\n u[i] = Bstar[Bmap[i]] / frac\n for j in w[i]:\n if abs(Tstar[Tmap[(i, j)]]) > frac1:\n w[i][j] = Tstar[Tmap[(\n i, j)]] / frac - Lambda1ovLambda0 * np.sign(Tstar[Tmap[(i, j)]])\n else:\n w[i][j] = 0\n w[j][i] = 0\n # Not nec. but can improve speed.\n LCDCurrent[j] -= (Lambda0**2) / (L * M**2)\n zeroGroups.add(i)\n\n BfreeMinusZeroGroups = [i for i in Bfree if i not in zeroGroups]\n CDObjConst = 0\n '''\n for i in zeroGroups:\n CDObjConst += q(u[i], Bstar[Bmap[i]], M, Lambda0, L,frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObjConst += T(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n '''\n ####\n else:\n zeroGroups = set()\n CDObjConst = 0\n BfreeMinusZeroGroups = Bfree\n # To Turn the part above off, comment it out and set the following:\n # zeroGroups = set()\n # CDObjConst = 0\n # BfreeMinusZeroGroups = Bfree\n\n for innerit in range(10000):\n # for i in Bfree:\n for i in BfreeMinusZeroGroups:\n # First, Calculate utilde and wtilde for ith block\n utilde = u[i] + delq(u[i],\n Bstar[Bmap[i]],\n M,\n Lambda0,\n L,\n frac,\n Mpfrac,\n fracsqL,\n LambdaovM) / LCDCurrent[i]\n\n #wtilde = {}\n # for j in w[i]:\n # if B_j is fixed to 1, then we already set w[j][i] = 0\n # wtilde[j] = w[i][j] + delT(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, Mpfrac, fracsqL, LambdaovM)/LCD[i]\n sortedIndicesi = sortedIndices[i]\n # delT(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM)\n wtilde = [w[i][j] + delT(w[i][j],\n w[j][i],\n Tstar[Tmap[(i,\n j)]],\n M,\n Lambda0,\n L,\n frac,\n frac1,\n Mpfrac1,\n LambdaovM) / LCDCurrent[i] for j in sortedIndicesi]\n\n x = np.empty(shape=len(wtilde) + 1)\n # Solve the l1 projection problem.\n x[0] = utilde\n x[1:] = np.array(wtilde)\n projection = project(x)\n # Update the solution.\n u[i] = projection[0]\n # for j in range(len(w[i])):\n # w[i][sortedIndicesi[j]] = projection[j+1] ## +1 since u[i] is\n # first\n for counter, j in enumerate(sortedIndicesi):\n w[i][j] = projection[counter + 1]\n # Calculate the current objective\n CDObj = CDObjConst # 0\n for i in BfreeMinusZeroGroups: # Bfree:\n CDObj += q(u[i], Bstar[Bmap[i]], M, Lambda0, L, frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObj += T(w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M,\n Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n #Params[\"print\"](\"Inner obj: \", CDObj)\n if terminate(CDPrevObj, CDObj, TolCD):\n break\n CDPrevObj = CDObj\n\n # Get back the primal solution.\n for i in range(len(Bindices)):\n # if Bindices[i] is fixed to 1, then u[Bindices[i]] = 0 and the\n # update below will lead to currentB[i] = Bstar[i] (or +- M)\n if Bindices[i] not in zeroGroups:\n # assuming Bindices is sorted\n currentB[i] = dualtoprimalu(\n u[Bindices[i]], Bstar[i], M, Lambda0, L, frac)\n else:\n currentB[i] = 0\n\n for i, j in Tmap:\n # if i or j is fixed, the corresponding w[i][j] will be zero, which\n # leads to the correct update.\n if i < j:\n if (i, j) in Tfree:\n # dualtoprimalw(wij, wji, thetaij, M, Lambda0, L, frac, 
frac1, Mpfrac1)\n if i in zeroGroups or j in zeroGroups:\n currentT[Tmap[(i, j)]] = 0\n else:\n currentT[Tmap[(i, j)]] = dualtoprimalw(\n w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M, Lambda0, L, frac, frac1, Mpfrac1)\n else: # careful, this is the case when no thresholding should be applied\n coefficient = Tstar[Tmap[(i, j)]]\n if np.abs(coefficient) <= M:\n currentT[Tmap[(i, j)]] = coefficient\n else:\n currentT[Tmap[(i, j)]] = M * np.sign(coefficient)\n\n r = y - np.dot(XB, currentB) - np.dot(XT, currentT)\n\n maxterm = 0\n for i in range(len(currentB)):\n if Bindices[i] not in fixedB:\n maxtemp = np.abs(currentB[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentT[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentT[TfreeIndices]))\n # IMPORTANT: Avoid using lenFixed and lenFixedT here.....!!!!!! ####\n currentobjective = 0.5 * np.dot(r, r) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n if currentobjective > oldObj:\n Params[\"print\"](\"Objective Increased!!!\")\n\n if terminate(oldObj, currentobjective, Tol):\n break\n\n oldObj = currentobjective\n Params[\"print\"](\"Iteration :\", it, \". Objective: \", currentobjective)\n\n end = time.time()\n Params[\"print\"](\"Time: \", end - start, \" seconds.\")\n\n # Check if any small values should be zero.\n # Start with more aggressive checks first.\n Trunc = False\n for epsilon in [0.01, 1e-3, 1e-4, 1e-5, 1e-6]:\n currentBtrunc = np.copy(currentB)\n currentTtrunc = np.copy(currentT)\n currentBSetToZero = np.nonzero(np.abs(currentB) < epsilon)[0]\n currentBtrunc[currentBSetToZero] = 0\n currentBSetToZeroPSet = set(currentBSetToZero)\n for (i, j) in Tmap:\n if Bmap[i] in currentBSetToZeroPSet or Bmap[j] in currentBSetToZeroPSet:\n currentTtrunc[Tmap[(i, j)]] = 0\n\n currentTtrunc[np.abs(currentT) < epsilon] = 0\n rtrunc = y - np.dot(XB, currentBtrunc) - np.dot(XT, currentTtrunc)\n maxterm = 0\n for i in range(len(currentBtrunc)):\n if Bindices[i] not in fixedB:\n maxtemp = np.abs(currentBtrunc[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentTtrunc[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentTtrunc[TfreeIndices]))\n objectivetrunc = 0.5 * np.dot(rtrunc, rtrunc) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n Params[\"print\"](\n \"eps: \",\n epsilon,\n \" objectivetrunc: \",\n objectivetrunc,\n \" currentobjective: \",\n currentobjective)\n # 1.01 might be beneficial in some extreme cases where supp becomes\n # very large (but might also cause descent problems)\n if objectivetrunc <= currentobjective:\n '''\n currentB = currentBtrunc\n currentT = currentTtrunc\n r = rtrunc\n currentobjective = objectivetrunc\n '''\n Params[\"print\"](\"###CHANGE###\", \"eps: \", epsilon)\n Params[\"print\"](\"Final Objective :\", objectivetrunc)\n Trunc = True\n break\n\n integral = True\n\n for i in Bfree:\n zi = np.abs(currentB[Bmap[i]]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n for i in TfreeIndices:\n zi = np.abs(currentT[i]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n Bnnz = {key: currentB[Bmap[key]]\n for key in Bmap if currentB[Bmap[key]] != 0}\n Tnnz = {(i, j): currentT[Tmap[(i, j)]]\n for i, j in Tmap if i < j and currentT[Tmap[(i, j)]] != 0}\n intercept = data.ybar - np.dot(XBMean, currentB) - np.dot(XTMean, currentT)\n sol = Solution(Bnnz, Tnnz, intercept)\n\n if Trunc:\n BnnzTrunc = {key: currentBtrunc[Bmap[key]]\n for key in 
Bmap if currentBtrunc[Bmap[key]] != 0}\n TnnzTrunc = {(i, j): currentTtrunc[Tmap[(\n i, j)]] for i, j in Tmap if i < j and currentTtrunc[Tmap[(i, j)]] != 0}\n interceptTrunc = data.ybar - \\\n np.dot(XBMean, currentBtrunc) - np.dot(XTMean, currentTtrunc)\n solTrunc = Solution(BnnzTrunc, TnnzTrunc, interceptTrunc)\n else:\n BnnzTrunc = Bnnz\n TnnzTrunc = Tnnz\n interceptTrunc = intercept\n solTrunc = sol\n\n return (sol, solTrunc, currentobjective, integral, r, u, w)", "def boundary_conditions(particle_outer_radius, boundary_temp):\n\n boundary_condition = [particle_outer_radius, boundary_temp]\n\n return boundary_condition", "def probabilistic_thresholding(cmndf, thresholds, p_min, p_max, absolute_min_prob, F_axis, Fs, beta_distr,\n parabolic_interp=True):\n # restrict search range to interval [p_min:p_max]\n cmndf[:p_min] = np.inf\n cmndf[p_max:] = np.inf\n\n # find local minima (assuming that cmndf is real in [p_min:p_max], you will always find a minimum,\n # at least p_min or p_max)\n min_idxs = (np.argwhere((cmndf[1:-1] < cmndf[0:-2]) & (cmndf[1:-1] < cmndf[2:]))).flatten().astype(np.int64) + 1\n\n O_m = np.zeros(2 * len(F_axis))\n\n # return if no minima are found, e.g., when frame is silence\n if min_idxs.size == 0:\n return O_m, np.ones_like(thresholds)*p_min, np.ones_like(thresholds)\n\n # Optional: Parabolic Interpolation of local minima\n if parabolic_interp:\n # do not interpolate at the boarders, Numba compatible workaround for np.delete()\n min_idxs_interp = delete_numba(min_idxs, np.argwhere(min_idxs == p_min))\n min_idxs_interp = delete_numba(min_idxs_interp, np.argwhere(min_idxs_interp == p_max - 1))\n p_corr, cmndf[min_idxs_interp] = parabolic_interpolation(cmndf[min_idxs_interp - 1],\n cmndf[min_idxs_interp],\n cmndf[min_idxs_interp + 1])\n else:\n p_corr = np.zeros_like(min_idxs).astype(np.float64)\n\n # set p_corr=0 at the boarders (no correction done later)\n if min_idxs[0] == p_min:\n p_corr = np.concatenate((np.array([0.0]), p_corr))\n\n if min_idxs[-1] == p_max - 1:\n p_corr = np.concatenate((p_corr, np.array([0.0])))\n\n lag_thr = np.zeros_like(thresholds)\n val_thr = np.zeros_like(thresholds)\n\n # loop over all thresholds\n for i, threshold in enumerate(thresholds):\n # minima below absolute threshold\n min_idxs_thr = min_idxs[cmndf[min_idxs] < threshold]\n\n # find first local minimum\n if not min_idxs_thr.size:\n lag = np.argmin(cmndf) # choose absolute minimum when no local minimum is found\n am_prob = absolute_min_prob\n val = np.min(cmndf)\n else:\n am_prob = 1\n lag = np.min(min_idxs_thr) # choose first local minimum\n val = cmndf[lag]\n\n # correct lag\n if parabolic_interp:\n lag += p_corr[np.argmin(min_idxs_thr)]\n\n # ensure that lag is in [p_min:p_max]\n if lag < p_min:\n lag = p_min\n elif lag >= p_max:\n lag = p_max - 1\n\n lag_thr[i] = lag\n val_thr[i] = val\n\n idx = np.argmin(np.abs(1200 * np.log2(F_axis / (Fs / lag)))) # quantize estimated period\n O_m[idx] += am_prob * beta_distr[i] # pYIN-Paper, Formula 4/5\n\n return O_m, lag_thr, val_thr", "def assemble_matrices(self):\n \n #Pointer reassignment for convenience\n N = self.ngrids\n\n #Begin with a linked-list data structure for the transmissibilities,\n #and one-dimenstional arrays for the diagonal of B and the flux vector\n T = lil_matrix((N, N), dtype=np.double)\n B = np.zeros(N, dtype=np.double)\n Q = np.zeros(N, dtype=np.double)\n\n #Read in boundary condition types and values\n bcs = self.input_data['boundary conditions']\n bc_type_1 = bcs['left']['type'].lower()\n bc_type_2 = 
bcs['right']['type'].lower()\n bc_value_1 = bcs['left']['value']\n bc_value_2 = bcs['right']['value']\n \n #Loop over all grid cells\n for i in range(N):\n\n #Apply left BC\n if i == 0:\n T[i, i+1] = -self.compute_transmissibility(i, i + 1)\n\n if bc_type_1 == 'neumann':\n T[i, i] = T[i,i] - T[i, i+1]\n elif bc_type_1 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i,i] - T[i, i+1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_1\n else:\n pass #TODO: Add error checking here if no bc is specified\n\n #Apply right BC\n elif i == (N - 1):\n T[i, i-1] = -self.compute_transmissibility(i, i - 1)\n\n if bc_type_2 == 'neumann':\n T[i, i] = T[i,i] - T[i, i-1]\n elif bc_type_2 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i, i] - T[i, i-1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_2\n else:\n pass #TODO:Add error checking here if no bc is specified\n\n #If there is no boundary condition compute interblock transmissibilties\n else:\n T[i, i-1] = -self.compute_transmissibility(i, i-1)\n T[i, i+1] = -self.compute_transmissibility(i, i+1)\n T[i, i] = (self.compute_transmissibility(i, i-1) +\n self.compute_transmissibility(i, i+1))\n\n #Compute accumulations\n B[i] = self.compute_accumulation(i)\n\n #If constant-rate wells are present, add them to the flux vector\n if self.rate_well_grids is not None:\n Q[self.rate_well_grids] += self.rate_well_values\n\n \n #Return sparse data-structures\n return (T.tocsr(), \n csr_matrix((B, (np.arange(N), np.arange(N))), shape=(N,N)), \n Q)", "def bool_constraints(Px,pk1,pk2,mu1,mu2,mu3):\n C = np.array([1,1,1,1,1,1,1,1], dtype=bool) # Initialise array as True\n \n # Constraint 1: Check polarisation basis probabilities are valid.\n if (Px >= 1.0 or Px <= 0.0):\n C[0] = False\n # Constraint 2: Check probability of pulse with intensity 1 is in bounds.\n if (pk1 >= 1.0 or pk1 <= 0.0):\n C[1] = False\n # Constraint 3: Check probability of pulse with intensity 2 is in bounds.\n if (pk2 >= 1.0 or pk2 <= 0.0):\n C[2] = False\n # Constraint 4: Check sum of probabilities for intensity 1 & 2 are less\n # than unity.\n if ((pk1 + pk2) >= 1.0):\n C[3] = False\n # Constraint 5: Check value of intensity 1 is in bounds.\n if (mu1 >= 1.0 or mu1 <= 0.0):\n C[4] = False\n # Constraint 6: Check value of intensity 2 is in bounds.\n if (mu2 >= 1.0 or mu2 <= 0.0):\n C[5] = False\n # Constraint 7: Check values of all intensities are in bounds.\n if ((mu1 - mu3) <= mu2):\n C[6] = False\n # Constraint 8: Check values of intensities 2 & 3 are in bounds.\n if (mu2 <= mu3):\n C[7] = False\n return C", "def testConstantBoundedField(self):\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointYShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointYShift))\n self.assertFloatsAlmostEqual(1e-9, photoCalib.instFluxToMaggies(self.instFlux*1e-9, self.pointXShift))\n self.assertFloatsAlmostEqual(22.5, photoCalib.instFluxToMagnitude(\n self.instFlux*1e-9, self.pointXShift))\n\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)", "def restrict(self):\n\n cg = self.grid.coarse_like(2)\n\n c_edge_coeffs = EdgeCoeffs(cg, None, empty=True)\n\n c_eta_x = cg.scratch_array()\n c_eta_y = cg.scratch_array()\n\n fg 
= self.grid\n\n c_eta_x[cg.ilo:cg.ihi+2,cg.jlo:cg.jhi+1] = \\\n 0.5*(self.x[fg.ilo:fg.ihi+2:2,fg.jlo :fg.jhi+1:2] +\n self.x[fg.ilo:fg.ihi+2:2,fg.jlo+1:fg.jhi+1:2])\n\n # redo the normalization\n c_edge_coeffs.x = c_eta_x*fg.dx**2/cg.dx**2\n\n c_eta_y[cg.ilo:cg.ihi+1,cg.jlo:cg.jhi+2] = \\\n 0.5*(self.y[fg.ilo :fg.ihi+1:2,fg.jlo:fg.jhi+2:2] +\n self.y[fg.ilo+1:fg.ihi+1:2,fg.jlo:fg.jhi+2:2])\n\n c_edge_coeffs.y = c_eta_y*fg.dy**2/cg.dy**2\n\n return c_edge_coeffs", "def const_r1(x, fBar, phiC, w):\n return numpy.where(x < fBar, w*phiC*x, w*phiC*fBar)", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 
= self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def FlowRestriction(T_in, p_in, m_dot_out, d_inner, f):\r\n\r\n # Cross section\r\n A_cross = (np.pi/4)*(d_inner**2)\r\n\r\n # Assumption isenthalpic flow!\r\n h_in = hp.HeCalc(9, 0, 1, p_in, 2, T_in, 1) #J/kg\r\n\r\n # Iteration for the calculation of p_out even though the influence is probably negligible\r\n # I checked it and for 20 bar it really is negligible\r\n dp = 0.0\r\n p_out = 0.0\r\n for i in range(5):\r\n p_out = p_in - dp\r\n T_out = hp.HeCalc(2, 0, 1, p_out, 9, h_in, 1)\r\n Rho_out = hp.HeCalc(3, 0, 1, p_out, 2, T_out, 1) #kg/m³\r\n # Velocity of the outgoing flow\r\n u_out = m_dot_out/(A_cross*Rho_out) #m/s\r\n\r\n # Calculation of the dp with Bernoulli equation and resistance coefficient (see VDI Heatatlas 2013)\r\n dp = f * Rho_out * 0.5 * u_out**2\r\n\r\n\r\n h_out = hp.HeCalc(9, 0, 1, p_out, 2, T_out, 1)\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n return state_out", "def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)", "def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = 
negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = 
transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))", "def compute_static_zp_broadening(self):\n self.distribute_workload()\n self.zero_point_broadening = self.sum_qpt_function('get_zpb_static')\n self.broadening_is_dynamical = False" ]
[ "0.5876379", "0.56467026", "0.56454414", "0.557635", "0.54988295", "0.5464996", "0.54629254", "0.54357946", "0.54234153", "0.5325453", "0.52624094", "0.5240842", "0.5230096", "0.52277684", "0.52179563", "0.52156794", "0.51873976", "0.5179338", "0.51686215", "0.51600015", "0.51327753", "0.51163477", "0.5101336", "0.50824636", "0.5070707", "0.5070707", "0.5047928", "0.50444174", "0.5035333", "0.5029132" ]
0.73723733
0
Returns the masked epsilon value for the current epoch; epsilon value = eps_mask * eps
def eps(self): return self.eps_mask*self.eps_scheduler.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_epsilon(self):\n step_size = float(self._eps_begin - self._eps_end) / self._total_steps\n self._epsilon = max(self._eps_end, self._epsilon - step_size)\n return self._epsilon", "def epsilon():\n return _EPSILON", "def epsilon(self):\n return self._epsilon", "def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon", "def epsilon(self):\n return self.__epsilon", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def _get_epsilon(self, is_evaluation, power=1.0):\n if is_evaluation:\n return 0.0\n decay_steps = min(self._step_counter, self._epsilon_decay_duration)\n decayed_epsilon = (\n self._epsilon_end + (self._epsilon_start - self._epsilon_end) *\n (1 - decay_steps / self._epsilon_decay_duration) ** power)\n return decayed_epsilon", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def set_epsilon(value):\n global _EPSILON\n _EPSILON = value", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def decay_epsilon(self, epsilon, MIN_EPSILON,\r\n EPSILON_DECAY: float) -> float:\r\n if epsilon > MIN_EPSILON:\r\n epsilon *= EPSILON_DECAY\r\n epsilon = max(MIN_EPSILON, epsilon)\r\n return epsilon", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def eps(self):\n return self._eps", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def epsilon(dimension):\n return DriverUtils.EPSILON[dimension] if dimension in DriverUtils.EPSILON \\\n else DriverUtils.EPSILON['default']", "def calc_epsilon(self, state_number, evaluation=False):\n if evaluation:\n return self.eps_evaluation\n elif state_number < self.replay_buffer_start_size:\n return self.eps_initial\n elif self.replay_buffer_start_size <= state_number < self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope * state_number + self.intercept\n elif state_number >= self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope_2 * state_number + self.intercept_2", "def update_epsilon(self):\n self.epsilon = self.epsilon * self.decay", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def epsilon_delta(self):", "def calc_eps(self, steps):\n eps_threshold = self.EPS_END + (self.EPS_START - self.EPS_END) * \\\n math.exp(-1. 
* steps / self.EPS_DECAY)\n return eps_threshold", "def Kepsilon(self):\n kE = 2 + 0.1024 / self.r + (0.1124 + 0.1265 * radians(self.sweep25W) + 0.1766 * radians(self.sweep25W)**2) / \\\n (self.r**2)\n kE0 = 2 + 0.1024 / self.r + 0.1124 / (self.r**2)\n return kE / kE0", "def MccEpsilon(self):\n if getattr(self, '_MccEpsilon', None) is None:\n self._MccEpsilon = Utils.sdiag(self.epsilon)\n return self._MccEpsilon", "def em_epsilon_cdp(epsilon, delta, k):\n if delta <= 0:\n return epsilon / k\n else:\n log_delta = np.log(1 / delta)\n return max(\n epsilon / k,\n np.sqrt((8 * log_delta + 8 * epsilon) / k) -\n np.sqrt(8 * log_delta / k))", "def eps(self, dtype):\n return self.finfo(dtype).eps", "def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit", "def find_epsilon(self, ltarget):\n\n dnu = self.find_large_separation()\n one = n = nu = 0.0\n for i in range(len(self.modes)):\n if (self.modes['l'][i] != ltarget): continue\n one += 1.0\n n += self.modes['n'][i]\n nu += self.modes['freq'][i]\n if (one == 0.0):\n return 0.0\n else:\n return (nu/dnu-n)/one", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def testEpsK1Changes(self):\n with self.test_context() as session:\n initial_eps = 1e-3\n num_classes = 5\n rm = gpflow.likelihoods.RobustMax(num_classes, initial_eps)\n\n expected_eps_k1 = initial_eps / (num_classes - 1.)\n actual_eps_k1 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k1, actual_eps_k1)\n\n new_eps = 0.412\n rm.epsilon.assign(new_eps, session=session)\n expected_eps_k2 = new_eps / (num_classes - 1.)\n actual_eps_k2 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k2, actual_eps_k2)", "def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]" ]
[ "0.690235", "0.6759066", "0.67323434", "0.67133105", "0.66764283", "0.6660499", "0.64527667", "0.62959194", "0.62958694", "0.6282747", "0.62823194", "0.6260531", "0.6201078", "0.6074518", "0.605753", "0.60512596", "0.6047877", "0.6020077", "0.6009018", "0.59153366", "0.5843124", "0.578868", "0.574674", "0.5743239", "0.5721888", "0.57168776", "0.5699687", "0.5575388", "0.5534793", "0.55343455" ]
0.7003138
0
This function determines the threshold values of a given dataframe according to a given column name, lower quantile value, and upper quantile value.
def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile): quartile1 = dataframe[col_name].quantile(low_quantile) quartile3 = dataframe[col_name].quantile(up_quantile) interquantile_range = quartile3 - quartile1 up_limit = quartile3 + 1.5 * interquantile_range low_limit = quartile1 - 1.5 * interquantile_range return low_limit, up_limit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getquantile(df, low=0.1, high=0.9):\n q1 = df.quantile(low)\n q3 = df.quantile(high)\n print(q1)\n print(q3)\n return df[df<q1],df[df>q3]", "def replace_with_thresholds(dataframe, col_name, low_threshold, up_threshold):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name,\n low_threshold, up_threshold)\n if low_limit > 0:\n dataframe.loc[(dataframe[col_name] < low_limit), col_name] = low_limit\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit\n else:\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit", "def winsorize_columns(dataframe, columns, winzerize_type='percentile',limits =.01, standard_deviation_limit=3,frame_type='spark'):\n\n if frame_type == 'spark':\n import numpy as np\n df = dataframe\n\n if winzerize_type == 'percentile':\n def percentile_threshold(ardd, percentile):\n assert percentile > 0 and percentile <= 100, \"percentile should be larger then 0 and smaller or equal to 100\"\n\n return ardd.sortBy(lambda x: x).zipWithIndex().map(lambda x: (x[1], x[0])) \\\n .lookup(np.ceil(ardd.count() / 100 * percentile - 1))[0]\n\n for column in columns:\n def flatten_column(row):\n return tuple(float(x) for x in row)\n #Compute the percentiles\n lower = percentile_threshold(df.select(column).rdd.flatMap(flatten_column),limits)\n upper = percentile_threshold(df.select(column).rdd.flatMap(flatten_column), 100 - limits)\n\n print('For {column} the lower limit is {lower}'.format(column=column,lower=str(lower)))\n print('For {column} the upper limit is {upper}'.format(column=column,upper=str(upper)))\n\n from pyspark.sql.functions import when\n #Make columns greater then upper bound == to upper bound\n df = df.withColumn(column,\n when(df[column] > upper, upper)\n .otherwise(df[column]))\n #Make columns less then lower bound == to lower bound\n df = df.withColumn(column,\n when(df[column] < lower, lower)\n .otherwise(df[column]))\n return df\n elif winzerize_type == 'stddev':\n def replace(df,column_to_filter,standard_deviations=3):\n \"\"\"\n Will remove the outliers that have a stddev higher then x(param standard_deviations).\n\n \"\"\"\n import math\n #This function will flatten the row of the dataframe\n def flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n # print(stddev_threshhold)\n from pyspark.sql.functions import lit,abs\n from pyspark.sql.functions import when\n\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) > 0), (mean+stddev_threshhold))\n .otherwise(df[column_to_filter]))\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) < 0), (mean-stddev_threshhold))\n .otherwise(df[column_to_filter]))\n\n return df\n for column in columns:\n df = replace(df,column,standard_deviation_limit)\n return df\n else:\n from scipy.stats.mstats import winsorize\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n df[column] = winsorize(df[column], limits = limits)\n\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n print('Done.')\n return df\n else:\n return df", 
"def remove_outliers_by_percentile(dataframe, columns, limits =.01, frame_type='spark'):\n\n if frame_type == 'spark':\n import numpy as np\n df = dataframe\n\n def percentile_threshold(ardd, percentile):\n assert percentile > 0 and percentile <= 100, \"percentile should be larger then 0 and smaller or equal to 100\"\n # df.approxQuantile(\"x\", [0.5], 0.25)\n return ardd.sortBy(lambda x: x).zipWithIndex().map(lambda x: (x[1], x[0])) \\\n .lookup(np.ceil(ardd.count() / 100 * percentile - 1))[0]\n\n for column in columns:\n def flatten_column(row):\n return tuple(float(x) for x in row)\n #Compute the percentiles\n lower = percentile_threshold(df.select(column).rdd.flatMap(flatten_column),limits)\n upper = percentile_threshold(df.select(column).rdd.flatMap(flatten_column), 100 - limits)\n\n print('For {column} the lower limit is {lower}'.format(column=column,lower=str(lower)))\n print('For {column} the upper limit is {upper}'.format(column=column,upper=str(upper)))\n\n from pyspark.sql.functions import lit\n #Filter out outliers\n df = df.where(\"{column} < {upper} AND {column} > {lower} \"\\\n .format(column=column,upper=upper,lower=lower))\n return df\n\n\n else:\n import numpy as np\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n ulimit = np.percentile(train_df[column].values, 100 - limits)\n llimit = np.percentile(train_df[column].values, limits)\n df[column] = df[df[column] < ulimit]\n df[column] = df[df[column] > llimit]\n\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n print('Done.')\n return df\n else:\n return df", "def check_outlier(dataframe, col_name):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name, 0.05, 0.95)\n if dataframe[(dataframe[col_name] > up_limit) |\n (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False", "def outlier_determine_threshold(df, col):\r\n df = df.copy(deep=True)\r\n keep_looping = True\r\n number_of_loops = 1\r\n thresh = 5\r\n while keep_looping:\r\n if number_of_loops >= 10:\r\n break\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n pct_outliers = len(dfout_index)/len(df)\r\n if pct_outliers == 0:\r\n if thresh > 5:\r\n thresh = thresh - 5\r\n elif thresh == 5:\r\n return thresh\r\n else:\r\n thresh = thresh - 1\r\n elif pct_outliers <= 0.01:\r\n keep_looping = False\r\n else:\r\n thresh_multiplier = int((pct_outliers/0.01)*0.5)\r\n thresh = thresh*thresh_multiplier\r\n number_of_loops += 1\r\n print(' %s Outlier threshold = %d' %(col, thresh))\r\n return thresh", "def filter_percentile(df, col, up=95, down=5):\n pup = np.percentile(df[col].values, up)\n pdw = np.percentile(df[col].values, down)\n\n s = (df[col]<pup) & (df[col]>pdw)\n df2 = df[s]\n\n return df2", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), 
col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def locProts(df, thresh=.75):\r\n if \"Localization prob\" not in df.columns:\r\n print(\"This dataframe has no 'Localization prob' column!\")\r\n return True\r\n print(f\"{df.shape[0]} entries in dataframe.\")\r\n df = df[df[\"Localization prob\"]>=thresh]\r\n print(f\"{df.shape[0]} entries in dataframe with localization prob >= {thresh*100}%.\")\r\n return df", "def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices", "def calculate_data_will_be_affected(dataframe, column,\n low_quantile=0.0, up_quantile=1.0):\n ratio_for_up_limit = (len(dataframe[dataframe[column] > dataframe[column].\n quantile(up_quantile)]) /\n dataframe[column].shape[0]) * 100\n ratio_for_low_limit = (len(dataframe[dataframe[column] < dataframe[column].\n quantile(low_quantile)]) /\n dataframe[column].shape[0]) * 100\n ratio_of_affected_data = round(ratio_for_up_limit + ratio_for_low_limit, 2)\n print(\"When we do reassign operation in the ({} - {}) range, %{} of data \"\n \"will be affected in this operation.\".format(low_quantile,\n up_quantile,\n ratio_of_affected_data))", "def flag_outliers_in_col(self, df, col='paciente_idade', threshold=2):\n data = df[col]\n mean = np.mean(data)\n std = np.std(data)\n outlier = []\n for i in data:\n z = (i-mean)/std\n outlier.append(z > threshold)\n outlier = pd.Series(outlier)\n print(f\"Number of outliers: {outlier.sum()}\")\n return outlier", "def define_threshold(self, threshold):\n def func(x, threshold):\n if x > threshold:\n return 'up'\n elif x < -threshold:\n return 'down'\n else:\n return 'stable'\n try:\n self.df['Direction'] = self.df.apply(lambda x: func(x['Return'], threshold), axis=1)\n except:\n print(\"issue\")\n return", "def missing_stats(X, missing_threshold, axis=1):\n a = 1-axis\n missing_series = X.isnull().sum(axis = a) / X.shape[a]\n # Calculate the fraction of missing in each column \n missing_series = X.isnull().sum() / X.shape[0]\n if axis == 1:\n missing_stats_cols = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_cols = missing_stats_cols.sort_values('missing_fraction', ascending = False)\n missing_threshold_cols_grid = pd.DataFrame(missing_series[missing_series >= missing_threshold]).reset_index().rename(columns = {'index': 'cols', 0: 'missing_fraction'})\n return missing_threshold_cols_grid\n elif axis == 0:\n missing_stats_rows = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_rows = 
missing_stats_rows.sort_values('missing_fraction', ascending = False)\n missing_threshold_rows_grid = pd.DataFrame(missing_series[missing_series > missing_threshold]).reset_index().rename(columns = {'index': 'rows', 0: 'missing_fraction'})\n return missing_threshold_rows_grid", "def get_quantile_thresholds(ds, quantile, dim, lat_name='lat', lon_name='lon', lat_chunk=1, lon_chunk=1):\n ds = ds.chunk({**{lat_name: 1, lon_name: 1},\n **{d: -1 for d in dim}})\n return ds.quantile(quantile, dim)", "def test_signal_threshold(df_phys, signal, threshold):\n df_signal = df_phys[df_phys[\"Signal\"] == signal][\"Physical Value\"]\n\n stats = df_signal.agg([\"count\", \"min\", \"max\", \"mean\", \"std\"])\n delta = stats[\"max\"] - stats[\"min\"]\n\n if delta > threshold:\n print(f\"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}\")", "def check_threshold(threshold, data, percentile_func, name=\"threshold\"):\n if isinstance(threshold, str):\n message = (\n f'If \"{name}\" is given as string it '\n \"should be a number followed by the percent \"\n 'sign, e.g. \"25.3%\"'\n )\n if not threshold.endswith(\"%\"):\n raise ValueError(message)\n\n try:\n percentile = float(threshold[:-1])\n except ValueError as exc:\n exc.args += (message,)\n raise\n\n threshold = percentile_func(data, percentile)\n elif isinstance(threshold, numbers.Real):\n # checks whether given float value exceeds the maximum\n # value of the image data\n value_check = abs(data).max()\n if abs(threshold) > value_check:\n warnings.warn(\n f\"The given float value must not exceed {value_check}. \"\n f\"But, you have given threshold={threshold}.\"\n )\n else:\n raise TypeError(\n f\"{name} should be either a number \"\n \"or a string finishing with a percent sign\"\n )\n return threshold", "def transform(self, df):\n if self.__fitOK:\n\n selected_col = []\n\n for i, col in enumerate(df.columns):\n\n if (self.__Ddrifts[col] < self.threshold):\n selected_col.append(col)\n\n return df[selected_col]\n\n else:\n raise ValueError('Call the fit function before !')", "def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]", "def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]", "def createQuantile(data, column_name, cut_of_point):\r\n data[data[column_name] > data[column_name].quantile(cut_of_point)] = 0\r\n return data", "def get_rows_greater_than_avg(df, column):\r\n df= df[df[column] > df[column].mean()]\r\n return df", "def listcwsw(df, threshold, key_col='token', val_col='value'):\n return df[df[val_col]>=threshold][key_col].values", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def fast_outlier_id(data, cols=\"All\", method=\"z-score\",\n threshold_low_freq=0.05):\n\n # ASSERT TESTS\n assert isinstance(data, pd.DataFrame), \"Data must be in pandas Data Frame!\"\n\n if type(cols) == str:\n if cols.lower() == \"all\":\n cols = list(data.columns)\n\n if type(cols) != str:\n assert isinstance(cols, list), \"Columns must be inputted in a list\"\n for i in cols:\n assert i in list(\n data.columns), \"Columns must exist in the inputted data \" \\\n \"dataframe\"\n\n assert method.lower() in [\"z-score\",\n \"interquartile\"], \\\n \"The only permitted values are z-score or interquantile,thank you\"\n\n # Initialize lists containing summary values\n no_nans_list = list()\n col_type_list = list()\n perc_nans_list = list()\n outlier_values_list = list()\n outlier_count_list = list()\n outlier_perc_list = list()\n 
method_list = list()\n\n # Subsetting the data by the columns selected by the user\n subset = data[cols]\n for i in cols:\n # More lists containing summary values\n no_nans = subset[i].isna().sum()\n no_nans_list.append(no_nans)\n col_type_list.append(subset[i].dtype)\n perc_nans_list.append(round(no_nans / len(subset[i]), 2))\n data_no_nans = subset[i][~pd.isna(subset[i])]\n if data_no_nans.dtypes in ['float64', 'int64']:\n if method.lower() == \"z-score\":\n score = np.abs(stats.zscore(data_no_nans))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 2)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Z-Score\")\n elif method.lower() == \"interquartile\":\n Q1 = np.quantile(data_no_nans, 0.25)\n Q3 = np.quantile(data_no_nans, 0.75)\n IQR = Q3 - Q1\n score = (data_no_nans < (Q1 - 1.5 * IQR)) | (\n data_no_nans > (Q3 + 1.5 * IQR))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 0)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Interquartile\")\n elif data_no_nans.dtype in ['object']:\n score = data_no_nans.value_counts() / len(data_no_nans)\n outlier_values = score[score < threshold_low_freq].index.tolist()\n outlier_count_list.append(\n data_no_nans.value_counts()[score < threshold_low_freq].sum())\n outlier_perc_list.append(\n round(sum(score[score < threshold_low_freq]), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"low-freq\")\n summary_dict = {'column_name': cols, 'type': col_type_list,\n 'no_nans': no_nans_list, 'perc_nans': perc_nans_list,\n 'outlier_method': method_list,\n \"no_outliers\": outlier_count_list,\n \"perc_outliers\": outlier_perc_list,\n \"outlier_values\": outlier_values_list}\n summary = pd.DataFrame(summary_dict)\n return (summary)", "def _estimate_threshold(self, **kwargs):\n recompute_threshold = kwargs.pop('recompute_threshold', False)\n # if threshold is in table, then use it.\n current_setting = (self.beta, self.chi2dist.kwds['df'])\n threshold = None\n res = False\n if not recompute_threshold and current_setting in _gaussian_cusum_thresholds.keys():\n for e in _gaussian_cusum_thresholds[current_setting]:\n if e[0] == self.arl:\n threshold = np.array([e[1]])\n res = True\n # if threshold not is in table, estimate it\n if threshold is None:\n len_simulation = kwargs.pop('len_simulation', None)\n if len_simulation is None:\n len_simulation = 10 * self.arl\n self.log.info(\"estimating threshold...\")\n plain_cusum = Cusum(arl=self.arl, beta=self.beta)\n plain_cusum.gamma = self.gamma\n d2_training = self.chi2dist.rvs(size=(int(len_simulation), 1))\n kwargs.pop('x', None)\n res = plain_cusum._estimate_threshold(x=d2_training, dof=self.chi2dist.kwds['df'],\n **kwargs)\n threshold = plain_cusum.threshold\n self.threshold = threshold\n return res", "def get_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max(), \" in the row \", dataframe[dataframe[variable] == dataframe[variable].max()])\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())\n print(\"Mean of \", variable, \" is: \", dataframe[variable].mean())\n print(\"Standard deviation of \", variable, \" is: \", 
dataframe[variable].std())\n print(\"Mode of \", variable, \" is: \", dataframe[variable].mode())\n print(\"Median of \", variable, \" is: \", dataframe[variable].median())\n\n # For speed: disregard points that are lower than 2.5 (to avoid stationary periods) as part of the interquartile range and greater than 100, which is only a few points anyway.\n dataframeselection = dataframe.loc[(dataframe[variable] >= 2.5) & (dataframe[variable] < 100)]\n q1 = dataframeselection[variable].quantile(0.25)\n q3 = dataframeselection[variable].quantile(0.75)\n iqr = q3 - q1\n print(\"Upper quartile of \", variable, \" is: \", q3)\n print(\"Lower quartile of \", variable, \" is: \", q1)\n print(\"Interquartile range of \", variable, \" is: \", iqr)\n lower_limit = q1-1.5*iqr\n upper_limit = q3+1.5*iqr\n print(\"Lower limit for outliers from IQR for \", variable, \" is: \", lower_limit)\n print(\"Upper limit for outliers from IQR for \", variable, \" is: \", upper_limit)\n points_above_upper_limit = len(dataframe.loc[(dataframe[variable] > (q3+1.5*iqr)) & (dataframe[variable] < 100)])\n number_of_points = len(dataframe)\n print(\"Total number of data points\", number_of_points)\n print(\"There are \", points_above_upper_limit, \" points that lie above the upper bound, which corresponds to \", (points_above_upper_limit/number_of_points)*100, \" %\")\n\n return upper_limit", "def quantile(df):\r\n\r\n\tdf_quantile_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_quantile_dict[col] = [df[col].quantile(0.25), df[col].quantile(0.5), df[col].quantile(0.75)]\r\n\r\n\tdf_quantile = pd.DataFrame(df_quantile_dict, index=['Quantile (25%)', 'Quantile (50%)', 'Quantile (75%)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_quantile", "def winsorize(df, quantile=0.05, columns=None):\n\n if columns is not None:\n # Winsorize SOME of the columns in the DataFrame.\n\n # Create a copy of the original data.\n df_clipped = df.copy()\n\n # Recursively call this function to winsorize and update those columns.\n df_clipped[columns] = winsorize(df=df[columns], quantile=quantile)\n else:\n # Winsorize ALL of the columns in the DataFrame.\n\n # Boolean mask used to ignore inf values.\n mask = np.isfinite(df)\n\n # Lower and upper quantiles for all columns in the data.\n # We use the boolean mask to select only the finite values,\n # and the infinite values are set to NaN, which are ignored\n # by the quantile-function.\n lower = df[mask].quantile(q=quantile)\n upper = df[mask].quantile(q=1.0 - quantile)\n\n # Limit / clip all column-values between these quantiles.\n df_clipped = df.clip(lower=lower, upper=upper, axis='columns')\n\n return df_clipped", "def _xtrim(self, lower, upper):\n trm = pd.Series(data=True, index=self._data.index)\n for c in self.index_colnames_all:\n l_limit = np.percentile(self._data[c], 100 * lower)\n u_limit = np.percentile(self._data[c], 100 * upper)\n trm &= self._data[c].apply(lambda x: True if l_limit <= x <= u_limit else False)\n\n return trm" ]
[ "0.6829688", "0.6753499", "0.66339403", "0.64233243", "0.6403971", "0.62588423", "0.624364", "0.6227992", "0.5958877", "0.5931864", "0.59059733", "0.5899942", "0.588427", "0.58527815", "0.5804863", "0.5798349", "0.5789768", "0.57848287", "0.56844115", "0.56844115", "0.5665437", "0.56630397", "0.56505543", "0.5620387", "0.56117505", "0.55986345", "0.5592844", "0.5583019", "0.5582067", "0.55781734" ]
0.74504536
0
This function checks whether a column of a dataframe has outliers or not, according to the results of the outlier_thresholds function.
def check_outlier(dataframe, col_name): low_limit, up_limit = outlier_thresholds(dataframe, col_name, 0.05, 0.95) if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_outlier(incoming_data):\r\n outlier_df = \\\r\n incoming_data[incoming_data.apply(\r\n lambda x: np.abs(x - x.mean()) / x.std() > 3).all(axis=1)]\r\n return not outlier_df.empty", "def flag_outliers_in_col(self, df, col='paciente_idade', threshold=2):\n data = df[col]\n mean = np.mean(data)\n std = np.std(data)\n outlier = []\n for i in data:\n z = (i-mean)/std\n outlier.append(z > threshold)\n outlier = pd.Series(outlier)\n print(f\"Number of outliers: {outlier.sum()}\")\n return outlier", "def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile):\n quartile1 = dataframe[col_name].quantile(low_quantile)\n quartile3 = dataframe[col_name].quantile(up_quantile)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit", "def outlier_determine_threshold(df, col):\r\n df = df.copy(deep=True)\r\n keep_looping = True\r\n number_of_loops = 1\r\n thresh = 5\r\n while keep_looping:\r\n if number_of_loops >= 10:\r\n break\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n pct_outliers = len(dfout_index)/len(df)\r\n if pct_outliers == 0:\r\n if thresh > 5:\r\n thresh = thresh - 5\r\n elif thresh == 5:\r\n return thresh\r\n else:\r\n thresh = thresh - 1\r\n elif pct_outliers <= 0.01:\r\n keep_looping = False\r\n else:\r\n thresh_multiplier = int((pct_outliers/0.01)*0.5)\r\n thresh = thresh*thresh_multiplier\r\n number_of_loops += 1\r\n print(' %s Outlier threshold = %d' %(col, thresh))\r\n return thresh", "def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def get_outliers(a_dataframe):\n outliers_list = []\n for category in a_dataframe.dtypes.keys():\n try:\n column = a_dataframe.loc[:, category]\n mean = np.mean(column) # check if category is numeric\n except TypeError:\n pass\n else:\n # print_hist(column, category)\n st_dev = np.std(column)\n limit_hi = mean + 2 * st_dev\n limit_lo = mean - 2 
* st_dev\n flag_bad = (column < limit_lo) | (column > limit_hi)\n if category != \"fnlwgt\": # skip 'fnlwgt' var. 'cos I'll delete it\n outliers_list.append(flag_bad)\n num_outliers = sum(flag_bad)\n print_stats(category, column,\n limit_hi, limit_lo,\n num_outliers\n )\n\n return outliers_list", "def is_outlier(hist, value):\n stdev = np.std(hist, axis=0)\n avg = np.average(hist[-15:], axis=0)\n if any(lf for lf, avg, std in zip(value, avg, stdev) if lf > avg + 3 * std) or \\\n any(lf for lf, avg, std in zip(value, avg, stdev) if lf < avg - 3 * std):\n return True\n return False", "def is_outlier(points, thresh=3.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def is_outlier(points, thresh=12):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def isnot_outlier(points, thresh=1.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score <= thresh", "def outlier_vars(data, show_plot=False):\n \n outliers = [] \n Q1 = data.quantile(0.25)\n Q3 = data.quantile(0.75)\n IQR = Q3 - Q1\n num_data = data.select_dtypes(include='number')\n result = dict ((((num_data < (Q1 - 1.5 * IQR)) | (num_data > (Q3 + 1.5 * IQR)))==True).any())\n for k,v in result.items():\n if v == True: \n outliers.append(k)\n if show_plot:\n pair_plot = sns.pairplot(data[outliers]);\n print(f'{result},\\n\\n Visualization of outlier columns')\n return pair_plot\n else:\n return data[outliers]", "def detect_outliers(self, var: ndarray) -> ndarray:\n beta, gamma = self.get_vars(var)\n r = self.data.obs - self.fevar.mapping(beta)\n s = np.sqrt(self.data.obs_se**2 +\n np.sum(self.revar.mapping.mat**2*gamma, axis=1))\n a = norm.ppf(0.5 + 0.5*self.inlier_pct)\n return np.abs(r) > a*s", "def FE_find_and_cap_outliers(df, features, drop=False, verbose=False):\r\n df = df.copy(deep=True)\r\n outlier_indices = []\r\n idcol = 'idcol'\r\n df[idcol] = range(len(df))\r\n if isinstance(features, str):\r\n features = [features]\r\n # iterate over features(columns)\r\n for col in features:\r\n # Determine a list of indices of outliers for feature col\r\n thresh = outlier_determine_threshold(df, col)\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n\r\n df['anomaly1'] = 0\r\n df.loc[dfout_index ,'anomaly1'] = 1\r\n\r\n ### this is how the column looks now before capping outliers\r\n if verbose:\r\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))\r\n colors = {0:'blue', 1:'red'}\r\n ax1.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax1.set_xlabel('Row ID')\r\n ax1.set_ylabel('Target values')\r\n ax1.set_title('%s before capping outliers' %col)\r\n\r\n capped_value = df.loc[dfout_index, col].min() ## this is the value we cap it against\r\n df.loc[dfout_index, col] = capped_value ## maximum values are now capped\r\n ### 
you are now good to go - you can show how they are capped using before and after pics\r\n if verbose:\r\n colors = {0:'blue', 1:'red'}\r\n ax2.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax2.set_xlabel('Row ID')\r\n ax2.set_ylabel('Target values')\r\n ax2.set_title('%s after capping outliers' %col)\r\n\r\n # Let's save the list of outliers and see if there are some with outliers in multiple columns\r\n outlier_indices.extend(dfout_index)\r\n\r\n # select certain observations containing more than one outlier in 2 columns or more. We can drop them!\r\n outlier_indices = Counter(outlier_indices)\r\n multiple_outliers = list( k for k, v in outlier_indices.items() if v > 3 )\r\n ### now drop these rows altogether ####\r\n df.drop([idcol,'anomaly1'], axis=1, inplace=True)\r\n if drop:\r\n print('Shape of dataframe before outliers being dropped: %s' %(df.shape,))\r\n number_of_rows = df.shape[0]\r\n df.drop(multiple_outliers, axis=0, inplace=True)\r\n print('Shape of dataframe after outliers being dropped: %s' %(df.shape,))\r\n print('\\nNumber_of_rows with multiple outliers in more than 3 columns which were dropped = %d' %(number_of_rows-df.shape[0]))\r\n return df", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)", "def outlier_detection(df, method, wt, features, output):\n\n print('\\n Detecting outliers...')\n start_time = time.time()\n\n # Model wt morphology\n wt_data = df[df['Strain ID'].isin(wt)][features].values\n\n # Create a subset with only WT cells and fit the model\n if method == 'GMM':\n gmm = mixture.GaussianMixture(n_components=3, 
covariance_type='full')\n gmm.fit(wt_data)\n df.insert(loc=len(df.columns), column='Score', value=-gmm.score_samples(df[features].values).ravel())\n else:\n ocsvm = svm.OneClassSVM(kernel='rbf')\n ocsvm.fit(wt_data)\n df.insert(loc=len(df.columns), column='Score', value=-ocsvm.decision_function(df[features].values).ravel())\n\n # Print OD walltime\n text = 'Outlier detection walltime: %.2f minutes\\n' % ((time.time() - start_time) / 60.0)\n text += 'Total number of cells: %d\\n' % df['Score'].shape[0]\n text += 'Number of WT cells: %d\\n' % wt_data.shape[0]\n log_write(output['log'], text)\n\n return df", "def identify_outliers(x):\n outliers = np.array([])\n\n IQR = iqr(x)\n low_cut = np.percentile(x,25) - 1.5*IQR\n high_cut = np.percentile(x,75) + 1.5*IQR\n\n for sub in x.index:\n if x.loc[sub] < low_cut or x.loc[sub] > high_cut:\n # outliers = np.append(outliers,np.asarray(x == i).nonzero()[0])\n outliers = np.append(outliers,sub)\n\n return outliers", "def is_outlier(points, threshold=3.5):\n # transform into vector\n if len(points.shape) == 1:\n points = points[:,None]\n\n # compute median value \n median = np.median(points, axis=0)\n \n # compute diff sums along the axis\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n # compute MAD\n med_abs_deviation = np.median(diff)\n \n # compute modified Z-score\n # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.htm#Iglewicz\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n # return a mask for each outlier\n return modified_z_score > threshold", "def detect_outlier(column, max_dev=2):\n column_mean = np.mean(column)\n column_std = np.std(column)\n dist_from_mean = abs(column - column_mean)\n outlier_filter = dist_from_mean > max_dev * column_std\n ids = np.arange(len(column))\n return ids[outlier_filter]", "def remove_outliers(df, var):\n import numpy as np\n \n df = df.copy()\n \n # remove outliers\n Q1 = np.nanquantile(df[var] ,0.25)\n Q3 = np.nanquantile(df[var], 0.75)\n IQR = Q3 - Q1\n \n lower_end = Q1 - 1.5 * IQR \n high_end = Q3 + 1.5 * IQR \n \n df_filtered = df.drop(df[(df[var] < lower_end) | (df[var] > high_end)].index)\n \n return df_filtered", "def outlier_hunt(df):\n outlier_indices = []\n\n # iterate over features(columns)\n for col in df.columns.tolist():\n # 1st quartile (25%)\n Q1 = np.percentile(df[col], 1)\n\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col], 99)\n\n # Interquartile rrange (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices.extend(outlier_list_col)\n\n # select observations containing more than 2 outliers\n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(k for k, v in outlier_indices.items() if v >= 2)\n\n return multiple_outliers", "def replace_with_thresholds(dataframe, col_name, low_threshold, up_threshold):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name,\n low_threshold, up_threshold)\n if low_limit > 0:\n dataframe.loc[(dataframe[col_name] < low_limit), col_name] = low_limit\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit\n else:\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit", "def detect_outliers(df, n, features):\n outlier_indices = [] \n for col in features: \n Q1 = np.percentile(df[col], 25)\n Q3 = 
np.percentile(df[col], 75)\n IQR = Q3 - Q1\n outlier_step = 1.5 * IQR \n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n outlier_indices.extend(outlier_list_col) \n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(key for key, value in outlier_indices.items() if value > n) \n return multiple_outliers", "def remove_outliers(df, column_pup, maxdev = 2.5, allowp=0.1, \r\n column_x = None, column_y = None, left = None, right = None, top = None, bottom = None):\r\n \r\n # off-screen samples\r\n ## check if proper argumnets are passed\r\n if None in [column_x, column_y, left, right, top, bottom]:\r\n warnings.warn(\"Screen information not properly specified. Out-of-screen samples will not be removed.\")\r\n df[column_pup+'_rm'] = df[column_pup]\r\n ## remove out-of-screen samples\r\n else:\r\n conditions = ((df[column_x] < left) | (df[column_x] > right) | (df[column_y] < top) | (df[column_y] > bottom))\r\n df[column_pup+'_rm'] = np.where(conditions, np.nan, df[column_pup])\r\n \r\n # samples with a large SD\r\n mean = df[column_pup+'_rm'].mean(skipna=True)\r\n std = df[column_pup+'_rm'].std(skipna=True)\r\n \r\n # if std is reasonably small then no outlier will be declared\r\n if std >= allowp*mean:\r\n lower = mean - maxdev*std\r\n upper = mean + maxdev*std\r\n conditions2 = ((df[column_pup+'_rm']<lower) | (df[column_pup+'_rm']>upper))\r\n df[column_pup+'_rm'] = np.where(conditions2, np.nan, df[column_pup+'_rm'])\r\n \r\n return df", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def detect_outliers(data, tolerance=2):\n medians = data.rolling(5, center=True).median()\n lowerq = data.rolling(5, center=True).quantile(.75)\n upperq = data.rolling(5, center=True).quantile(.25)\n iqrs = np.abs(upperq - lowerq)\n diffs = np.abs(data - medians)\n outliers = pd.Series(diffs > (tolerance * iqrs))\n return outliers, sum(outliers)", "def compute_lof_score(stats, columns, col_name):\n\n if stats[col_name]['data_type'] != DATA_TYPES.NUMERIC:\n return {}\n\n np_col_data = np.array(columns[col_name]).reshape(-1, 1)\n lof = LocalOutlierFactor(contamination='auto')\n outlier_scores = lof.fit_predict(np_col_data)\n outlier_indexes = [i for i in range(len(columns[col_name])) if outlier_scores[i] < -0.8]\n\n return {\n 'lof_outliers': outlier_indexes\n ,'lof_based_outlier_score': round(10 * (1 - len(outlier_indexes)/len(columns[col_name])))\n ,'percentage_of_log_based_outliers': (len(outlier_indexes)/len(columns[col_name])) * 100\n ,'lof_based_outlier_score_description':\"\"\"\n The higher this score, the more outliers your dataset has. 
This is based on distance from the center of 20 clusters as constructed via KNN.\n \"\"\"\n }", "def replace_outliers_by_threshold(tX, threshold, outlier_value):\n\n new_tX = tX\n for j in range(new_tX.shape[1]):\n col = new_tX[:, j]\n values, indices = np.unique(col, return_index=True)\n data = zip(values, indices)\n values_mean = np.mean(values)\n values_std = np.std(values)\n cut_off = threshold * values_std\n lower, upper = values_mean - cut_off, values_mean + cut_off\n outliers = []\n other_values = []\n for (x, y) in data:\n if x < lower or x > upper:\n outliers.append((x, y))\n else:\n other_values.append((x, y))\n lower_mean = np.mean(np.asarray(other_values)[other_values <= values_mean])\n upper_mean = np.mean(np.asarray(other_values)[other_values >= values_mean])\n for v, index in outliers:\n if outlier_value == 'clip':\n if v < values_mean:\n new_tX[index, j] = lower\n else:\n new_tX[index, j] = upper\n elif outlier_value == 'mean':\n new_tX[index, j] = values_mean\n elif outlier_value == 'upper_lower_mean':\n if v < values_mean:\n new_tX[index, j] = lower_mean\n else:\n new_tX[index, j] = upper_mean\n return new_tX" ]
[ "0.7784306", "0.7516061", "0.7242404", "0.7233822", "0.7036477", "0.694232", "0.68730634", "0.679349", "0.678132", "0.6776806", "0.6770923", "0.6696206", "0.6659336", "0.6577895", "0.6551978", "0.6533329", "0.64901596", "0.64123476", "0.6403893", "0.6348688", "0.63019955", "0.6290253", "0.62723523", "0.6269384", "0.6244885", "0.6238147", "0.62215126", "0.621938", "0.6121758", "0.6121235" ]
0.85974765
0
This function replaces the outlier values in the given dataframe column with the lower and upper limits obtained from the outlier_thresholds function for the given lower and upper threshold values.
def replace_with_thresholds(dataframe, col_name, low_threshold, up_threshold):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name,
                                             low_threshold, up_threshold)
    if low_limit > 0:
        dataframe.loc[(dataframe[col_name] < low_limit), col_name] = low_limit
        dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit
    else:
        dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_outliers_by_threshold(tX, threshold, outlier_value):\n\n new_tX = tX\n for j in range(new_tX.shape[1]):\n col = new_tX[:, j]\n values, indices = np.unique(col, return_index=True)\n data = zip(values, indices)\n values_mean = np.mean(values)\n values_std = np.std(values)\n cut_off = threshold * values_std\n lower, upper = values_mean - cut_off, values_mean + cut_off\n outliers = []\n other_values = []\n for (x, y) in data:\n if x < lower or x > upper:\n outliers.append((x, y))\n else:\n other_values.append((x, y))\n lower_mean = np.mean(np.asarray(other_values)[other_values <= values_mean])\n upper_mean = np.mean(np.asarray(other_values)[other_values >= values_mean])\n for v, index in outliers:\n if outlier_value == 'clip':\n if v < values_mean:\n new_tX[index, j] = lower\n else:\n new_tX[index, j] = upper\n elif outlier_value == 'mean':\n new_tX[index, j] = values_mean\n elif outlier_value == 'upper_lower_mean':\n if v < values_mean:\n new_tX[index, j] = lower_mean\n else:\n new_tX[index, j] = upper_mean\n return new_tX", "def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile):\n quartile1 = dataframe[col_name].quantile(low_quantile)\n quartile3 = dataframe[col_name].quantile(up_quantile)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit", "def replace_numeric_values(*, df, colnames=\"all\", lower_limit=\"none\", upper_limit=\"none\", equal=False, replace_with=np.nan, verbose=True): \n\n \n cols_names = colnames\n \n # .. check provided col_names,\n if cols_names==\"all\": \n cols = list(df.columns)\n else: \n cols = cols_names \n\n # .. info, header, \n if verbose==True:\n print(f\"\"\"\\n{\"\".join([\"-\"]*80)} \\n Replacing Numerical Values in {len(cols)} columns\"\"\") \n print(f\" lower filter={lower_limit}, upper filter ={upper_limit}\")\n if equal==True:\n print(f\" Caution, equal=True, ie. values >= and <= then requested limits will be replaced\")\n print(f'{\"\".join([\"-\"]*80)}\\n') \n \n if verbose==False:\n pass\n \n \n # .. intelligent info,\n total_count=[]\n\n # .. count, to limit the number of displayed messages,\n count = 0\n\n # .. replace values and collect examples, \n for i, j in enumerate(cols):\n\n # ..... assume no values were replaced, so the messages work later, \n info_lower_filter = 0\n info_upper_filter = 0 \n \n # ..... 
test if the column is of the numeric type:\n # from pandas.api.types import is_numeric_dtype\n if is_numeric_dtype(df[j]):\n \n \n # * replace values < or <= lower limit,\n # - ----------------------------------\n if lower_limit!=\"none\": \n if equal == True:\n lower_filter = df.loc[:,j]<=lower_limit\n if equal == False:\n lower_filter = df.loc[:,j]<lower_limit\n \n # info,\n info_lower_filter=lower_filter.sum()\n df.loc[list(lower_filter),j]=replace_with\n \n \n # * replace values > or >= upper limit,\n # - ----------------------------------\n if upper_limit!=\"none\": \n if equal == True:\n upper_filter = df.loc[:,j]>=upper_limit\n if equal == False:\n upper_filter = df.loc[:,j]>upper_limit\n \n # info,\n info_upper_filter=upper_filter.sum()\n df.loc[list(upper_filter),j]=replace_with \n \n # * find how many values were replaced, and add that to the total_count list \n total_count.append(info_upper_filter+info_lower_filter)\n \n # * display examples for 3 first columns with replaced values,\n if verbose==True:\n if info_upper_filter+info_lower_filter>0 and count <4:\n print(f\"eg: {i}, {j} : {info_lower_filter} values <{lower_limit}, ...{info_upper_filter} values <{upper_limit}\")\n else:\n pass\n\n # * add 1 to count, to limit the number of displayed examples,\n count += 1 \n \n else:\n if verbose==True:\n print(f\"{i, j} is not of numeric type, values were not replaced !\")\n else:\n pass\n \n # .. additional message, if more then 2 columns had replaced values, \n if verbose==True:\n if len(total_count)>3 and pd.Series(total_count).sum()>0:\n print(f\". and {len(total_count)-3} other columns had in total {pd.Series(total_count).sum()} replaced values \\n\")\n\n # .. message in case no values vere replaced at all, \n if pd.Series(total_count).sum()==0:\n print(\"No values were replaced in requested columns....\")\n \n else:\n pass\n \n # .. 
return, \n return df.copy()", "def FE_find_and_cap_outliers(df, features, drop=False, verbose=False):\r\n df = df.copy(deep=True)\r\n outlier_indices = []\r\n idcol = 'idcol'\r\n df[idcol] = range(len(df))\r\n if isinstance(features, str):\r\n features = [features]\r\n # iterate over features(columns)\r\n for col in features:\r\n # Determine a list of indices of outliers for feature col\r\n thresh = outlier_determine_threshold(df, col)\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n\r\n df['anomaly1'] = 0\r\n df.loc[dfout_index ,'anomaly1'] = 1\r\n\r\n ### this is how the column looks now before capping outliers\r\n if verbose:\r\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))\r\n colors = {0:'blue', 1:'red'}\r\n ax1.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax1.set_xlabel('Row ID')\r\n ax1.set_ylabel('Target values')\r\n ax1.set_title('%s before capping outliers' %col)\r\n\r\n capped_value = df.loc[dfout_index, col].min() ## this is the value we cap it against\r\n df.loc[dfout_index, col] = capped_value ## maximum values are now capped\r\n ### you are now good to go - you can show how they are capped using before and after pics\r\n if verbose:\r\n colors = {0:'blue', 1:'red'}\r\n ax2.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax2.set_xlabel('Row ID')\r\n ax2.set_ylabel('Target values')\r\n ax2.set_title('%s after capping outliers' %col)\r\n\r\n # Let's save the list of outliers and see if there are some with outliers in multiple columns\r\n outlier_indices.extend(dfout_index)\r\n\r\n # select certain observations containing more than one outlier in 2 columns or more. We can drop them!\r\n outlier_indices = Counter(outlier_indices)\r\n multiple_outliers = list( k for k, v in outlier_indices.items() if v > 3 )\r\n ### now drop these rows altogether ####\r\n df.drop([idcol,'anomaly1'], axis=1, inplace=True)\r\n if drop:\r\n print('Shape of dataframe before outliers being dropped: %s' %(df.shape,))\r\n number_of_rows = df.shape[0]\r\n df.drop(multiple_outliers, axis=0, inplace=True)\r\n print('Shape of dataframe after outliers being dropped: %s' %(df.shape,))\r\n print('\\nNumber_of_rows with multiple outliers in more than 3 columns which were dropped = %d' %(number_of_rows-df.shape[0]))\r\n return df", "def winsorize_columns(dataframe, columns, winzerize_type='percentile',limits =.01, standard_deviation_limit=3,frame_type='spark'):\n\n if frame_type == 'spark':\n import numpy as np\n df = dataframe\n\n if winzerize_type == 'percentile':\n def percentile_threshold(ardd, percentile):\n assert percentile > 0 and percentile <= 100, \"percentile should be larger then 0 and smaller or equal to 100\"\n\n return ardd.sortBy(lambda x: x).zipWithIndex().map(lambda x: (x[1], x[0])) \\\n .lookup(np.ceil(ardd.count() / 100 * percentile - 1))[0]\n\n for column in columns:\n def flatten_column(row):\n return tuple(float(x) for x in row)\n #Compute the percentiles\n lower = percentile_threshold(df.select(column).rdd.flatMap(flatten_column),limits)\n upper = percentile_threshold(df.select(column).rdd.flatMap(flatten_column), 100 - limits)\n\n print('For {column} the lower limit is {lower}'.format(column=column,lower=str(lower)))\n print('For {column} the upper limit is {upper}'.format(column=column,upper=str(upper)))\n\n from pyspark.sql.functions import when\n #Make columns greater then upper bound == to upper bound\n df = df.withColumn(column,\n 
when(df[column] > upper, upper)\n .otherwise(df[column]))\n #Make columns less then lower bound == to lower bound\n df = df.withColumn(column,\n when(df[column] < lower, lower)\n .otherwise(df[column]))\n return df\n elif winzerize_type == 'stddev':\n def replace(df,column_to_filter,standard_deviations=3):\n \"\"\"\n Will remove the outliers that have a stddev higher then x(param standard_deviations).\n\n \"\"\"\n import math\n #This function will flatten the row of the dataframe\n def flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n # print(stddev_threshhold)\n from pyspark.sql.functions import lit,abs\n from pyspark.sql.functions import when\n\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) > 0), (mean+stddev_threshhold))\n .otherwise(df[column_to_filter]))\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) < 0), (mean-stddev_threshhold))\n .otherwise(df[column_to_filter]))\n\n return df\n for column in columns:\n df = replace(df,column,standard_deviation_limit)\n return df\n else:\n from scipy.stats.mstats import winsorize\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n df[column] = winsorize(df[column], limits = limits)\n\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n print('Done.')\n return df\n else:\n return df", "def remove_outliers_by_percentile(dataframe, columns, limits =.01, frame_type='spark'):\n\n if frame_type == 'spark':\n import numpy as np\n df = dataframe\n\n def percentile_threshold(ardd, percentile):\n assert percentile > 0 and percentile <= 100, \"percentile should be larger then 0 and smaller or equal to 100\"\n # df.approxQuantile(\"x\", [0.5], 0.25)\n return ardd.sortBy(lambda x: x).zipWithIndex().map(lambda x: (x[1], x[0])) \\\n .lookup(np.ceil(ardd.count() / 100 * percentile - 1))[0]\n\n for column in columns:\n def flatten_column(row):\n return tuple(float(x) for x in row)\n #Compute the percentiles\n lower = percentile_threshold(df.select(column).rdd.flatMap(flatten_column),limits)\n upper = percentile_threshold(df.select(column).rdd.flatMap(flatten_column), 100 - limits)\n\n print('For {column} the lower limit is {lower}'.format(column=column,lower=str(lower)))\n print('For {column} the upper limit is {upper}'.format(column=column,upper=str(upper)))\n\n from pyspark.sql.functions import lit\n #Filter out outliers\n df = df.where(\"{column} < {upper} AND {column} > {lower} \"\\\n .format(column=column,upper=upper,lower=lower))\n return df\n\n\n else:\n import numpy as np\n\n df = None\n if frame_type == 'h2o':\n # convert to pandas\n df = dataframe.as_data_frame()\n elif frame_type == 'pandas':\n df = dataframe\n\n for column in columns:\n ulimit = np.percentile(train_df[column].values, 100 - limits)\n llimit = np.percentile(train_df[column].values, limits)\n df[column] = df[df[column] < ulimit]\n df[column] = df[df[column] > llimit]\n\n if frame_type == 'h2o':\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n df = h2o.H2OFrame(df)\n 
print('Done.')\n return df\n else:\n return df", "def replace_outliers(data, threshold=4):\n zscores = stats.zscore(data)\n mean, std = data.mean(), data.std()\n data.loc[zscores >= threshold] = mean + std * threshold\n data.loc[zscores <= -threshold] = mean - std * threshold\n\n return data", "def detect_outlier(df,method='iqr',val=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n if method=='z_score':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']\n df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))\n elif method=='iqr':\n for i in c_name:\n q1,q3 = df.approxQuantile(i,[0.25,0.75],0)\n IQR = q3-q1\n lo = q1-(1.5*IQR)\n up = q3+(1.5*IQR)\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n elif method=='std':\n for i in c_name:\n stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()\n m = stat[0]['mean']\n s = stat[0]['std']*thresh\n lo = m - s\n up = m + s\n df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))\n return df", "def replace(df,column_to_filter,standard_deviations=3):\n import math\n #This function will flatten the row of the dataframe\n def flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n # print(stddev_threshhold)\n from pyspark.sql.functions import lit,abs\n from pyspark.sql.functions import when\n\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) > 0), (mean+stddev_threshhold))\n .otherwise(df[column_to_filter]))\n df = df.withColumn(column_to_filter,\n when((abs(df[column_to_filter] - mean) > stddev_threshhold) & ((df[column_to_filter] - mean) < 0), (mean-stddev_threshhold))\n .otherwise(df[column_to_filter]))\n\n return df", "def check_outlier(dataframe, col_name):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name, 0.05, 0.95)\n if dataframe[(dataframe[col_name] > up_limit) |\n (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False", "def _standard_process(col, upper, replace_target=''):\n if col.dtype == 'object' and replace_target != '':\n col = replace_blank(col.copy(), replace_target).astype(np.int)\n mask = (col < upper)\n return col, mask", "def FE_capping_outliers_beyond_IQR_Range(df, features, cap_at_nth_largest=5, IQR_multiplier=1.5,\r\n drop=False, verbose=False):\r\n outlier_indices = []\r\n df = df.copy(deep=True)\r\n if isinstance(features, str):\r\n features = [features]\r\n # iterate over features(columns)\r\n for col in features:\r\n ### this is how the column looks now before capping outliers\r\n if verbose:\r\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))\r\n df[col].plot(kind='box', title = '%s before capping outliers' %col, ax=ax1)\r\n # 1st quartile (25%)\r\n Q1 = np.percentile(df[col], 25)\r\n # 3rd quartile (75%)\r\n Q3 = np.percentile(df[col],75)\r\n # Interquartile range (IQR)\r\n IQR = Q3 - Q1\r\n\r\n # outlier step using multiplier\r\n outlier_step = IQR_multiplier * IQR\r\n\r\n lower_limit = Q1 - outlier_step\r\n upper_limit = Q3 + outlier_step\r\n\r\n # Determine a list of indices of outliers for feature col\r\n outlier_list_col = df[(df[col] < lower_limit) | (df[col] > 
upper_limit )].index\r\n\r\n ### Capping using the n largest value based on n given in input.\r\n maxval = df[col].max() ## what is the maximum value in this column?\r\n num_maxs = df[df[col]==maxval].shape[0] ## number of rows that have max value\r\n ### find the n_largest values after the maximum value based on given input n\r\n num_largest_after_max = num_maxs + cap_at_nth_largest\r\n capped_value = df[col].nlargest(num_largest_after_max).iloc[-1] ## this is the value we cap it against\r\n df.loc[df[col]==maxval, col] = capped_value ## maximum values are now capped\r\n ### you are now good to go - you can show how they are capped using before and after pics\r\n if verbose:\r\n df[col].plot(kind='box', title = '%s after capping outliers' %col, ax=ax2)\r\n plt.show()\r\n\r\n # Let's save the list of outliers and see if there are some with outliers in multiple columns\r\n outlier_indices.extend(outlier_list_col)\r\n\r\n # select certain observations containing more than one outlier in 2 columns or more. We can drop them!\r\n outlier_indices = Counter(outlier_indices)\r\n multiple_outliers = list( k for k, v in outlier_indices.items() if v > 3 )\r\n ### now drop these rows altogether ####\r\n if drop:\r\n print('Shape of dataframe before outliers being dropped: %s' %(df.shape,))\r\n number_of_rows = df.shape[0]\r\n df.drop(multiple_outliers, axis=0, inplace=True)\r\n print('Shape of dataframe after outliers being dropped: %s' %(df.shape,))\r\n print('\\nNumber_of_rows with multiple outliers in more than 3 columns which were dropped = %d' %(number_of_rows-df.shape[0]))\r\n return df", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. 
\n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def clip_outliers(df, std_threshold: float = 3):\n df_std = df.std(axis=0, skipna=True)\n df_mean = df.mean(axis=0, skipna=True)\n\n lower = df_mean - (df_std * std_threshold)\n upper = df_mean + (df_std * std_threshold)\n df2 = df.clip(lower=lower, upper=upper, axis=1)\n\n return df2", "def outlier_determine_threshold(df, col):\r\n df = df.copy(deep=True)\r\n keep_looping = True\r\n number_of_loops = 1\r\n thresh = 5\r\n while keep_looping:\r\n if number_of_loops >= 10:\r\n break\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n pct_outliers = len(dfout_index)/len(df)\r\n if pct_outliers == 0:\r\n if thresh > 5:\r\n thresh = thresh - 5\r\n elif thresh == 5:\r\n return thresh\r\n else:\r\n thresh = thresh - 1\r\n elif pct_outliers <= 0.01:\r\n keep_looping = False\r\n else:\r\n thresh_multiplier = int((pct_outliers/0.01)*0.5)\r\n thresh = thresh*thresh_multiplier\r\n number_of_loops += 1\r\n print(' %s Outlier threshold = %d' %(col, thresh))\r\n return thresh", "def flag_outliers_in_col(self, df, col='paciente_idade', threshold=2):\n data = df[col]\n mean = np.mean(data)\n std = np.std(data)\n outlier = []\n for i in data:\n z = (i-mean)/std\n outlier.append(z > threshold)\n outlier = pd.Series(outlier)\n print(f\"Number of outliers: {outlier.sum()}\")\n return outlier", "def fix_values(df, columns):\n df[df.loc[:, columns] > 90] -= 180\n df[df.loc[:, columns] < -90] += 180\n arg_chi5s = [col for col in df.columns.values if \"ARG\" in col and \"chi5\" in col]\n return df.drop(arg_chi5s, axis=1)", "def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap", "def fast_outlier_id(data, cols=\"All\", method=\"z-score\",\n threshold_low_freq=0.05):\n\n # ASSERT TESTS\n assert isinstance(data, pd.DataFrame), \"Data must be in pandas Data Frame!\"\n\n if type(cols) == str:\n if cols.lower() == \"all\":\n cols = list(data.columns)\n\n if type(cols) != str:\n assert isinstance(cols, list), \"Columns must be inputted in a list\"\n for i in 
cols:\n assert i in list(\n data.columns), \"Columns must exist in the inputted data \" \\\n \"dataframe\"\n\n assert method.lower() in [\"z-score\",\n \"interquartile\"], \\\n \"The only permitted values are z-score or interquantile,thank you\"\n\n # Initialize lists containing summary values\n no_nans_list = list()\n col_type_list = list()\n perc_nans_list = list()\n outlier_values_list = list()\n outlier_count_list = list()\n outlier_perc_list = list()\n method_list = list()\n\n # Subsetting the data by the columns selected by the user\n subset = data[cols]\n for i in cols:\n # More lists containing summary values\n no_nans = subset[i].isna().sum()\n no_nans_list.append(no_nans)\n col_type_list.append(subset[i].dtype)\n perc_nans_list.append(round(no_nans / len(subset[i]), 2))\n data_no_nans = subset[i][~pd.isna(subset[i])]\n if data_no_nans.dtypes in ['float64', 'int64']:\n if method.lower() == \"z-score\":\n score = np.abs(stats.zscore(data_no_nans))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 2)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Z-Score\")\n elif method.lower() == \"interquartile\":\n Q1 = np.quantile(data_no_nans, 0.25)\n Q3 = np.quantile(data_no_nans, 0.75)\n IQR = Q3 - Q1\n score = (data_no_nans < (Q1 - 1.5 * IQR)) | (\n data_no_nans > (Q3 + 1.5 * IQR))\n data_no_nans = data_no_nans.to_numpy()\n outlier_values = data_no_nans[np.where(score > 0)]\n outlier_count_list.append(len(outlier_values))\n outlier_perc_list.append(\n round(len(outlier_values) / len(data_no_nans), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"Interquartile\")\n elif data_no_nans.dtype in ['object']:\n score = data_no_nans.value_counts() / len(data_no_nans)\n outlier_values = score[score < threshold_low_freq].index.tolist()\n outlier_count_list.append(\n data_no_nans.value_counts()[score < threshold_low_freq].sum())\n outlier_perc_list.append(\n round(sum(score[score < threshold_low_freq]), 2))\n outlier_values_list.append(outlier_values)\n method_list.append(\"low-freq\")\n summary_dict = {'column_name': cols, 'type': col_type_list,\n 'no_nans': no_nans_list, 'perc_nans': perc_nans_list,\n 'outlier_method': method_list,\n \"no_outliers\": outlier_count_list,\n \"perc_outliers\": outlier_perc_list,\n \"outlier_values\": outlier_values_list}\n summary = pd.DataFrame(summary_dict)\n return (summary)", "def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices", "def calculate_data_will_be_affected(dataframe, column,\n low_quantile=0.0, up_quantile=1.0):\n ratio_for_up_limit = (len(dataframe[dataframe[column] > dataframe[column].\n quantile(up_quantile)]) /\n dataframe[column].shape[0]) * 100\n ratio_for_low_limit = (len(dataframe[dataframe[column] < 
dataframe[column].\n quantile(low_quantile)]) /\n dataframe[column].shape[0]) * 100\n ratio_of_affected_data = round(ratio_for_up_limit + ratio_for_low_limit, 2)\n print(\"When we do reassign operation in the ({} - {}) range, %{} of data \"\n \"will be affected in this operation.\".format(low_quantile,\n up_quantile,\n ratio_of_affected_data))", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def apply_bounds(self, column_name, lower_bound=-np.inf,\n upper_bound=np.inf):\n self.check_for_column(column_name)\n\n if lower_bound is None:\n lower_bound = -np.inf\n if upper_bound is None:\n upper_bound = np.inf\n column = self.data[column_name]\n self.data[column_name] = column.clip(lower_bound, upper_bound)", "def define_threshold(self, threshold):\n def func(x, threshold):\n if x > threshold:\n return 'up'\n elif x < -threshold:\n return 'down'\n else:\n return 'stable'\n try:\n self.df['Direction'] = self.df.apply(lambda x: func(x['Return'], threshold), axis=1)\n except:\n print(\"issue\")\n return", "def scale(df, lower=-1, upper=1):\n\n scaler = MinMaxScaler(feature_range=(lower, upper))\n scaler = scaler.fit(df)\n\n # Replace values with the scaled dataframe\n df[['Input', 'Output']] = scaler.transform(df)\n\n return df", "def replace_invalids(data, threshold = 50, print_vals = False, med=True):\n \n # prints a description of the features if print_vals True\n means, stds, medians, mins, maxs = summarize_features(data, print_vals=print_vals)\n \n if(print_vals):\n print(\"Number of invalid values: \" + str(len(data[data == -999.0])))\n print(\"Number of Nan values: \" + str(np.count_nonzero(np.isnan(data))))\n print(\"Shape: \" + str(data.shape))\n print()\n \n # gets the number of invalid values for each feature\n percent_invalid = find_percentage_of_invalid(data, mins, print_vals=print_vals)\n \n # stores the indices of the features to delete because the number of invalid values is above the threshold\n to_delete = [k for k,v in percent_invalid.items() if v > threshold]\n \n # stores the indices of the features where the invalid values need to be replaces\n change_to_mean = [k for k in percent_invalid.keys() if k not in to_delete]\n \n for idx in change_to_mean:\n # puts the value np.NaN in place of the invalid values in the features to modify to make the measurements on the features\n # without the invalid values\n data[:,idx] = np.where(data[:,idx]==-999, np.NaN, data[:,idx])\n \n \n # calculates the means, medians and means without the invalid values and NaNs\n n_means, _, n_medians, n_mins, _ = summarize_features(data, include_nan=False, print_vals=print_vals)\n \n \n if med:\n for idx in change_to_mean:\n # replacing the invalid values by the median if med is True\n data[:,idx] = np.where(np.isnan(data[:,idx]), n_medians[idx], data[:,idx])\n else :\n for idx in change_to_mean:\n # replacing the invalid values by the mean if med is False\n data[:,idx] = np.where(np.isnan(data[:,idx]), n_means[idx], data[:,idx])\n\n \n n_percent_invalid = find_percentage_of_invalid(data, n_mins, print_vals=print_vals)\n \n # deletes the features to discard\n data = np.delete(data,to_delete,axis=1)\n \n if(print_vals):\n print()\n print(\"Number of invalid values: \" + str(len(data[data == -999.0])))\n print(\"Number of Nan values: \" + str(np.count_nonzero(np.isnan(data))))\n print(\"New shape: \" + str(data.shape))\n \n return data", "def handle_invalid(x, 
column_names=None):\n\n invalid_value = -999.0\n invalid_threshold = 0.7\n\n # Remove columns with a pct of invalid values above 70%\n pct_undef = (x <= invalid_value).mean(axis=0)\n below_thresh = pct_undef < invalid_threshold\n\n print(f\"{(~below_thresh).sum()} columns are above the invalid threshold. Removing\", end=\"\\n\\t\")\n if column_names is not None:\n print(*column_names[~below_thresh], sep=\"\\n\\t\")\n column_names = column_names[below_thresh]\n\n x = x[:, below_thresh]\n\n # Replace -999 with mean value of remaining values for each column still in dataset\n for i in range(x.shape[1]):\n col = x[:, i]\n mean = col[col > invalid_value].mean()\n col[col <= invalid_value] = mean\n\n return x, column_names", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap" ]
[ "0.7123759", "0.6695039", "0.66256833", "0.65285146", "0.63990915", "0.6376331", "0.6319013", "0.6014559", "0.60104", "0.59987855", "0.5969919", "0.5924092", "0.59177697", "0.58993465", "0.58477944", "0.5838635", "0.5733036", "0.5685656", "0.5676063", "0.5664337", "0.56605303", "0.5643804", "0.5597103", "0.55750096", "0.55446434", "0.5531845", "0.55283844", "0.5508987", "0.5504877", "0.5504877" ]
0.8196213
0
This function plots a boxplot of the 'price' column grouped by the 'category_id' column for visual examination.
def plot_columns(dataframe, title):
    sns.boxplot(x=dataframe['category_id'], y=dataframe['price'])
    plt.title(title)
    plt.xlabel('Category ID')
    plt.ylabel('Price')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_cat(df, cat_columns, hue = \"default_payment_next_month\"):\n fig = plt.figure(figsize = (20,(len(cat_columns)/2+1)*8))\n loc = 1\n for col in cat_columns:\n ax = fig.add_subplot(len(cat_columns)/2+1, 2, loc)\n df_plot = df[[col, hue, \"id\"]].groupby([col, hue]).count()\n df_plot.reset_index(inplace = True)\n sns.barplot(x=col, y= \"id\", hue = hue, data=df_plot, palette = \"GnBu_d\", ax = ax);\n plt.legend(title = \"default payment (1=yes, 0=no)\")\n plt.ylim([0.0001,15000])\n plt.ylabel(\"clients\");\n loc += 1", "def graph(df):\n df.plot()\n plt.show()", "def _category_plot(self, element, x, y, data):\n labelled = ['y' if self.invert else 'x'] if x != 'index' else []\n if self.value_label != 'value':\n labelled.append('x' if self.invert else 'y')\n\n if 'xlabel' in self._plot_opts and 'x' not in labelled:\n labelled.append('x')\n if 'ylabel' in self._plot_opts and 'y' not in labelled:\n labelled.append('y')\n\n opts = {'plot': dict(self._plot_opts, labelled=labelled),\n 'style': dict(self._style_opts),\n 'norm': self._norm_opts}\n\n id_vars = [x]\n if any(v in self.indexes for v in id_vars):\n data = data.reset_index()\n data = data[y+[x]]\n\n if check_library(data, 'dask'):\n from dask.dataframe import melt\n else:\n melt = pd.melt\n\n df = melt(data, id_vars=[x], var_name=self.group_label, value_name=self.value_label)\n kdims = [x, self.group_label]\n vdims = [self.value_label]+self.hover_cols\n if self.subplots:\n obj = Dataset(df, kdims, vdims).to(element, x).layout()\n else:\n obj = element(df, kdims, vdims)\n return obj.redim(**self._redim).relabel(**self._relabel).opts(**opts)", "def drawCatplot(df, xColumn):\n plt.style.use('default')\n plt.style.use('dark_background')\n types = getSpectralTypes()\n colors = getColors()\n sns.set_palette(sns.color_palette(colors))\n \n sns.catplot(x=xColumn, y=\"spectral_type\", data=df, order=types, height=3, \n aspect=4);\n plt.show()", "def plot_catplot(\n df: pd.DataFrame,\n cat_col: str,\n quanti_cols_list: list,\n order: list = None,\n h: int = 5,\n w: int = 10,\n) -> sns.catplot:\n\n # iterate over quantiative variables\n # and plot catplot against the category variable\n for col in quanti_cols_list:\n sns.catplot(\n data=df,\n kind=\"bar\",\n x=cat_col,\n y=col,\n order=order,\n ci=None,\n height=h,\n aspect=w / h,\n )\n plt.show()\n return", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, 
T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def plotting_helper_method(x_axis, y_axis, df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for color, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n plt.scatter(filtered_df[x_axis], filtered_df[y_axis], c=color, label=genre)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def data_visualization(df):\r\n\r\n # Visualizing the target variable\r\n plt.figure(figsize=(14, 10))\r\n plt.title(\"Count of bike sharing according to dates\")\r\n plt.plot(df['dteday'], df['cnt'])\r\n #plt.show()\r\n plt.savefig(\"Raw data visualization.png\")\r\n\r\n # box plot for visualizing outliers\r\n fig=px.box(df, y=\"cnt\", notched=True,title='Box plot of the count variable')\r\n #fig.show()\r\n plt.savefig(\"Box Plot.png\")\r\n\r\n # point plot for hourly utilization\r\n for column in ['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit']:\r\n hist = px.histogram(df, x=column, y='cnt')\r\n hist.show()\r\n plt.savefig(\"Histogram plots for each column.png\")\r\n sns.pointplot(x=df['hr'], y='cnt', data=df);\r\n plt.title(\"Hourly Utilization\")\r\n plt.ylabel(\"Bike Shares\", fontsize=12)\r\n plt.xlabel(\"Hour\", fontsize=12)\r\n plt.savefig(\"Hourly Utilization point plot.png\", dpi=300, bbox_inches='tight')\r\n\r\n # line plot for hourly utilization\r\n for c in ['holiday','season','workingday']:\r\n sns.lineplot(data=df,x='hr',y='cnt',hue=c)\r\n plt.title('Hourly plot vs count')\r\n plt.savefig(\"Hour vs count plot_main features.png\",dpi=300, bbox_inches='tight')\r\n\r\n # point plots for humidity vs count\r\n sns.pointplot(x='hum', y='cnt', data=df)\r\n plt.title(\"Amount of bike shares vs humidity\", fontsize=25)\r\n plt.xlabel(\"Humidity (%)\", fontsize=20)\r\n plt.ylabel('count of bike shares', fontsize=20)\r\n plt.locator_params(axis='x', nbins=10)\r\n plt.savefig(\"Pointplot of humidity vs 
count.png\",dpi=300, bbox_inches='tight')\r\n\r\n # box plots of whole df\r\n bx=px.box(df, y=\"cnt\")\r\n bx.show()\r\n\r\n # feature correlation plot\r\n corrs = abs(df.corr())\r\n sns.heatmap(corrs, annot=True)\r\n plt.title(\"Feature Correlation\")\r\n plt.savefig(\"Feature_correlation.png\", dpi=300, bbox_inches='tight')\r\n return plt", "def plot_selected(df, title='title', columns=[], shouldNormalize=True, symbol='any stock'):\n # df = df[columns][start_index:end_index]\n # df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel = \"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:, ['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n # print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def plot_selected(df, title='title', columns=[], shouldNormalize = True, symbol='any stock'):\n #df = df[columns][start_index:end_index]\n #df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel=\"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:,['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n #print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def plot_density(df: pd.DataFrame, title: str, column_line_name: list, column_category_name: str, plot_width: int = 330,\n plot_height: int = 330, colours: list = ['#00BFA5', \"#8c9eff\", \"#536dfe\"]):\n\n hover = HoverTool()\n\n p = figure(title=title, plot_width=plot_width, plot_height=plot_height, tools=[\"save\", hover])\n\n for ind, category_ in enumerate(df[column_category_name].unique()):\n temp_df = df[df[column_category_name] == category_]\n density = stats.kde.gaussian_kde(temp_df[column_line_name])\n xs = np.linspace(0, 1, 100)\n source = ColumnDataSource(pd.DataFrame({'density': density(xs), 'xs': xs}))\n p.line(x='xs', y='density', source=source, line_color=colours[ind], legend=category_, line_width=2)\n\n p.title.text_font = p.xaxis.axis_label_text_font = p.yaxis.axis_label_text_font = \"Helvetica Neue\"\n p.xgrid.visible = p.ygrid.visible = False\n\n tooltips = [(\"density\", \"@ density {0%}\")]\n\n hover = p.select(dict(type=HoverTool))\n hover.tooltips = tooltips\n\n return p", "def performance_level1_plot(self, df, urls_map=None, asl=False):\n fig, ax = plt.subplots(nrows=1, ncols=1)\n fig.set_size_inches(10,12)\n\n bars = ax.barh(range(len(df)), df['Total ASL'], color=df['Color'], height=0.5, zorder=2) if asl else ax.barh(range(len(df)), df['Amount'], color=df['Color'], height=0.5, zorder=2)\n # ax.set_title('\\nTotal Service Price\\n\\n', fontsize=14)\n ax.title.set_position([0, 1])\n ax.title.set_horizontalalignment('left')\n ax.set_title('Level 2 Services', rotation='vertical', x=-0.3, y=0.5, fontsize=14)\n\n ax.set_yticks(range(len(df)))\n labels = ax.set_yticklabels(self.shorten_ytick_labels(df['Category']))\n for label in labels:\n label.set_horizontalalignment('left')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.xaxis.grid(color='lightgray', linewidth=2)\n ax.tick_params(axis='x', which='major', labelsize=12, length=0)\n ax.tick_params(axis='y', which='major', labelsize=12, length=0)\n\n pos1 = ax.get_position() # get the original position\n pos2 = [pos1.x0 + 0.15, pos1.y0 + 0.1, pos1.width, pos1.height]\n ax.set_position(pos2) # set a new position\n\n xmin, xmax = ax.get_xlim()\n xmin = xmax * 0.002 * -1\n 
ax.set_xlim(xmin, xmax)\n xaxis_label = 'ASL' if asl else 'Dollars'\n ax.set_xlabel(xaxis_label, fontsize=14)\n ax.yaxis.set_label_coords(0, 1)\n\n # Create the tooltips for the bar graph\n for i, bar in enumerate(bars.get_children()):\n style = 'style=\"background: white; border: 1px solid black; padding: 2px;\"'\n fargs = (style, df['Category'].iloc[i], df['Formatted Amount'].iloc[i])\n label = '<div {}><strong>{}</strong><br>{}</div>'.format(*fargs)\n tooltip = mpld3.plugins.LineHTMLTooltip(bar, label=label, hoffset=15, voffset=-5)\n mpld3.plugins.connect(fig, tooltip)\n # Create links in x-axis labels\n xlabellinks = TickLabelLink(urls_map, axis='y')\n mpld3.plugins.connect(fig, xlabellinks)\n # Format the x-axis values\n mpld3.plugins.connect(fig, XTickFormat())\n # Fix hbar graph to make it look better\n mpld3.plugins.connect(fig, FixHBarGraph())\n html = mpld3.fig_to_html(fig)\n return html", "def PlotProduct(product, keys=['AMAZON', 'USED', 'COUNT_USED', 'SALES'],\n price_limit=1000):\n if not plt_loaded:\n raise Exception('Plotting not available. Install matplotlib with:\\n' +\n 'pip install matplotlib')\n\n if 'data' not in product:\n product['data'] = ParseCSV[product['csv']]\n\n # Use all keys if not specified\n if not keys:\n keys = product['data'].keys()\n\n # Create three figures, one for price data, offers, and sales rank\n pricefig, priceax = plt.subplots(figsize=(10, 5))\n pricefig.canvas.set_window_title('Product Price Plot')\n plt.title(product['title'])\n plt.xlabel('Date')\n plt.ylabel('Price')\n pricelegend = []\n\n offerfig, offerax = plt.subplots(figsize=(10, 5))\n offerfig.canvas.set_window_title('Product Offer Plot')\n plt.title(product['title'])\n plt.xlabel('Date')\n plt.ylabel('Listings')\n offerlegend = []\n\n salesfig, salesax = plt.subplots(figsize=(10, 5))\n salesfig.canvas.set_window_title('Product Sales Rank Plot')\n plt.title(product['title'])\n plt.xlabel('Date')\n plt.ylabel('Sales Rank')\n saleslegend = []\n\n # Add in last update time\n lstupdate = keepaTime.KeepaMinutesToTime(product['lastUpdate'])\n\n # Attempt to plot each key\n for key in keys:\n # Continue if key does not exist\n if key not in product['data']:\n print('%s not in product' % key)\n continue\n\n elif 'SALES' in key and 'time' not in key:\n if product['data'][key].size == 1:\n print('%s not in product' % key)\n continue\n x = np.append(product['data'][key + '_time'], lstupdate)\n y = np.append(product['data'][key],\n product['data'][key][-1]).astype(np.float)\n ReplaceInvalid(y)\n salesax.step(x, y, where='pre')\n saleslegend.append(key)\n\n elif 'COUNT_' in key and 'time' not in key:\n x = np.append(product['data'][key + '_time'], lstupdate)\n y = np.append(product['data'][key],\n product['data'][key][-1]).astype(np.float)\n ReplaceInvalid(y)\n offerax.step(x, y, where='pre')\n offerlegend.append(key)\n\n elif 'time' not in key:\n x = np.append(product['data'][key + '_time'], lstupdate)\n y = np.append(product['data'][key],\n product['data'][key][-1]).astype(np.float)\n ReplaceInvalid(y, max_value=price_limit)\n priceax.step(x, y, where='pre')\n pricelegend.append(key)\n\n # Add in legends or close figure\n if pricelegend:\n priceax.legend(pricelegend)\n else:\n plt.close(pricefig)\n\n if offerlegend:\n offerax.legend(offerlegend)\n else:\n plt.close(offerfig)\n\n if not saleslegend:\n plt.close(salesfig)\n\n plt.show(block=True)\n plt.draw()", "def show_log(logfile, category, selection, x_mode, y_mode):\n cat_dict = dict()\n cat = 0\n with open(logfile, 'rt') as f:\n f = 
csv.reader(f)\n cat_rows = []\n for idx, row in enumerate(f):\n if (idx != 0) and row[0] == 'epoch':\n cat_dict['{}'.format(cat)] = np.asarray(cat_rows)\n cat = cat + 1\n cat_rows = []\n cat_rows.append(row)\n cat_dict['{}'.format(cat)] = np.asarray(cat_rows)\n\n fig, ax = plt.subplots()\n min_vals = []\n for key in selection:\n key = str(key)\n cat_idx = np.where(cat_dict[key][0] == category)[0][0]\n cat_name = cat_dict[key][1, cat_idx].astype(str)\n\n if x_mode == 'epoch':\n x_values = cat_dict[key][1:, 0].astype(int)\n unit = ' #'\n y_column = np.where(cat_dict[key][0] == y_mode)[0][0]\n\n y_values = cat_dict[key][1:, y_column].astype(float)\n min_vals.append(np.min(y_values))\n\n ax.plot(x_values, y_values, label=cat_name, lw=2.0)\n ax.set_ylabel(y_mode)\n ax.set_xlabel(x_mode + unit)\n\n plt.grid()\n plt.legend()\n return fig, ax", "def plot_data(dataframe: pd.DataFrame, category: str=None, show_corr: bool=False) -> None:\n if category:\n dataframe = dataframe.loc[:, dataframe.columns.str.contains(category)]\n\n if show_corr:\n _ , axs = plt.subplots(1, 2, figsize=(30, 15))\n # --- display missing values\n sns.heatmap(dataframe.isna(),\n cbar=False,\n ax=axs[0])\n # --- display correlation heatmap\n corr = dataframe.corr()\n sns.heatmap(corr,\n mask = np.triu(np.ones_like(corr, dtype=bool)),\n ax = axs[1],\n center=0)\n else:\n _ , axs = plt.subplots(1, 1, figsize=(15,15))\n # --- display missing values\n sns.heatmap(dataframe.isna(),\n cbar=False,\n ax=axs)\n return 0", "def graph(self):\n seq_obj = MultiSequence(self.symbol, self.__best_model.window_size,1)\n test_predict = self.__best_model.model.predict(seq_obj.X)\n\n #our data is scaled between -1 and 1 so lets scale it back up\n scaler = MinMaxScaler(feature_range=(self.__min_price ,self.__max_price))\n orig_data = seq_obj.original_data.reshape(-1,1)\n orig_prices = scaler.fit_transform(orig_data).flatten()\n \n # plot actual prices\n plt.plot(orig_prices, color='k')\n \n # plot test set prediction after scaling back up\n length = len(seq_obj.X) + self.__best_model.window_size \n test_in = np.arange(self.__best_model.window_size,length,1)\n pred_prices = scaler.fit_transform(test_predict.reshape(-1,1)).flatten()\n plt.plot(test_in,pred_prices,color = 'b')\n \n # pretty up graph\n plt.xlabel('day')\n plt.ylabel('Closing price of stock')\n plt.title(\"Price prediction for {}\".format(self.symbol))\n plt.legend(['Actual','Prediction'],loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()", "def show_results_geopandas(df, number_of_categories, title, subtitle, legend_title, column_name, selected_classification):\n\n # Set up the subplot\n fig, ax = plt.subplots(figsize=(12, 8), subplot_kw={'aspect': 'equal'})\n\n # Assign the new temporary column cl to the data frame and plot it\n df.plot(column=column_name,\n scheme=selected_classification,\n categorical=True,\n k=number_of_categories,\n cmap='OrRd',\n linewidth=0.1,\n ax=ax,\n edgecolor='white',\n legend=True,\n legend_kwds={'loc': 'best', 'title': legend_title, 'shadow': True},\n )\n\n # Set a title and a subtitle\n fig.suptitle(title, y=0.98, fontsize=16)\n plt.title(subtitle, fontsize=10)\n\n # Blend the axis off\n ax.set_axis_off()\n\n # Just for PyCharm\n #plt.show()\n\n # legend_kwds={'label': \"Population by Country\", 'orientation': \"vertical\"}\n return", "def visualise_food_consumption(data: LogData, directory: Path):\n\n figure, axes = plot.subplots()\n\n food_history = get_food_history(data)\n\n axes.plot(food_history.keys(), food_history.values(), label=\"Food\", 
color=\"blue\", **{\"ls\": \"--\"})\n\n axes.legend(loc=\"upper left\")\n axes.set_xlim(0, data.duration_secs())\n axes.set_xlabel(\"Time (seconds)\")\n axes.set_ylabel(\"Amount\")\n axes.set_title(\"Food availability\")\n\n plot.savefig(directory / Path(\"food_consumption.png\"))\n plot.close()", "def show_cleaned_vis(data, x, y = 'price', categorical = False, kde = True):\n\n ### Filter outliers first\n \n idx_out = find_outliers_IQR(data[x])\n \n df_cleaned = data[~idx_out].copy()\n\n ### Plot Data\n \n df_cleaned.value_counts().sort_index()\n \n fig, axs = plt.subplots(ncols=2, figsize= (12,6))\n \n sns.regplot(data=df_cleaned, x=x, y=y, ax=axs[0],line_kws={\"color\": \"red\"})\n sns.histplot(data=df_cleaned, x=x, discrete=categorical, kde=kde, ax=axs[1])\n \n fig.suptitle(f'{x.title()} vs. {y.title()}', fontsize=16)\n plt.tight_layout();\n \n return #df_cleaned", "def create_income_expense_scatter_plot(year_id):\n \n month_objects = get_months_by_year(year_id)\n \n # build chart data \n months = convert_to_verbose_months(month_objects)\n\n y_expenses = get_transactions_sum_data(month_objects, amount_type='expenses')\n \n y_incomes = get_transactions_sum_data(month_objects, amount_type='incomes')\n \n # buids scatter-chart\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(\n x=months, y=y_expenses, name=\"Gastos\",\n line=dict(color='#b22222', width=4)\n ))\n \n fig.add_trace(go.Scatter(\n x=months, y=y_incomes, name=\"Rendas\",\n line=dict(color='#22b222', width=4)\n ))\n\n fig.update_layout(\n xaxis_title='Meses',\n yaxis_title='Agregado por mês'\n )\n\n # render chart in a context\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n\n return plot_div", "def plot_equity_prices(ticker, prices):\n\n # define x-axis data points\n x = np.linspace(0, prices.shape[0], prices.shape[0])\n\n plt.plot(x, prices[ticker], linewidth=1, color='b', label=ticker)\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Price')\n plt.title('Price vs Time: ' + ticker)\n plt.show()", "def plot_equity_price_analytics(ticker, e_ma, s_ma, t_wap, v_wap, close_prices, interval):\n\n # define x-axis data points\n x = np.linspace(0, close_prices.shape[0], close_prices.shape[0])\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.plot(x, close_prices, linewidth=1, color='k', label='Close Price')\n axis.plot(x, e_ma, linewidth=1, color='r', label='EMA Price')\n axis.plot(x[interval:], s_ma, linewidth=1, color='b', label='SMA Price')\n axis.plot(x[interval:], t_wap, linewidth=1, color='g', label='TWAP Price')\n axis.plot(x[interval:], v_wap, linewidth=1, color='m', label='VWAP Price')\n axis.legend(loc='upper left')\n axis.set_xlabel('Time (days)')\n axis.set_ylabel('Price')\n axis.set_title('Price vs Time: ' + ticker)\n\n return figure", "def _line_example_2_chart(price_by_date_and_country):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"datetime\")\n ch.set_title(\"Line charts - Grouped by color\")\n ch.plot.line(\n # Data must be sorted by x column\n data_frame=price_by_date_and_country.sort_values(\"date\"),\n x_column=\"date\",\n y_column=\"total_price\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def 
density(categorical_var, numerical_var):\n #print(categorical_var)\n cat_list = categorical_var.astype('category')\n for cat in cat_list:\n sns.kdeplot(numerical_var[categorical_var == cat], label=cat)#, categorical_var)\n\n plt.show()", "def plot_data(data_frame):\n # plot the price at daily close of the dow after each day\n data_frame[\"Close\"].plot()\n\n # show the plot\n plot.show()\n\n input(\"Press enter to continue\")", "def hourlyplot(metdat, catinfo, category=None, basecolor='span'):\n\n if category is None:\n print('not sure what to plot...')\n pass\n\n colors = utils.get_colors(len(catinfo['columns'][category]), basecolor=basecolor, reverse=True)\n colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category], reverse=True)\n \n plotdat = metdat[colnames].groupby(metdat.index.hour).mean()\n \n fig, ax = plt.subplots(figsize=(5,3.5), sharex=True, sharey=True)\n for iax in range(len(colnames)):\n ax.plot(plotdat[colnames[iax]], color=colors[iax])\n\n leg = ax.legend([str(v) + ' m' for v in vertlocs], loc=6, bbox_to_anchor=(1, 0.5), frameon=False)\n ax.set_xlabel('Time [hour]')\n ax.set_ylabel(catinfo['labels'][category])\n\n fig.tight_layout()\n \n return fig, ax", "def plot_equity_price_analytics(ticker, e_ma, s_ma, t_wap, v_wap, mean_prices):\n\n # define x-axis data points\n x = np.linspace(0, mean_prices.shape[0], mean_prices.shape[0])\n\n plt.plot(x, mean_prices, linewidth=1, color='k', label='Mean Price')\n plt.plot(x, e_ma, linewidth=1, color='r', label='EMA Price')\n plt.plot(x[interval:], s_ma, linewidth=1, color='b', label='SMA Price')\n plt.plot(x[interval:], t_wap, linewidth=1, color='g', label='TWAP Price')\n plt.plot(x[interval:], v_wap, linewidth=1, color='m', label='VWAP Price')\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Price')\n plt.title('Price vs Time: ' + ticker)\n plt.show()", "def multirow_scatter(dataframe, key, col_head, x_vals, x_label, y_label):\n #collect unique keys to use as titles for each graph\n unique_keys = dataframe[key].unique()\n #create empty dict to store figure objects\n fig_dict = {}\n\n for entry in unique_keys:\n #collect all instances of a single key and set index\n y_cols = dataframe.loc[(dataframe[key] == entry)].set_index(col_head)\n y_cols = y_cols.drop(key, axis=1)\n #collect y_labels\n y_list = y_cols.index.values\n #make empty figure axis\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #for each x_col, add y_ against urea to plot\n for label in y_list:\n x = x_vals\n y = y_cols.loc[label]\n name = label\n ax.plot(x, y, label=name, marker='.')\n #adjust plot parameters\n Leg_pos = -(len(y_list)/10+0.1)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(entry)\n ax.legend(bbox_to_anchor=(0., Leg_pos, 1., .102), loc=4,\n ncol=1, mode=\"expand\", borderaxespad=0.)\n #append figure object to dictionary according to protein name\n fig_dict[entry] = fig\n\n return fig_dict" ]
[ "0.62422293", "0.5959139", "0.587027", "0.58107173", "0.5709584", "0.5664822", "0.56324095", "0.5617443", "0.5605391", "0.5602949", "0.5583337", "0.558135", "0.55679935", "0.5539779", "0.55202734", "0.5518238", "0.5473018", "0.5467684", "0.5465236", "0.54648185", "0.5434635", "0.54312754", "0.5386179", "0.53859687", "0.53836447", "0.5381808", "0.53794986", "0.5368504", "0.5357119", "0.53507924" ]
0.6512989
0
This function uses the Shapiro-Wilk test to check whether the target column of the given dataframe follows a normal distribution within each category of the iteration column, returning the categories that do and do not pass the test.
def test_normality(dataframe, iteration_column, target_column):
    normals = []
    not_normals = []
    category_ids = [cat_id for cat_id in dataframe[iteration_column].unique()]
    for id_ in category_ids:
        ttest, p_value = shapiro(
            dataframe.loc[dataframe[iteration_column] == id_, target_column])
        if p_value >= 0.05:
            normals.append(id_)
        else:
            not_normals.append(id_)
    return normals, not_normals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_normality(df, features_to_analyse, p_value_threshold=0.05):\n \n is_normal_threshold = 1 - p_value_threshold\n\n normality_results = pd.DataFrame(data=None, index=['stat','pval'], columns=features_to_analyse)\n for f, feature in enumerate(features_to_analyse):\n try:\n stat, pval = shapiro(df[feature])\n # NB: UserWarning: Input data for shapiro has range zero \n # Some features contain all zeros - shapiro(np.zeros(5))\n normality_results.loc['stat',feature] = stat\n normality_results.loc['pval',feature] = pval\n except Exception as EE:\n print(\"WARNING: %s\" % EE)\n \n prop_normal = (normality_results.loc['pval'] < p_value_threshold).sum()/len(features_to_analyse) \n if prop_normal > is_normal_threshold:\n print(\"\"\"More than %d%% of control features (%.1f%%) were found to obey a \n normal (Gaussian) distribution, so parametric analyses will be \n preferred.\"\"\" % (is_normal_threshold*100, prop_normal*100))\n TEST = f_oneway\n else:\n print(\"\"\"Less than %d%% of control features (%.1f%%) were found to obey a \n normal (Gaussian) distribution, so non-parametric analyses will be \n preferred.\"\"\" % (is_normal_threshold*100, prop_normal*100))\n TEST = kruskal\n return TEST", "def return_in_norm_df(df, col, sigma):\n return np.abs(df[col] - df[col].mean()) <= (sigma*df[col].std())", "def normality_test(df, columns, multivariate=False):\n if multivariate:\n # Multivariate testing.\n is_normal, p = df.groupby(['user', 'block'])[columns].apply(pg.multivariate_normality)\n res = df.groupby(['user', 'block'])[['df1', 'df2']].apply(pg.multivariate_normality).apply(pd.Series)\\\n .rename(columns={0: 'normal', 1: 'p'})\n else:\n # We would want to minimize type II error rate, risk of not rejecting the null when it's false.\n res = df.groupby(['user', 'block'])[columns].apply(pg.normality).unstack(level=2) # Shapiro-Wilk tests.\n return res", "def test():\n df = df_random()\n print('Random DataFrame')\n print(df.head())\n\n # Test the numerical column generator\n df['delta_v'] = df_numeric_column(-100, 100)\n print('\\nNumerical column generator (added delta_v)')\n print(df.head())\n\n # Test the categorical column generator\n df['color'] = df_categorical_column(['red', 'green', 'blue'])\n print('\\nCategorical column generator (added color)')\n print(df.head())\n\n # Test the categorical column generator with probabilities\n df['color'] = df_categorical_column(['red', 'green', 'blue'], probabilities=[0.6, 0.3, 0.1])\n print('\\nProbabilities should be ~60% red, %30 green and %10 blue')\n print(df['color'].value_counts())\n\n # Also we can just use the built in Numpy method for detailed control\n # over the numeric distribution\n my_series = pd.Series(np.random.normal(0, 1, 1000))\n print('\\nStats on numpy normal (gaussian) distribution')\n print(my_series.describe())", "def test_call(self):\n expected = self.df.columns\n actual = self.normalizer()(\n self.df, **self.kwargs).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)", "def check_dataframe_valid(self, df, option):\n # display(df)\n if df[option].isna().sum() > df.shape[0]/2:\n print(\"invalid data\")\n return False\n else:\n print(\"valid data\")\n return True", "def test_single(self):\n df = self.df.head(1).copy()\n n = df.index.size\n out = cross_ratios(df)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))", "def checkrowstest(chosen_df):\n if not chosen_df.shape[0] >= 
1:\n raise ValueError('Less than 10 rows')", "def test_check_numeric_columns_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n expected_call_args = {0: {\"args\": (d.create_df_2(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.numeric.ScalingTransformer,\n \"check_numeric_columns\",\n expected_call_args,\n return_value=d.create_df_2(),\n ):\n\n x.fit(df)", "def test_column_missing(self):\n columns = self.normalizer().config['columns']\n for column in columns:\n df = self.df.copy()\n df = df.drop(column, axis=1)\n with self.assertRaises(ValueError):\n self.normalizer().normalize(df, **self.kwargs)", "def checkStdDev(df,thr):\n greaterThanThreshold = True\n positions= np.array([])\n for i in range(1,df.shape[0]):\n stdDev = np.std(df.iloc[i,1:].astype(np.longdouble))\n if (stdDev < thr):\n greaterThanThreshold = False\n positions = np.append(positions,i)\n \n return greaterThanThreshold", "def test_in_silico_essentiality(input_df, expected_df, model):\n in_silico = essential.in_silico_essentiality(model, input_df)\n assert in_silico[:2].equals(expected_df)", "def test_non_pd_df_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=[1, 2, 3, 4, 5, 6])", "def test_non_pd_df_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.columns_set_or_check(X=[1, 2, 3, 4, 5, 6])", "def test_X_returned(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n df_returned = x.check_numeric_columns(df)\n\n h.assert_equal_dispatch(\n expected=df,\n actual=df_returned,\n msg=\"unexepcted object returned from check_numeric_columns\",\n )", "def get_stat_dif(column, target_column, data, alpha):\n cols = data.loc[:, column].value_counts().index[:]\n combinations_all = list(combinations(cols, 2))\n for comb in combinations_all:\n a = data.loc[data.loc[:, column] == comb[0], target_column]\n b = data.loc[data.loc[:, column] == comb[1], target_column]\n result = ttest_ind(a, b).pvalue\n\n if result <= alpha/len(combinations_all):\n print('Найдены статистически значимые различия для колонки', column)\n break", "def testExampleDataFrameGeneration(ref):\n df = generate_dataframe()\n columns = ref.all_fields_except(['random'])\n ref.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def prod_test(dataframe):\n dataframe.columns = [\"word_values\"]\n dataframe = dataframe.dropna(subset=['word_values'])\n return su.pipe().transform(dataframe)", "def test_full_function(self):\n\t\t# Import the rubric\n\t\ttemplate_data = pd.read_csv('tests/statcast_spin/live_Darvish_July2019_test.csv').round(SIG_DIG)\n\n\t\t# Run the method in question\n\t\tdf = spin.statcast_pitcher_spin(start_dt='2019-07-01', end_dt='2019-07-31', player_id=506433)\n\n\t\t# Columns needed to be checked\n\t\ttarget_columns = ['Mx', 'Mz', 'phi', 'theta']\n\n\t\tfor column in target_columns:\n\t\t\tlogging.info(\"Begin testing on {}\".format(column))\n\n\n\t\t\tif column in ['Mz']:\n\t\t\t# Almost equal assertion is necessary for small differences that arise after consecutive calculations\n\t\t\t\tself.assertTrue(self.compare_almost_equal(df, template_data, column))\n\n\t\t\telse:\n\t\t\t\tself.assertTrue(self.compare_columns(df, template_data, column))\n\n\t\t\tlogging.info(\"{} passed\".format(df, template_data, column))\n\n\n\t\tlogging.info(\"All tests completed\")", "def 
test_check_numeric_columns_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\", copy=True)\n\n x.fit(df)\n\n expected_call_args = {0: {\"args\": (d.create_df_2(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"transform\",\n expected_call_args,\n return_value=d.create_df_2(),\n ):\n\n x.transform(df)", "def test_single(self):\n df = self.df.head(1).copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))", "def row1_invariant(self, target_col):\n # replace with your code\n if self.lower_row_invariant(1, target_col):\n return True\n return False", "def test_exception_raised(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\", \"b\", \"c\"], scaler=\"standard\")\n\n with pytest.raises(\n TypeError,\n match=r\"\"\"The following columns are not numeric in X; \\['b', 'c'\\]\"\"\",\n ):\n\n x.check_numeric_columns(df)", "def test_create_dataframe(dataframe):\n results = True\n rows = dataframe.shape[0]\n column_names = sorted(dataframe.columns)\n column_datatypes = list(dataframe[column_names].dtypes)\n\n # Checks columns match those specified in #1\n if column_names != DATA_COLUMNS:\n raise ValueError(\"DataFrame does not have necessary datatypes: \" + str(DATA_COLUMNS))\n # Checks column datatypes match\n if column_datatypes != DATA_DATATYPES:\n raise ValueError(\"DataFrame does not have necessary column names: \" + str(DATA_DATATYPES))\n # Checks for a least 3 rows in DataFrame\n if rows < 10:\n raise ValueError(\"DataFrame does not have enough rows of data (>=10).\")\n\n return results", "def test_distr_evaluate(normal, metric, multivariate):\n y_pred = normal.create_test_instance()\n y_true = y_pred.sample()\n\n m = metric(multivariate=multivariate)\n\n if not multivariate:\n expected_cols = y_true.columns\n else:\n expected_cols = [\"score\"]\n\n res = m.evaluate_by_index(y_true, y_pred)\n assert isinstance(res, pd.DataFrame)\n assert (res.columns == expected_cols).all()\n assert res.shape == (y_true.shape[0], len(expected_cols))\n\n res = m.evaluate(y_true, y_pred)\n assert isinstance(res, pd.DataFrame)\n assert (res.columns == expected_cols).all()\n assert res.shape == (1, len(expected_cols))", "def sanity_checks(df: pd.DataFrame) -> None:\n df_temp = df.copy()\n # checks that the max date is less than tomorrow's date.\n assert datetime.datetime.strptime(df_temp['Date'].max(), '%Y-%m-%d') < (datetime.datetime.utcnow() + datetime.timedelta(days=1))\n # checks that there are no duplicate dates\n assert df_temp['Date'].duplicated().sum() == 0, 'One or more rows share the same date.'\n if 'Cumulative total' not in df_temp.columns:\n df_temp['Cumulative total'] = df_temp['Daily change in cumulative total'].cumsum()\n # checks that the cumulative number of tests on date t is always greater than the figure for t-1:\n assert (df_temp['Cumulative total'].iloc[1:] >= df_temp['Cumulative total'].shift(1).iloc[1:]).all(), \"On one or more dates, `Cumulative total` is greater on date t-1.\"\n # df.iloc[1:][df['Cumulative total'].iloc[1:] < df['Cumulative total'].shift(1).iloc[1:]]\n # cross-checks a sample of scraped figures against the expected result.\n assert len(sample_official_data) > 0\n for dt, d in sample_official_data:\n val = df_temp.loc[df_temp['Date'] == dt, SERIES_TYPE].squeeze().sum()\n assert val 
== d[SERIES_TYPE], f\"scraped value ({val:,d}) != official value ({d[SERIES_TYPE]:,d}) on {dt}\"\n return None", "def process_real(df):\n df_c = df.copy()\n df_c = df_c.apply(lambda s: H.to_quants(s, std=1), axis=1)\n df_c = df_c > 0\n if type(df.index) == pd.MultiIndex:\n df_c.index = map(lambda s: '_'.join(s), df_c.index)\n return df_c.T", "def test_column_presence(self):\n\n columns = [\n \"assay_ontology_term_id\",\n \"development_stage_ontology_term_id\",\n \"disease_ontology_term_id\",\n \"ethnicity_ontology_term_id\",\n \"is_primary_data\",\n \"sex_ontology_term_id\",\n \"tissue_ontology_term_id\",\n ]\n\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n # Remove batch condition because it has a dependency with is_primary_data\n self.validator.adata.uns.pop(\"batch_condition\")\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [f\"ERROR: Dataframe 'obs' is missing \" f\"column '{column}'.\"],\n )", "def voxelConsistency(cleaned_dataframe, column_number, expected_size):\n consistency_boolean = True\n for row in cleaned_dataframe.index:\n if cleaned_dataframe[column_number][row] == expected_size:\n continue\n elif cleaned_dataframe[column_number][row] != expected_size:\n print(\"Subject scan \" + cleaned_dataframe[0][row] + \" does not have voxel size of \" +str(expected_size))\n consistency_boolean = False\n return consistency_boolean", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)" ]
[ "0.6389563", "0.6188177", "0.6131274", "0.5834058", "0.5714856", "0.56960815", "0.56791514", "0.5624745", "0.5621683", "0.5608999", "0.5575569", "0.5574775", "0.55731064", "0.5562695", "0.5555359", "0.55408436", "0.5511444", "0.5477347", "0.5473243", "0.5456144", "0.5433261", "0.5427762", "0.5426414", "0.5425659", "0.54233015", "0.54232246", "0.5400602", "0.53979164", "0.539699", "0.539286" ]
0.71757084
0
Trim altimeter data when out of water during a deployment and by bin range if specified
def trim_alt(ds, data_vars=["Altitude_m", "Counts", "Temperature_C"]): if "trim_method" in ds.attrs: trm_list = ds.attrs["trim_method"] if not isinstance(trm_list, list): # make sure it is a list before looping trm_list = [trm_list] for trm_meth in trm_list: if trm_meth.lower() == "altitude": print("Trimming using altitude data") altitude = ds[ "Altitude_m" ] # need to use atltitude values before starting trimming for var in data_vars: ds[var] = ds[var].where(~(altitude < ds.attrs["Deadzone_m"])) ds[var] = ds[var].where(~(altitude > ds.attrs["Range_m"])) print(f"Trimming {var}") histtext = "Trimmed altimeter data using Altimeter_m = 0." ds = utils.insert_history(ds, histtext) elif trm_meth.lower() == "bin range": print("Trimming using good_bins of %s" % str(ds.attrs["good_bins"])) if "bins" in ds.coords: # trim coordinate bins ds = ds.isel( bins=slice(ds.attrs["good_bins"][0], ds.attrs["good_bins"][1]) ) # reset Bin_count attribute ds.attrs["Bin_count"] = ( ds.attrs["good_bins"][1] - ds.attrs["good_bins"][0] ) histtext = ( "Removed extra bins from altimeter data using good_bins attribute." ) ds = utils.insert_history(ds, histtext) else: print("Did not trim altimeter data") return ds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]", "def remove_invalid_values(self, lower_bound=float('-inf'), upper_bound=float('inf')) :\n valid_indices = (lower_bound < self.signal) * (self.signal < upper_bound)\n self.time_scale = self.time_scale[valid_indices]\n self.signal = self.signal[valid_indices]", "def zero_blind_range(data):\n try:\n start_i = data['first_data_bin']\n except:\n start_i = 0\n data['data'][...,:start_i] = 0.0", "def trim_site(df : pd.DataFrame) -> pd.DataFrame: \n df.loc[df.site_id == 6,'meter_reading'] = np.clip(df.loc[df.site_id == 6,'meter_reading'],a_min=0,a_max=1.9e5)\n df.loc[(df.primary_use == 'Education') &(df.site_id == 13),'meter_reading'] = np.clip(df.loc[(df.primary_use == 'Education') &(df.site_id == 13),'meter_reading'],a_min=0,a_max=1.5e5)\n df.loc[df.site_id == 9,'meter_reading'] = np.clip(df.loc[df.site_id == 9 ,'meter_reading'],a_min =0,a_max = 1.9e5)\n return df", "def trim_occluded_throats(network, mask='all'):\n occluded_ts = network['throat.area'] == 0\n if np.sum(occluded_ts) > 0:\n occluded_ts *= network[\"throat.\"+mask]\n trim(network=network, throats=occluded_ts)", "def parse_trim(config):\n config['bins'] = _parse_list_of_lists(config['bins'], delimiter_elements='-', delimiter_lists=',')\n return config", "def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data", "def trim(wavelength, spectra, bins):\n if type(bins[0]) != list:\n bins = [bins]\n\n spectra_trim = np.array([]).reshape(0, spectra.shape[1])\n wavelength_trim = np.array([])\n for wave_range in bins:\n mask = np.bitwise_and(wavelength >= wave_range[0], wavelength <= wave_range[1])\n spectra_trim = np.vstack((spectra_trim, spectra[mask, :]))\n wavelength_trim = np.hstack((wavelength_trim, wavelength[mask]))\n return wavelength_trim, spectra_trim", "def trim_occluded_throats(self):\n occluded_ts = list(self.throats()[self[\"throat.area\"]==0])\n #occluded_throats = np.asarray(occluded_throats)\n if len(occluded_ts) > 0:\n self.trim(throats=occluded_ts)\n \"Also get rid of isolated pores\"\n isolated_ps = self.check_network_health()['isolated_pores']\n if len(isolated_ps) > 0:\n self.trim(isolated_ps)", "def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x", "def trim(self, start, stop=None):\n if stop is None:\n stop = self.data.shape[0]\n\n start = max(start, 0)\n stop = min(stop, self.data.shape[0])\n self.data = self.data.iloc[start:stop,:]", "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def exclude_zero_bins(f,fitmin,fitmax,hs=[],limit=0,sumall=False):\n if len(hs)==0:\n return\n nbins = fitmax - fitmin + 1\n nexcl = 0\n exclist = []\n for ibin in xrange(fitmin,fitmax+1):\n do_exc = any( [h.GetBinContent(ibin)<limit for h in hs] ) if sumall==False else (sum([h.GetBinContent(ibin) for h in hs])<limit)\n if do_exc:\n f.ExcludeBin(ibin)\n exclist.append(ibin)\n nexcl += 1\n if nexcl>0:\n print 'WARNING: TFractionFitter excluded %d out of %d bins due to low statistics (nentries<%d)'%(nexcl,nbins,limit)\n print ' bins: ' + ' '.join([str(xx) for xx in exclist])\n else:\n print 'INFO: 
TFractionFitter does not need to exclude any bins'\n return True", "def trim_range(self, low_bound, hi_bound, full_bound=True):\n low_bound_int = int(low_bound[:self.place+1])\n hi_bound_int = int(hi_bound[:self.place+1])\n\n # Remove keys outside of range\n # modifying dict during loop caused lots of problems - del after loop\n keys_to_del = []\n for key in self.Poss_Tree:\n if key < int(low_bound[:self.place]):\n keys_to_del.append(key)\n continue\n elif key > int(hi_bound[:self.place]):\n keys_to_del.append(key)\n continue\n for key in keys_to_del:\n del self.Poss_Tree[key]\n\n # Remove values outside of range\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n password = int(construct_pass(key, choice))\n if password > hi_bound_int or password < low_bound_int:\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)", "def _xtrim(self, lower, upper):\n trm = pd.Series(data=True, index=self._data.index)\n for c in self.index_colnames_all:\n l_limit = np.percentile(self._data[c], 100 * lower)\n u_limit = np.percentile(self._data[c], 100 * upper)\n trm &= self._data[c].apply(lambda x: True if l_limit <= x <= u_limit else False)\n\n return trm", "async def _truncate_adjusted_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][1][:truncate]\n del self.adjusted_close_values[pair][:truncate]", "def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]", "def filter_for_activity(self, window, ssd_thres, minimum_wb):\n data_wb = self.data.copy()\n applyOffsetRemove(data_wb)\n applyFilter(data_wb)\n window = window \n ssd_threshold = ssd_thres\n minimum = minimum_wb\n ranges_ww = runWalkingBoutDetection(\n data_wb,\n ssd_threshold,\n window,\n minimum,\n )\n try:\n segment = ranges_ww[0]\n lower = self.data.loc[segment[0],0]\n upper = self.data.loc[segment[1],0]\n self.data = self.data[(self.data[0]>lower) & (self.data[0]<=upper)]\n except:\n print(\"No movement detected\")", "def _trim_dataset_for_analysis(self,\n low_UMI_count_cutoff: int = 30,\n num_transition_barcodes: Union[int, None] = 7000,\n gene_blacklist: List[int] = []):\n\n logging.info(\"Trimming dataset for inference.\")\n\n # Get data matrix and barcode order that sorts barcodes by UMI count.\n matrix = self.data['matrix']\n umi_counts = np.array(matrix.sum(axis=1)).squeeze()\n umi_count_order = np.argsort(umi_counts)[::-1]\n\n # Initially set the default to be the whole dataset.\n self.analyzed_barcode_inds = np.arange(start=0, stop=matrix.shape[0])\n self.analyzed_gene_inds = np.arange(start=0, stop=matrix.shape[1])\n\n # Expected cells must not exceed nonzero count barcodes.\n num_nonzero_barcodes = np.sum(umi_counts > 0).item()\n n_cells = min(self.priors['n_cells'], num_nonzero_barcodes)\n\n try:\n\n # Choose which genes to use based on their having nonzero counts.\n # (All barcodes must be included so that inference can generalize.)\n gene_counts_per_barcode = np.array(matrix.sum(axis=0)).squeeze()\n self.analyzed_gene_inds = np.where(gene_counts_per_barcode\n > 0)[0].astype(dtype=int)\n\n if len(gene_blacklist) > 0:\n\n # Ensure genes on the blacklist are excluded.\n self.analyzed_gene_inds = np.array([g for g in\n self.analyzed_gene_inds\n if g not in gene_blacklist])\n\n except IndexError:\n 
logging.warning(\"Something went wrong trying to trim genes.\")\n\n # Estimate priors on cell size and 'empty' droplet size.\n self.priors['cell_counts'], self.priors['empty_counts'] = \\\n get_d_priors_from_dataset(self) # After gene trimming\n\n # If running the simple model, just use the expected cells, no more.\n if self.model_name == \"simple\":\n\n self.analyzed_barcode_inds = np.array(umi_count_order[:n_cells],\n dtype=int)\n\n # If not using the simple model, include empty droplets.\n else:\n\n try:\n\n # Get the cell barcodes.\n cell_barcodes = umi_count_order[:n_cells]\n\n # Set the low UMI count cutoff to be the greater of either\n # the user input value, or an empirically-derived value.\n empirical_low_UMI = int(self.priors['empty_counts'] * 0.8)\n low_UMI_count_cutoff = max(low_UMI_count_cutoff,\n empirical_low_UMI)\n logging.info(f\"Excluding barcodes with counts below \"\n f\"{low_UMI_count_cutoff}\")\n\n # See how many barcodes there are to work with total.\n num_barcodes_above_umi_cutoff = \\\n np.sum(umi_counts > low_UMI_count_cutoff).item()\n\n # Get a number of transition-region barcodes.\n num = min(num_transition_barcodes,\n num_barcodes_above_umi_cutoff - cell_barcodes.size)\n num = max(0, num)\n transition_barcodes = umi_count_order[n_cells:\n (n_cells + num)]\n\n # Use the cell barcodes and transition barcodes for analysis.\n self.analyzed_barcode_inds = np.concatenate((\n cell_barcodes,\n transition_barcodes)).astype(dtype=int)\n\n # Identify probable empty droplet barcodes.\n if num < num_transition_barcodes:\n\n # This means we already used all the barcodes.\n empty_droplet_barcodes = np.array([])\n\n else:\n\n # Decide which empty barcodes to include.\n empty_droplet_sorted_barcode_inds = \\\n np.arange(n_cells + num, num_barcodes_above_umi_cutoff,\n dtype=int) # The entire range\n # empty_droplet_sorted_barcode_inds = \\\n # np.arange(n_cells + num,\n # min(num_barcodes_above_umi_cutoff\n # - cell_barcodes.size - num,\n # n_cells + num + num_empty_droplets),\n # dtype=int)\n empty_droplet_barcodes = \\\n umi_count_order[empty_droplet_sorted_barcode_inds]\n\n self.empty_barcode_inds = empty_droplet_barcodes.astype(dtype=int)\n\n logging.info(f\"Using {cell_barcodes.size} probable cell barcodes, \"\n f\"plus an additional {transition_barcodes.size} barcodes, \"\n f\"and {empty_droplet_barcodes.size} empty droplets.\")\n\n except IndexError:\n logging.warning(\"Something went wrong trying to trim barcodes.\")\n\n self.is_trimmed = True", "def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data", "def trim(self, start, end):", "def trim(self):\n while np.any(self.vertex_valance <= 1):\n edge_to_keep = np.all(self.vertex_valance[self.edges] > 1,\n axis=1).tolist();\n self.raw_wires.filter_edges(edge_to_keep);\n vertex_to_keep = [len(self.get_vertex_neighbors(i)) > 0 for i in\n range(self.num_vertices)];\n self.raw_wires.filter_vertices(vertex_to_keep);\n\n self.__initialize_wires();\n if len(self.vertices) == 0:\n raise RuntimeError(\"Zero vertices left after trimming.\");", "def clean(self):\n # Perform the standard ACE 
cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n self.data = self.data[self.data['status'] <= max_status]\n\n return", "async def _truncate_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][0][:truncate]\n del self.close_values[pair][:truncate]\n del self.close_times[pair][:truncate]", "def bpfilter(\n data: numpy.ndarray, short_min: float = 2, long_min: float = 12\n) -> Optional[numpy.ndarray]:\n return butter_bandpass_filter(\n data, 1 / (long_min * 60), 1 / (short_min * 60), 1 / DATA_RATE\n )", "def clean_data(cube, max_value, min_value):\n\n data_clean = numpy.where(cube.data < min_value, min_value, cube.data) \n data_clean = numpy.where(data_clean > max_value, max_value, data_clean)\n \n return data_clean", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def trim_support(\n dict_, data, logit, bins=25, trim=True, reestimate_p=False, show_output=False\n):\n # Find common support\n prop_score = data[\"prop_score\"]\n common_support = _define_common_support(dict_, data, bins, show_output)\n\n # Trim the data. Recommended.\n if trim is True:\n # data, prop_score = trim_data(prop_score, common_support, data)\n data = data[\n (data.prop_score >= common_support[0])\n & (data.prop_score <= common_support[1])\n ]\n prop_score = prop_score[\n (prop_score >= common_support[0]) & (prop_score <= common_support[1])\n ]\n\n # Optional. Not recommended\n # Re-estimate baseline propensity score on the trimmed sample\n if reestimate_p is True:\n # Re-estimate the parameters of the decision equation based\n # on the new trimmed data set\n data = estimate_treatment_propensity(dict_, data, logit, show_output)\n\n else:\n pass\n else:\n pass\n\n data = data.sort_values(by=\"prop_score\", ascending=True)\n prop_score = prop_score.sort_values(axis=0, ascending=True)\n X = data[dict_[\"TREATED\"][\"order\"]]\n Y = data[[dict_[\"ESTIMATION\"][\"dependent\"]]]\n\n return X, Y, prop_score", "def filter_binaries_beamprofile(bin_arr, beamprofile, cutoff=0.75, dilate=0):\r\n bp_bool = beamprofile < cutoff * beamprofile.max()\r\n out_binary = np.empty_like(bin_arr, dtype=int)\r\n total_cells = 0\r\n removed_cells = 0\r\n\r\n for i, img in enumerate(bin_arr):\r\n labeled, n = mh.labeled.label(img)\r\n total_cells += n\r\n for l in np.unique(labeled)[1:]:\r\n selected_binary = multi_dilate(labeled == l, dilate)\r\n if np.any(np.logical_and(selected_binary, bp_bool)): # Cell lies outside of\r\n labeled[labeled == l] = 0\r\n removed_cells += 1\r\n out_binary[i] = labeled\r\n print('Removed {} cells out of a total of {} cells.'.format(removed_cells, total_cells))\r\n return out_binary", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)" ]
[ "0.59564203", "0.59462726", "0.5891383", "0.5773936", "0.5772101", "0.5748919", "0.57270765", "0.57121384", "0.56339175", "0.5611969", "0.5601741", "0.55495125", "0.55369914", "0.5513442", "0.5478489", "0.54771066", "0.5410769", "0.54082984", "0.53611666", "0.53212947", "0.53141534", "0.5303362", "0.5302313", "0.52990764", "0.5288101", "0.52729225", "0.52674246", "0.524713", "0.5238523", "0.52238077" ]
0.634952
0
Return gpu(i) if it exists, otherwise return cpu().
def try_gpu(i=0): if torch.cuda.device_count() >= i + 1: return torch.device(f'cuda:{i}') return torch.device('cpu')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def get_device(i=0):\n if torch.cuda.is_available():\n return torch.device(\"cuda:%d\" % i)\n else:\n return torch.device(\"cpu\")", "def try_gpu(x):\n global _GPUS_EXIST\n\n if _GPUS_EXIST:\n try:\n return x.cuda()\n except (AssertionError, RuntimeError):\n # actually, GPUs don't exist\n print 'No GPUs detected. Sticking with CPUs.'\n _GPUS_EXIST = False\n return x\n else:\n return x", "def try_gpu(x):\n global _GPUS_EXIST\n if _GPUS_EXIST:\n try:\n return x.cuda()\n except (AssertionError, RuntimeError):\n print('No GPUs detected. Sticking with CPUs.')\n _GPUS_EXIST = False\n return x", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def try_gpu():\n try:\n ctx = mx.gpu()\n _ = nd.array([0], ctx=ctx)\n except:\n ctx = mx.cpu()\n return ctx", "def try_gpu():\n try:\n ctx = mx.gpu()\n _ = nd.array([0], ctx=ctx)\n except:\n ctx = mx.cpu()\n return ctx", "def try_gpu():\r\n try:\r\n ctx = mx.gpu()\r\n _ = nd.array([0], ctx=ctx)\r\n except mx.base.MXNetError:\r\n ctx = mx.cpu()\r\n return ctx", "def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()", "def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. 
Check CUDA env with function \"check_cuda_env\"')", "def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T", "def get_cpu(self):\n pass", "def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')", "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def GetGPU():\n return option['device_id']", "def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")", "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def try_all_gpus(): #@save\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def maybe_cuda(t):\n if torch.cuda.is_available():\n return t\n return t", "def __getGPUthread(self, interactive, force) :\n #first try to import Reikna\n try :\n import reikna as rk \n except ModuleNotFoundError :\n if force: raise\n self.logger.warningglobal(\"Reikna isn't installed. 
Please install with 'pip install reikna' to use GPU devices.\")\n return None\n #create an API\n #try :\n # api = rk.cluda.cuda_api()\n #except Exception :\n # logger.info('CUDA-based GPU API not available, will try to get one based on OpenCL instead.')\n # try :\n # api = rk.cluda.ocl_api()\n # except Exception :\n # logger.warningglobal('WARNING: Failed to create an OpenCL API, no GPU computation will be available!!')\n # return None\n try :\n api = rk.cluda.ocl_api()\n #return a thread from the API\n return api.Thread.create(interactive=interactive)\n except Exception :\n if force: raise\n self.logger.warningglobal('Failed to create an OpenCL API, no GPU computation will be available!!')\n return None", "def try_all_gpus():\n ctx_list = []\n try:\n for i in range(16):\n ctx = mx.gpu(i)\n _ = nd.array([0], ctx=ctx)\n ctx_list.append(ctx)\n except:\n pass\n if not ctx_list:\n ctx_list = [mx.cpu()]\n return ctx_list", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def get_cpu_function(host, cpu):\n for s in range(0, len(host.nodes)):\n functions = host.cpu_functions[s]\n for f in CORE_FUNCTIONS:\n if cpu.cpu in functions[f]:\n return f\n return constants.NO_FUNCTION", "def create_cpu():\n return CPU()", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")" ]
[ "0.7673299", "0.7155733", "0.6743398", "0.67328984", "0.6628977", "0.66274136", "0.66274136", "0.65953624", "0.65533084", "0.64042103", "0.6377482", "0.621364", "0.6204196", "0.6093279", "0.6086354", "0.60763216", "0.6047976", "0.6047976", "0.60436594", "0.60353875", "0.59832215", "0.5977356", "0.59742266", "0.5947903", "0.5940012", "0.59112024", "0.58978623", "0.5895874", "0.5893198", "0.5829759" ]
0.78513676
1
Parse input arguments. A config file is required; -ncdc forces downloading of new NCDC data.
def parse_args(): parser = ArgumentParser() parser.add_argument("config", help="Path to config file") parser.add_argument("-ncdc", "--download-ncdc", action="store_true", dest="d_ncdc", help="Download new NCDC data (overwrites existing)") arguments = parser.parse_args() return arguments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run NCF..\")\n parser.add_argument(\n \"--config_file\",\n nargs=\"?\",\n type=str,\n default=\"../configs/ncf_default.json\",\n help=\"Specify the config file name. Only accept a file from ../configs/\",\n )\n # If the following settings are specified with command line,\n # These settings will used to update the parameters received from the config file.\n parser.add_argument(\n \"--dataset\",\n nargs=\"?\",\n type=str,\n help=\"Options are: tafeng, dunnhunmby and instacart\",\n )\n parser.add_argument(\n \"--data_split\",\n nargs=\"?\",\n type=str,\n help=\"Options are: leave_one_out and temporal\",\n )\n parser.add_argument(\n \"--root_dir\", nargs=\"?\", type=str, help=\"working directory\",\n )\n parser.add_argument(\n \"--emb_dim\", nargs=\"?\", type=int, help=\"Dimension of the embedding.\"\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, help=\"Intial learning rate.\")\n parser.add_argument(\"--max_epoch\", nargs=\"?\", type=int, help=\"Number of max epoch.\")\n parser.add_argument(\n \"--batch_size\", nargs=\"?\", type=int, help=\"Batch size for training.\"\n )\n parser.add_argument(\"--optimizer\", nargs=\"?\", type=str, help=\"OPTI\")\n parser.add_argument(\"--activator\", nargs=\"?\", type=str, help=\"activator\")\n parser.add_argument(\"--alpha\", nargs=\"?\", type=float, help=\"ALPHA\")\n return parser.parse_args()", "def parse_config(cmdline_opts):\n cmdline_opts.add_argument(\n '-p', '--port', help='Enter port number', default=8001)\n cmdline_opts.add_argument(\n '--host', help='Enter host name', default='localhost')\n cmdline_opts.add_argument(\n '-c', '--config', help='Enter config file', default='config.json')", "def cl_args(description):\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"infile\", type=str,\n help=\"input ATL03 file\")\n parser.add_argument(\"track\", type=str,\n help=\"ground track: gt1l,gt1r,gt2l,gt2r,gt3l,gt3r,\")\n parser.add_argument(\"-o\", type=str, default=None,\n help=\"output file root name\")\n parser.add_argument(\"-c\", type=int, default=2,\n help=\"minimum signal confidence to plot (0-4)\"\n \"default is 4 (high)\")\n parser.add_argument(\"-f\", action=\"store_true\",\n help=\"force overwriting output data file\")\n parser.add_argument(\"-p\", action=\"store_true\",\n help=\"plot photon heights\")\n parser.add_argument(\"-v\", action=\"store_true\",\n help=\"increase the output verbosity\")\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]", 
"def parse_arguments():\n description = 'Code checkout script for NEMSfv3gfs'\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--config', action='store', help='name of checkout config', \n default='checkout_nemsfv3gfs.cfg')\n args = parser.parse_args()\n return args.config", "def setup_config(args):\n # Set the default configuration file\n f = pkgutil.get_data(__package__, 'dnstap.conf')\n cfg = load_yaml(f)\n\n # Overwrites then with the external file ? \n if args.c:\n cfg_ext = load_yaml(open(args.c, 'r'))\n merge_cfg(u=cfg_ext,o=cfg)\n\n # Or searches for a file named dnstap.conf in /etc/dnstap_receiver/ \n else:\n etc_conf = \"/etc/dnstap_receiver/dnstap.conf\"\n f = pathlib.Path(etc_conf)\n if f.exists():\n cfg_etc = load_yaml(open(etc_conf, 'r'))\n merge_cfg(u=cfg_etc,o=cfg)\n \n # update default config with command line arguments\n if args.v:\n cfg[\"trace\"][\"verbose\"] = args.v \n if args.u is not None:\n cfg[\"input\"][\"unix-socket\"][\"enable\"] = True\n cfg[\"input\"][\"unix-socket\"][\"path\"] = args.u\n if args.l != DFLT_LISTEN_IP:\n cfg[\"input\"][\"tcp-socket\"][\"local-address\"] = args.l\n if args.l != DFLT_LISTEN_PORT:\n cfg[\"input\"][\"tcp-socket\"][\"local-port\"] = args.p\n\n return cfg", "def config_argparser(parser):\n add_usual_input_args(parser)\n parser.add_argument('--edges', action='store_true',\n help='First/last dialogues only')\n parser.set_defaults(func=main)", "def parsing_arguments(args=None):\n description = ''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n 
default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 
'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def parseArg(self, c):\n\n\t\trocks.app.Application.parseArg(self, c)\n\n\t\tif c[0] in ('--conf', '-c'):\n\t\t\tself.config.setFile(c[1])\n\t\t\tself.config.parse()\n\t\telif c[0] in ('--master',):\n\t\t\tself.masters = [rocks.service411.Master(c[1])]\n\t\telif c[0] in ('--shared',):\n\t\t\tself.shared_filename = c[1]\n\t\telif c[0] in ('--pub',):\n\t\t\tself.pub_filename = c[1]\n\t\telif c[0] == \"--comment\":\n\t\t\tself.comment = c[1]\n\t\telif c[0] == \"--all\":\n\t\t\tself.getall = 1\n\t\telif c[0] in (\"--local\", \"--file\"):\n\t\t\tself.doFile = 1\n\t\telif c[0] in (\"-v\", \"--verbose\"):\n\t\t\tself.verbose += 1", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def parse_config(args):\n if args.get('--config-file'):\n config = parse_config_file(args.get('--config-file'))\n else:\n config = {\n 'oauth_token': args['--oauth-token'],\n 'channel_name': args['--channel-name'],\n 'cmd_rate': args['--cmd-rate'],\n 'sleep_rate': args['--sleep-rate'],\n }\n\n return config", "def handleCmdLine(self):\n description = \"Nagios monitoring script to check for open ports\\n\"\n usage = (\"%prog <options>\\n\")\n parser = OptionParser(usage=usage, description=description)\n\n parser.add_option(\"-c\", \"--config\",\n type=\"string\",\n help=\"path to open ports configuration file\")\n parser.add_option(\"-l\", \"--list\",\n type=\"string\",\n help=\"supply list of allowed ports seperated by comma.\")\n\n (self.options, args) = parser.parse_args()", "def ParseArguments():\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--db_file_path\", required = False, help = \"SQLite database filename\", type = str, default = '')\n parser.add_argument(\"--server_url\", required = False, help = \"Server's domain name\", type = str, default = '')\n parser.add_argument(\"--substation_name\", required = False, help = \"Substation name\", type = str, default = '')\n parser.add_argument(\"--net_interfaces\", required = False, help = \"List of network interfaces\", type = str)\n parser.add_argument(\"--no_hostname_check\", required = False, help = \"Disable TLS certificate hostname validation (insecure)\", action = 'store_true')\n parser.add_argument(\"--external_interval\", required = False, help = \"Interval (s) between invocations of external commands\", default = '3')\n parser.add_argument(\"--external_timeout\", required = False, help = \"Maximum time (s) an external command is allowed to run\", default = None)\n parser.add_argument(\"--external_command\", required = False, help = \"Periodically invoke given 
command to gather external data. Use repeatedly.\", action = 'append', default = [])\n parser.add_argument(\"--uuid\", required = False, help = \"To get UUID or hostname (set by default) as an unique ID.\", action = 'store_true')\n\n argv = parser.parse_args(sys.argv[1:])\n\n config_data = {}\n try:\n if os.path.isfile('config.json'):\n json_data_file = open('config.json', 'r')\n config_data = json.load(json_data_file)\n required_keys = ['db_file_path', 'server_url']\n for key in required_keys:\n if key not in config_data:\n print('config.json file does not contain required key %s' % key)\n sys.exit()\n else:\n print('config.json does not exist in the current directory.')\n sys.exit()\n except IOError:\n sys.exit()\n\n config_data['no_hostname_check'] = argv.no_hostname_check or config_data.get('no_hostname_check', False)\n\n interval = float(argv.external_interval)\n\n if argv.external_timeout is None:\n timeout = interval\n else:\n timeout = float(argv.external_timeout)\n\n if timeout > interval:\n print('Error: External command timeout is longer than interval')\n sys.exit()\n\n config_data['external_interval'] = interval\n config_data['external_timeout'] = timeout\n config_data['external_command'] = argv.external_command\n config_data['substation_name'] = argv.substation_name\n config_data['uuid'] = argv.uuid\n\n if argv.db_file_path == '':\n if not config_data['db_file_path']:\n config_data['db_file_path'] = '/var/local/agent_sqlite3.db'\n print('SQLite3 database file is set by default: /var/local/agent_sqlite3.db')\n else:\n config_data['db_file_path'] = argv.db_file_path\n\n ifs = argv.net_interfaces or config_data.get('net_interfaces', [])\n if isinstance(ifs, str):\n ifs = ifs.split(',')\n config_data['net_interfaces'] = utils.interfaces_to_ip(ifs)\n\n # TODO verify provided URL\n if argv.server_url == '':\n if not config_data['server_url']:\n print('Please, provide server URL as an argument or in config.json file.')\n print('It must be in this format: wss://<domain_name>:443/agent')\n sys.exit()\n else:\n config_data['server_url'] = argv.server_url\n\n\n return config_data", "def parse_args(self, config_file=None, config_dat=None):\n\n global _verbose\n global _print_config\n\n if _debug:\n print(\"parse_args\")\n\n args = self.get_args()\n\n # print(\"arg config_file =\", config_file)\n # print(\"arg config_dat =\", config_dat)\n # print(\"ARGS =\", args)\n\n args_v = vars(args)\n # special case for config_file\n # to avoid a chicken or egg problem we parse config location early\n if 'config_file' in args_v:\n self.config_file = args_v['config_file']\n self.config_data = None\n elif config_dat is not None:\n self.config_data = config_dat\n elif config_file is not None:\n self.config_file = config_file\n else:\n self.config_file = ArpMon.defaults['config_file']\n\n confvalues = self.get_confg()\n\n self.args.update(ArpMon.defaults)\n self.args.update(self.kargs)\n self.args.update(confvalues)\n self.args.update(args_v)\n merged_args = self.args\n # merged_args.update(confvalues)\n # merged_args.update(vars(args))\n print(\"\\nconfvalues\", confvalues)\n print(\"\\nargs\", vars(args))\n print(\"\\nmerged_args\", merged_args)\n\n if 'verbose' in merged_args and merged_args['verbose'] is not None:\n self.verbose = int(merged_args['verbose'])\n _verbose = self.verbose\n\n if 'http_port' in merged_args and merged_args['http_port'] is not None:\n self.http_port = int(merged_args['http_port'])\n\n if 'stat_file' in merged_args:\n self.stat_file = merged_args['stat_file']\n\n if 
'target_file' in merged_args:\n self.target_file = merged_args['target_file']\n\n if 'log_dir' in merged_args:\n self.log_dir = merged_args['log_dir']\n\n if 'pid_dir' in merged_args:\n self.pid_dir = merged_args['pid_dir']\n\n if 'iface' in merged_args:\n self.iface = merged_args['iface']\n\n if 'time_away' in merged_args and merged_args['time_away'] is not None:\n self.time_away = int(merged_args['time_away'])\n\n if 'redirect_io' in merged_args:\n self.redirect_io = bool(merged_args['redirect_io'])\n\n if 'pconf' in merged_args:\n _print_config = merged_args['pconf']\n\n if 'time_sleep' in merged_args and merged_args['time_sleep'] is not None:\n self.time_sleep = int(merged_args['time_sleep'])\n\n if 'time_recheck' in merged_args and merged_args['time_recheck'] is not None:\n self.time_recheck = int(merged_args['time_recheck'])\n\n # if upload_config and self.config_file is None:\n # print(\"upload option require have config file option\")\n # sys.exit()\n\n #\n # calc other settings\n #\n if self.time_away is None:\n self.time_away = TIME_AWAY_DEFAULT\n\n if self.time_sleep is None:\n self.time_sleep = int(self.time_away/3)\n\n if self.time_recheck is None:\n self.time_recheck = int(self.time_away/2) - 10\n\n # if self.time_var_refresh is None:\n # self.time_var_refresh = int(self.time_away * 4) + 10\n\n if self.sniff_timeout is None:\n self.sniff_timeout = SNIFF_TIMEOUT # int(self.time_var_refresh / 3) + 10\n\n Mtargets._verbose = self.verbose\n\n print(\"args\", type(args))\n print(\"args pr\", args)\n print(\"vars args\", vars(args))\n print(\"redirect_io\", self.redirect_io)\n print(\"log_dir\", self.log_dir)\n print(\"pid_dir\", self.pid_dir)\n\n\n # redirect_io=1\n # exit(0)", "def parse_cmd_line_args(args: typing.Optional[typing.List] = None) -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', '-c', dest='cfg', metavar='nvpy.cfg', help='path to config file')\n return parser.parse_args(args)", "def parse_commandline_args():\n\n epilog = \"\"\"\n The configuration file must contained a JSON-encoded map. 
Example: \"{\"name\":\"foo\"}\".\n \"\"\"\n\n parser = utils.ConnectionArgumentParser(\n description=\"Update config (key/value pairs) on a board\", epilog=epilog\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=\"JSON file to load config from (default stdin)\",\n type=open,\n default=sys.stdin,\n dest=\"file\",\n )\n parser.add_argument(\n \"ids\", metavar=\"DEVICEID\", nargs=\"+\", type=int, help=\"Device IDs to flash\"\n )\n\n return parser.parse_args()", "def parse_command_line_args(args=None):\n description = 'CodaNorm arguments that override default settings'\n parser = argparse.ArgumentParser(description=description)\n \n # argument on which channel to calc\n parser.add_argument(\"-c\", \"--channel\", help=\"Channel to use (N, E, Z).\", \n choices=CHANNELS, nargs='+')\n \n # add option to calc with another coda window length\n parser.add_argument(\"--sd\", type=int, default=0,\n help=\"Specify length of Coda-window (in seconds)\")\n \n # maybe we want to plot anyway\n parser.add_argument('--plot', help='Plot results', action='store_true')\n \n # change station\n parser.add_argument(\"--station\", default=None,\n help=\"Specify station to use\")\n \n # verbose mode\n parser.add_argument(\"-v\", \"--verbose\", help=\"output verbosity\", \n action=\"store_true\")\n \n args = parser.parse_args(args)\n \n return args", "def parse_args():\n parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')\n parser.add_argument('--dataset', help='training dataset')\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n\n parser.add_argument('--load_ckpt', help='path of checkpoint to load')\n parser.add_argument('--load_detectron', help='path to the detectron weight pickle file')\n\n parser.add_argument('--output_dir', help='output directory to save the testing results. If not provided, defaults to [args.load_ckpt|args.load_detectron]/../test.')\n\n parser.add_argument('--set', dest='set_cfgs', help='set config keys, will overwrite config in the cfg_file. 
See lib/core/config.py for all options', default=[], nargs='*')\n parser.add_argument('--range', help='start (inclusive) and end (exclusive) indices', type=int, nargs=2)\n parser.add_argument('--multi-gpu-testing', help='using multiple gpus for inference', action='store_true')\n parser.add_argument('--vis', dest='vis', help='visualize detections', action='store_true')\n parser.add_argument('--model', help='Set model', type=str)\n\n parser.add_argument('--use_matlab', help='use matlab?', action='store_true')\n parser.add_argument('--early_stop', help='run eval only to 10 images', action='store_true')\n \n\n\n\n return parser.parse_args()", "def _parse_config(self, args, experiment_id):\r\n if not args:\r\n if experiment_id:\r\n config = importlib.import_module('configs.config_' + experiment_id)\r\n args = config.load_config()\r\n else:\r\n raise ValueError('No arguments or configuration data given')\r\n # Mandatory parameters for all architectures\r\n self.network_type = args.net\r\n self.is_training = args.training_mode\r\n self.train_data_file = args.train_data_file\r\n self.valid_data_file = args.valid_data_file\r\n self.test_data_file = args.test_data_file\r\n self.checkpoint_dir = args.checkpoint_dir\r\n self.trainlog_dir = args.trainlog_dir\r\n self.lr = args.lr\r\n self.batch_size = args.batch_size\r\n self.num_epochs = args.num_epochs\r\n self.loss_type = args.loss\r\n self.accuracy_type = args.accuracy\r\n self.optimizer = args.optimizer\r\n self.dropout = args.dropout\r\n self.gpu_load = args.gpu_load\r\n self.num_filters = args.num_filters\r\n self.nonlin = args.nonlin\r\n self.loss_type = args.loss\r\n self.task_type = args.task_type\r\n self.long_summary = args.long_summary\r\n self.experiment_path = args.experiment_path\r\n self.chpnt2load = args.chpnt2load\r\n self.lr_mode = args.lr_mode\r\n\r\n if not self.is_training:\r\n self.class_labels = args.class_labels\r\n if args.image_size:\r\n self.img_size = args.image_size\r\n else:\r\n self.img_size = None\r\n if args.num_classes:\r\n self.num_classes = args.num_classes\r\n else:\r\n self.num_classes = None\r\n if args.augmentation:\r\n self.augmentation_dict = args.augmentation\r\n else:\r\n self.augmentation_dict = None\r\n if args.normalize:\r\n self.normalize = args.normalize\r\n else:\r\n self.normalize = None\r\n if args.zero_center:\r\n self.zero_center = args.zero_center\r\n else:\r\n self.zero_center = None\r\n\r\n\r\n self._initialize_data()", "def parse_args(argv):\n\n parser = argparse.ArgumentParser(description='Fetch the requested report from App Nexus and save it to file system.')\n\n parser.add_argument('report_request', help='Path to JSON file that contains the report request.')\n parser.add_argument('-c', '--config', help='Path to JSON file that contains the keys \"api_endpoint\", \"user\" and \"pass\". If this parameter is not given, env vars APPNEXUS_API_ENDPOINT, APPNEXUS_USER and APPNEXUS_PASS must be set.')\n parser.add_argument('-d', '--save_dir', default='', help='The directory to save the report CSV. 
Default is current directory.')\n parser.add_argument('-I', '--no-interaction', action='store_true', help='Whether to ask confirmation before fetching report.')\n parser.add_argument('-s', '--start', help='Value for \"start_date\" parameter of report request.')\n parser.add_argument('-e', '--end', help='Value for \"end_date\" parameter of report request.')\n parser.add_argument('-i', '--interval', help='Value for \"report_interval\" parameter of report request.')\n parser.add_argument('--debug', action='store_true', help='Whether to print extra debug information or not.')\n\n args = parser.parse_args(argv[1:])\n\n\n if args.config:\n args.config = json.load(open(args.config, 'r'))\n\n elif (\n os.environ.get('APPNEXUS_API_ENDPOINT') and\n os.environ.get('APPNEXUS_USER') and\n os.environ.get('APPNEXUS_PASS')\n ):\n args.config = {\n 'api_endpoint': os.environ['APPNEXUS_API_ENDPOINT'],\n 'user' : os.environ['APPNEXUS_USER'],\n 'pass' : os.environ['APPNEXUS_PASS']\n }\n\n else:\n print \"\"\"\n You must either provide a --config parameter or\n set the env vars APPNEXUS_API_ENDPOINT, APPNEXUS_USER and APPNEXUS_PASS!\n Call this script with the --help option for more information.\n \"\"\"\n\n sys.exit(1)\n\n\n return args", "def parse_args(args):\n assert os.path.isfile(args.data_path), \"The specified data file does not exist.\"\n assert os.path.isfile(args.model_path), \"The specified model file does not exist.\"\n\n if args.read_batches is not False:\n if args.read_batches.lower() in (\"y\", \"yes\", \"1\", \"\", \"true\", \"t\"):\n args.read_batches = True\n else:\n args.read_batches = False", "def crnn_cfg(self):\r\n try:\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--model_path', type=str, default='')\r\n parser.add_argument('--data_path', type=str, default='')\r\n parser.add_argument('--gpus', type=int, nargs='*', default=[0])\r\n parser.add_argument('--characters', type=str, default='0123456789' + string.ascii_lowercase + '-')\r\n parser.add_argument('--label_len', type=int, default=16)\r\n parser.add_argument('--nb_channels', type=int, default=1)\r\n parser.add_argument('--width', type=int, default=200)\r\n parser.add_argument('--height', type=int, default=31)\r\n parser.add_argument('--model', type=str, default='CRNN_STN', choices=['CRNN_STN', 'CRNN'])\r\n parser.add_argument('--conv_filter_size', type=int, nargs=7, default=[64, 128, 256, 256, 512, 512, 512])\r\n parser.add_argument('--lstm_nb_units', type=int, nargs=2, default=[128, 128])\r\n parser.add_argument('--timesteps', type=int, default=50)\r\n parser.add_argument('--dropout_rate', type=float, default=0.25)\r\n\r\n return parser.parse_args()\r\n except:\r\n print('Error in method {0} in module {1}'.format('crnn_cfg', 'crnn_bridge.py'))\r\n return None", "def main(args):\n\n with open(args.cfg_fn, 'r') as cfg_fd:\n config = cfg_fd.read().split(\"\\n\")\n\n with open(args.opt_fn, 'r') as opt_fd:\n for oline in opt_fd:\n option, value = oline.strip().split(\"=\")\n\n conf_addition = \"%s=%s\" % (option, value)\n added = False\n for line_nr, line in enumerate(config):\n if \"# %s is not set\" % option in line or \\\n \"%s=\" % option in line:\n config[line_nr] = conf_addition\n added = True\n break\n\n if not added:\n config.append(conf_addition)\n\n with open(args.cfg_fn, 'w') as cfg_fd:\n cfg_fd.write(\"\\n\".join(config))", "def analysis_config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-g\", \"--ground-truth\", type=str, default=\"./BRUTE_FORCE_near_duplicates.tsv\")\n 
parser.add_argument(\"-p\", \"--pred\", type=str, default=\"./PRED_near_duplicates.tsv\")\n\n return parser.parse_args()", "def readConfigFile(self):\n self.config_obj = ConfigParser.ConfigParser()\n self.config_obj.readfp(open(self.configfile))\n\n # Set the log file\n if (not self.args_obj.log_file and self.config_obj.has_option('DEFAULT','logfile')):\n self.logfile = self.config_obj.get('DEFAULT', 'logfile')\n\n # Set the baud rate\n if (not self.args_obj.baud_rate and self.config_obj.has_option('DEFAULT','baud')):\n self.baudrate = self.config_obj.get('DEFAULT', 'baud')\n\n # Set the device port \n if (not self.args_obj.device and self.config_obj.has_option('DEFAULT','device')):\n self.device = self.config_obj.get('DEFAULT', 'device')\n\n # Set the connection timeout\n if (not self.args_obj.timeout and self.config_obj.has_option('DEFAULT','timeout')):\n self.timeout = self.config_obj.get('DEFAULT','timeout')\n\n if DEBUG:\n print('(DEBUG) Config Options:')\n self.pp.pprint(self.config_obj.sections())", "def parse_arguments():\n custom_config = config.read()\n arguments = docopt(__doc__, version='Montanus %s' % __version__)\n logger.debug(custom_config)\n conf_file = arguments.get('--with-conf')\n if conf_file is not None:\n conf_config = config.read(conf_file)\n\n for (k, v) in conf_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(arguments)\n command_config = {\n 'templates_path': arguments.get('<templates_path>'),\n 'static_files_path': arguments.get('--with-static-files-path') \\\n if arguments.get('-with-static-files-path') is not None \\\n else arguments.get('<templates_path>'),\n 'delete_source': arguments.get('--delete'),\n 'protocol': arguments.get('--with-protocol'),\n 'domains': arguments.get('--with-domains').split(',') \\\n if arguments.get('--with-domains') is not None \\\n else None,\n 'md5_len': int(arguments.get('--with-md5-len')),\n 'md5_concat_by': arguments.get('--with-md5-concat-by')\n }\n logger.debug(command_config)\n\n for (k, v) in command_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(custom_config)\n return DictWrapper(custom_config)", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Quant Robot Server')\n\n parser.add_argument('--timeframe', '-t', default = 'M',\n help='datas timeframe: Y-year, W-week, D-day, H-hour, M-minute, T-tick')\n\n parser.add_argument('--data0', \n default='data/AgTD_1D_20061030_20180427.csv',\n help='1st data into the system')\n\n parser.add_argument('--data1',\n default='data/AgLD_1D_20150204_20180427.csv',\n help='2nd data into the system')\n\n parser.add_argument('--fromdate', '-f',\n default='2015-02-04',\n help='Starting date in YYYY-MM-DD format')\n\n parser.add_argument('--enddate', '-e',\n default='2018-04-27',\n help='Starting date in YYYY-MM-DD format')\n\n parser.add_argument('--runnext', action='store_true',\n help='Use next by next instead of runonce')\n\n parser.add_argument('--nopreload', action='store_true',\n help='Do not preload the data')\n\n parser.add_argument('--oldsync', action='store_true',\n help='Use old data synchronization method')\n\n parser.add_argument('--printout', default=False, action='store_true',\n help='Print out the price stdmean and Transactions. 
')\n\n parser.add_argument('--plot', '-p', default=False, action='store_true',\n help='Plot the read data')\n\n parser.add_argument('--numfigs', '-n', default=1,\n help='Plot using numfigs figures')\n\n parser.add_argument('--signalfile', default='',\n help='output the datas ,spread , signals to file.')\n\n parser.add_argument('--tradefile', default='',\n help='output the tradeinfo to file')\n\n parser.add_argument('--orderfile', default='',\n help='output the orderinfo to file')\n\n parser.add_argument('--analysfile', default='',\n help='output the analysis report to file')\n\n #------------------broker and trade parameters----------------------# \n parser.add_argument('--cash', default=100000, type=int,\n help='Starting Cash')\n\n parser.add_argument('--fixed_tradecash', default=False, action='store_true',\n help='固定交易资金量为初始资金,不随盈利和亏损波动. ')\n\n parser.add_argument('--comm', default=2.5, type=float,\n help='fixed commission (2.5')\n\n parser.add_argument('--stake', default=10, type=int,\n help='Stake to apply in each operation')\n\n parser.add_argument('--coc', default=False, action='store_true',\n help='cheat on close, use close[0] to trading.')\n\n parser.add_argument('--ewmmode', action='store_true', default=False,\n help='打开 EWM模式,使用指数移动平均线(expma)方法计算均值和标准差. ')\n\n parser.add_argument('--afamode', action='store_true', default=False,\n help='打开 AFA模式,使用自适应平均线(auto fit average)方法计算均值和标准差. ')\n\n parser.add_argument('--riskfreerate', default=0.04, type=float,\n help='risk free rate (0.04 or 4%')\n\n #------------------strategy parameters----------------------# \n\n parser.add_argument('--fixedzscore', action='store_true', default=False,\n help='Fixed the mean and stddev in bought datetime. ')\n\n parser.add_argument('--period', default=30, type=int,\n help='Period to apply to the Simple Moving Average')\n\n parser.add_argument('--cuscore_limit', default=5600, type=int,\n help='cuscore的阈值,用于判断cuscore是否已经偏离趋势')\n\n parser.add_argument('--cuscore_close', action='store_true', default=False,\n help='关闭cuscore的阈值止损,不再判断cuscore是否已经偏离趋势,不再CUSCORE止损')\n\n parser.add_argument('--adj_period', default=30, type=int,\n help='Period to adjust the mean and stddev')\n\n parser.add_argument('--fixedslope', default=0.0, type=float,\n help='if fixedslope equal 0 then PAIR FACTOR will use the OLS.slope. ')\n\n parser.add_argument('--intercept', default=0.0, type=float,\n help='intercept will effective if fixedslope not equal 0 ')\n\n parser.add_argument('--stop_limit', default=2500, type=float,\n help='upper stop limit. default is none.')\n\n parser.add_argument('--signal_limit', '-u', default=2, type=float,\n help='upper limit. default is 2 * spread_mean.')\n\n parser.add_argument('--medium_limit', default=0.1, type=float,\n help='upper medium. default is 0.1 * spread_mean.')\n\n parser.add_argument('--time_stop', default=30, type=int,\n help='time to stop. default is 30 days.')\n\n return parser.parse_args()", "def parse() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--config\",\n \"-c\",\n default=\"qwauto.cfg\",\n help=\"Config file. 
Defaults to qwauto.cfg.\",\n )\n return parser.parse_args()", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing" ]
[ "0.6683603", "0.6602869", "0.64225274", "0.64112264", "0.64049214", "0.63174736", "0.6263098", "0.6236142", "0.6228275", "0.6186609", "0.6181876", "0.6176194", "0.6171366", "0.6159623", "0.61590225", "0.61305803", "0.6121058", "0.6115244", "0.6084246", "0.60457355", "0.603711", "0.60223705", "0.5990375", "0.59665865", "0.59409565", "0.59400296", "0.5927233", "0.58675086", "0.58514917", "0.58479494" ]
0.78507113
0
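
The record above (its document tail and every hard negative) revolves around one pattern: parse command-line options, merge them with values from a config file, and derive secondary timing settings such as time_sleep and time_recheck from time_away. Below is a minimal, self-contained sketch of that pattern; the option names, the JSON config format and the TIME_AWAY_DEFAULT value are illustrative assumptions, not taken from any of the listed snippets.

import argparse
import json
import os

TIME_AWAY_DEFAULT = 300  # assumed default (seconds), for illustration only


def parse_and_merge(argv=None):
    """Parse CLI args, overlay them on a JSON config file, and derive timing defaults."""
    parser = argparse.ArgumentParser(description="Merge CLI args with a JSON config file")
    parser.add_argument("--config", "-c", help="path to JSON config file")
    parser.add_argument("--time-away", dest="time_away", type=int, default=None)
    parser.add_argument("--iface", default=None)
    args = parser.parse_args(argv)

    # Start from the config file (if any), then let explicit CLI values win.
    merged = {}
    if args.config and os.path.isfile(args.config):
        with open(args.config) as fh:
            merged.update(json.load(fh))
    merged.update({k: v for k, v in vars(args).items() if v is not None})

    # Derive secondary timings from time_away, mirroring the record above.
    merged.setdefault("time_away", TIME_AWAY_DEFAULT)
    merged.setdefault("time_sleep", int(merged["time_away"] / 3))
    merged.setdefault("time_recheck", int(merged["time_away"] / 2) - 10)
    return merged


if __name__ == "__main__":
    print(parse_and_merge([]))
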
This function takes a segmentation mask as input and fills the inpainted vertebra wherever not filled (i.e. not vertebra_id everywhere inside).
def fill_vert(self, mask):
    im_floodfill = np.copy(mask)
    im_floodfill[im_floodfill!=self.vertebra_id] = 0
    im_floodfill[im_floodfill==self.vertebra_id] = 255
    im_floodfill_copy = np.copy(im_floodfill)
    # The size needs to be 2 pixels larger than the image.
    h, w = im_floodfill.shape[:2]
    mask4mask = np.zeros((h+2, w+2), np.uint8)
    # Floodfill from point (0, 0)
    cv2.floodFill(im_floodfill, mask4mask, (0,0), 255)
    # Invert floodfilled image
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    # Combine the two images to get the foreground.
    im_floodfill_inv = im_floodfill_inv | im_floodfill_copy
    im_floodfill_inv[im_floodfill_inv==255] = self.vertebra_id
    mask_filled = mask | im_floodfill_inv
    return mask_filled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def fill_blind_pores(im):\n holes = find_disconnected_voxels(im)\n im[holes] = False\n return im", "def inpaint(self, img_slice, mask_slice, min_x, max_x, min_y, max_y, views='lateral'):\n # create binary mask\n mask = np.zeros(img_slice.shape)\n mask[min_x:max_x, min_y:max_y] = 1\n # keep a copy of original to have background later \n img_orig = np.copy(img_slice)\n mask_binary = np.copy(mask)\n\n # rotate image if coronal\n if views=='coronal':\n img_slice = np.rot90(img_slice, axes=(1, 0)) # image is from lat,ax -> ax,lat\n mask_slice = np.rot90(mask_slice, axes=(1, 0))\n mask = np.rot90(mask, axes=(1, 0))\n \n # prepare binary mask for net\n mask = cv2.resize(mask, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask = torch.Tensor(mask) # gives dtype float32\n mask = mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n\n # prepare seg mask for net\n mask_slice[mask_slice==self.vertebra_id] = 0\n # resize to network size\n mask_seg = cv2.resize(mask_slice, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask_seg = np.uint8(np.round(mask_seg)) # just to be sure\n\n mask_seg = self.map_vert_to_class(mask_seg)\n mask_seg = torch.Tensor(mask_seg) # gives dtype float32\n mask_seg_one_hot = torch.nn.functional.one_hot(mask_seg.long(), num_classes=6)\n mask_seg_one_hot = mask_seg_one_hot.permute(2,0,1)\n mask_seg_one_hot = mask_seg_one_hot.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n\n # prepare img for net \n img_slice = cv2.resize(img_slice, self.resize_size)\n img_slice = np.clip(img_slice, -1024, 3071) # clip to HU units\n img_slice = np.uint8(255*(img_slice+1024)/4095) # normalize to range 0-255 \n img_slice = img_slice[:,:, None]\n img_slice = self.toTensor(img_slice)\n img_slice = img_slice.unsqueeze(0)\n corrupt_img = (1-mask)*img_slice\n\n if self.use_cuda:\n mask = mask.cuda()\n mask_seg = mask_seg.cuda()\n corrupt_img = corrupt_img.cuda() \n\n # inpaint\n if views=='lateral':\n netG = self.netGlat\n elif views=='coronal':\n netG = self.netGcor\n\n # get prediction\n with torch.no_grad():\n _, inpainted_mask, inpainted_img = netG(corrupt_img, mask_seg, mask)\n inpainted_mask = self.softmax(inpainted_mask)\n\n #inpainted_mask = torch.argmax(inpainted_mask, dim=1)\n inpainted_img = inpainted_img * mask + corrupt_img * (1. - mask)\n inpainted_mask = inpainted_mask * mask + mask_seg_one_hot * (1. 
- mask)\n #inpainted_mask = self.map_class_to_vert(inpainted_mask)\n\n # set img back to how it was\n inpainted_img = inpainted_img.squeeze().detach().cpu().numpy()\n inpainted_img = (inpainted_img)*4095 - 1024 # normalize back to HU units \n inpainted_img = cv2.resize(inpainted_img, (self.orig_ax_length, self.orig_ax_length))\n # set mask back\n inpainted_mask = inpainted_mask.squeeze().detach().cpu().numpy()\n inpainted_mask_resized = np.zeros((6, self.orig_ax_length, self.orig_ax_length))\n for i in range(6):\n if views=='coronal':\n inpainted_mask_resized[i,:,:] = np.rot90(cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length))) #, interpolation=cv2.INTER_NEAREST)\n else:\n inpainted_mask_resized[i,:,:] = cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length)) #, interpolation=cv2.INTER_NEAREST)\n inpainted_mask = inpainted_mask_resized\n \n if views=='coronal':\n inpainted_img = np.rot90(inpainted_img) #, axes=(1, 0))\n\n return inpainted_img, inpainted_mask, mask_binary", "def clean_mask(mask, background=0):\n kernels = [\n np.array([[ 1, -1, -1], [-1, 1, -1], [-1, -1, -1]]), # top left standalone pixel\n np.array([[-1, -1, 1], [-1, 1, -1], [-1, -1, -1]]), # top right standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [ 1, -1, -1]]), # bottom left standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [-1, -1, 1]]) # bottom right standalone pixel\n ]\n\n proc_masks = [cv2.morphologyEx(mask, cv2.MORPH_HITMISS, kernel).astype(np.bool) for kernel in kernels]\n\n for proc_mask in proc_masks:\n mask[proc_mask] = background\n return mask", "def segment_cells(frame, mask=None):\n \n blurred = filters.gaussian(frame, 2)\n ridges = enhance_ridges(frame)\n \n # threshold ridge image\n thresh = filters.threshold_otsu(ridges)\n thresh_factor = 0.5\n prominent_ridges = ridges > thresh_factor*thresh\n prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)\n prominent_ridges = morphology.binary_closing(prominent_ridges)\n prominent_ridges = morphology.binary_dilation(prominent_ridges)\n \n # skeletonize\n ridge_skeleton = morphology.medial_axis(prominent_ridges)\n ridge_skeleton = morphology.binary_dilation(ridge_skeleton)\n ridge_skeleton *= mask\n ridge_skeleton = np.bitwise_xor(ridge_skeleton, mask)\n \n # label\n cell_label_im = measure.label(ridge_skeleton)\n \n # morphological closing to fill in the cracks\n for cell_num in range(1, cell_label_im.max()+1):\n cell_mask = cell_label_im==cell_num\n cell_mask = morphology.binary_closing(cell_mask, disk(3))\n cell_label_im[cell_mask] = cell_num\n \n return cell_label_im", "def __mask_region(self, img, vertices):\n\n mask = np.zeros_like(img) \n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n cv2.fillConvexPoly(mask, vertices, ignore_mask_color)\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def mask(self):\n\n mask = np.zeros(shape=(self._info.height, self._info.width), dtype=np.uint8)\n\n self.draw(image=mask, color=constants.COLOR_WHITE_MONO)\n\n mask_with_border = np.pad(mask, 1, 'constant', constant_values=255)\n\n cv2.floodFill(image=mask,\n mask=mask_with_border,\n seedPoint=(int(self.middle_point[0]), int(self.middle_point[1])),\n newVal=constants.COLOR_WHITE_MONO)\n\n return mask", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def fill_sat_holes (data_mask, mask_value):\n\n value_sat = mask_value['saturated']\n value_satcon = mask_value['saturated-connected']\n mask_satcon = ((data_mask & value_sat == value_sat) |\n (data_mask & value_satcon == value_satcon))\n struct = np.ones((3,3), dtype=bool)\n mask_satcon = ndimage.binary_closing(mask_satcon, structure=struct)\n mask_satcon = ndimage.binary_fill_holes(mask_satcon, structure=struct)\n mask_satcon2add = (mask_satcon & (data_mask==0))\n 
data_mask[mask_satcon2add] = value_satcon", "def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask", "def mask(self, byclass, height, width, margin=0, figsize=(10, 10), dpi=180):\n # make ~binary mask using available classes\n style = {cls: ('k', '-') for cls in byclass}\n fig = Figure(figsize=figsize)\n fig.tight_layout(pad=0)\n fig.subplots_adjust(hspace=0, wspace=0, left=0, right=1, bottom=0, top=1)\n canvas = FigureCanvas(fig)\n ax = fig.subplots(1, 1)\n self.show_style(ax, style, byclass)\n ax.set_xlim(0 - margin, height + margin)\n ax.set_ylim(0 - margin, width + margin)\n canvas.draw()\n mask = self.figure_buffer(fig, dpi=dpi)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n # fill in the gaps via:\n # https://www.learnopencv.com/filling-holes-in-an-image-using-opencv-python-c/\n _, thresholded = cv2.threshold(mask, 220, 255, cv2.THRESH_BINARY_INV);\n floodfilled = thresholded.copy()\n h, w = thresholded.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(floodfilled, mask, (0, 0), 255);\n mask = cv2.bitwise_not(thresholded | cv2.bitwise_not(floodfilled))\n return mask", "def unmold_mask(mask, bbox, image_shape):\n threshold = 0.5\n y1, x1, y2, x2 = bbox\n mask = scipy.misc.imresize(\n mask, (y2 - y1, x2 - x1), interp='bilinear').astype(np.float32) / 255.0\n mask = np.where(mask >= threshold, 1, 0).astype(np.uint8)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2], dtype=np.uint8)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask", "def mask_border(self, left=3, right=3, top=3, bottom=3):\n self.MaskPrefix = 'b' + self.MaskPrefix #prepend 'b' for border\n print('Masking edge pixels: left={0}, right={1}, top={2}, bottom={3}'.format(left,right,top,bottom))\n for ig in self.Set:\n igram = self.load_ma(ig)\n igram[:top,:] = ma.masked\n igram[-bottom:,:] = ma.masked\n igram[:,:left] = ma.masked\n igram[:,-right:] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n print('mask_border() complete: {0} interferograms'.format(self.Set.Nig))", "def make_lungmask(img, display=False):\n row_size= img.shape[0]\n col_size = img.shape[1]\n \n mean = np.mean(img)\n std = np.std(img)\n img = img-mean\n img = img/std\n\n # uses hounsfield values near lungs to normalize images\n\n middle = img[int(col_size/5):int(col_size/5*4),int(row_size/5):int(row_size/5*4)] \n mean = np.mean(middle) \n max = 
np.max(img)\n min = np.min(img)\n img[img==max]=mean\n img[img==min]=mean\n \n # uses kmeans to separate foreground (soft tissue / bone) and background (lung/air)\n\n kmeans = KMeans(n_clusters=2).fit(np.reshape(middle,[np.prod(middle.shape),1]))\n centers = sorted(kmeans.cluster_centers_.flatten())\n threshold = np.mean(centers)\n thresh_img = np.where(img<threshold,1.0,0.0)\n\n # performs erosion and dilation\n\n eroded = morphology.erosion(thresh_img,np.ones([3,3]))\n dilation = morphology.dilation(eroded,np.ones([8,8]))\n\n labels = measure.label(dilation) # Different labels are displayed in different colors\n label_vals = np.unique(labels)\n regions = measure.regionprops(labels)\n good_labels = []\n for prop in regions:\n B = prop.bbox\n if B[2]-B[0]<row_size/10*9 and B[3]-B[1]<col_size/10*9 and B[0]>row_size/5 and B[2]<col_size/5*4:\n good_labels.append(prop.label)\n mask = np.ndarray([row_size,col_size],dtype=np.int8)\n mask[:] = 0\n\n # makes mask\n\n for N in good_labels:\n mask = mask + np.where(labels==N,1,0)\n mask = morphology.dilation(mask,np.ones([10,10])) # one last dilation\n final = mask * img\n \n # shows and saves output\n\n plt.imshow(final)\n im = Image.fromarray(final*128)\n im = im.convert(\"L\")\n im.save(S)\n \n return", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def map_class_to_vert(self, inpainted_mask):\n inpainted_mask[inpainted_mask==1] = self.vertebra_range[0]\n inpainted_mask[inpainted_mask==2] = self.vertebra_range[1]\n inpainted_mask[inpainted_mask==3] = self.vertebra_range[2]\n inpainted_mask[inpainted_mask==4] = self.vertebra_range[3]\n inpainted_mask[inpainted_mask==5] = self.vertebra_range[4]\n return inpainted_mask", "def set_region(img, vertices):\n mask = np.zeros_like(img)\n channel_count = img.shape[2]\n match_mask_color = (255,) * channel_count\n cv2.fillPoly(mask, vertices, match_mask_color)\n\n masked_img = cv2.bitwise_and(img, mask)\n\n new_mask = np.zeros(masked_img.shape[:2], np.uint8)\n\n bg = np.ones_like(masked_img, np.uint8) * 255\n cv2.bitwise_not(bg, bg, mask=new_mask)\n\n return masked_img", "def applymask(self,mask):\n self.spec[mask==0]=np.nan", "def unmold_mask(mask, bbox, image_shape):\n threshold = 0.5\n y1, x1, y2, x2 = bbox\n mask = resize(mask, (y2 - y1, x2 - x1))\n mask = np.where(mask >= threshold, 1, 0).astype(np.bool)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2], dtype=np.bool)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask", "def mask_creation(image, mask_path, image_index):\n # convert image to hsv color space\n image = cv2.imread(image)\n \n im_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(im_hsv)\n\n # compute the mean value of hue, saturation and value for the border of the image\n hue_mean_border = (np.mean(h[0, :]) + np.mean(h[:, 0]) + np.mean(h[-1, :]) + np.mean(h[:, -1]))/4\n saturation_mean_border = (np.mean(s[0, :]) + np.mean(s[:, 0]) + np.mean(s[-1, :]) + np.mean(s[:, -1]))/4\n value_mean_border = (np.mean(v[0, :]) + np.mean(v[:, 0]) + np.mean(v[-1, :]) + np.mean(v[:, -1]))/4\n\n # compute lower and upper limits for the mask\n # we need to find the good limits to segment the background by color\n lower_hue = (hue_mean_border - 40)\n upper_hue = (hue_mean_border + 40)\n lower_saturation = 
(saturation_mean_border - 20)\n upper_saturation = (saturation_mean_border + 20)\n lower_value = (value_mean_border - 200)\n upper_value = (value_mean_border + 200)\n\n lower_limit = np.array([lower_hue, lower_saturation, lower_value])\n upper_limit = np.array([upper_hue, upper_saturation, upper_value])\n\n # create mask\n mask = cv2.inRange(im_hsv, lower_limit, upper_limit)\n mask = cv2.bitwise_not(mask)\n\n # resize masks\n n_mask, m_mask = mask.shape[0], mask.shape[1]\n mask = cv2.resize(mask, (1000, 1000)) \n\n # apply mask to find contours\n mask = np.uint8(mask)\n \n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # create new mask with the contours found\n mask_contours = cv2.fillPoly(mask, contours, [255, 255, 255])\n\n # Apply morphological filter to clean\n kernel = np.ones((9, 9), np.float32)/25\n mask_erode = cv2.morphologyEx(mask_contours, cv2.MORPH_ERODE, kernel, iterations = 1)\n mask_dilate = cv2.morphologyEx(mask_erode, cv2.MORPH_DILATE, kernel, iterations = 1)\n\n # resize masks to original size\n new_mask = cv2.resize(mask_dilate, (m_mask, n_mask))\n\n # save mask image inside the same folder as the image\n # cv2.imwrite(mask_path + str(image_index).zfill(2) + \"_mask.png\", new_mask)\n\n return new_mask", "def mask_particle_rect(image,bounded_rectangle, color):\r\n\r\n \r\n masked_image = cv2.rectangle(image, (int(bounded_rectangle[0])-1, int(bounded_rectangle[1])-1),\r\n (int(bounded_rectangle[0]+bounded_rectangle[2])+1, \r\n int(bounded_rectangle[1]+bounded_rectangle[3])+1),\r\n color, -1)\r\n\r\n return masked_image", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def map_vert_to_class(self, mask_seg):\n mask_seg[mask_seg==self.vertebra_range[0]] = 1\n mask_seg[mask_seg==self.vertebra_range[1]] = 2\n mask_seg[mask_seg==self.vertebra_range[2]] = 3\n mask_seg[mask_seg==self.vertebra_range[3]] = 4\n mask_seg[mask_seg==self.vertebra_range[4]] = 5\n vert_values=np.arange(1,6)\n found_vert = np.in1d(mask_seg, vert_values)\n found_vert = np.reshape(found_vert, mask_seg.shape)\n mask_seg[found_vert==False] = 0\n return mask_seg", "def fuse_images_without_background(img, img_to_insert, img_to_insert_segmentation_mask, box):\r\n x1, y1, x2, y2 = box\r\n # Take the patch of the original image that will suffer modification\r\n original_img_patch = img[y1:y2, x1:x2]\r\n # Extract a boolean mask containing the background\r\n background_mask = resize(image=img_to_insert_segmentation_mask, output_shape=original_img_patch.shape[:2],preserve_range=True,\r\n anti_aliasing=False).astype(np.bool)\r\n # Paste the non background part of img_to_insert in this patch\r\n original_img_patch[background_mask] = img_to_insert[background_mask]\r\n # Put again the modified patch into img\r\n img[y1:y2, x1:x2] = original_img_patch\r\n return img", "def get_regions_mask(self, input):", "def fillNDIGap(self, knn):\n nirrho = self.readImage(self.nirfile, self.rhoband).astype(np.float_)\n nirnhits = self.readImage(self.nirfile, self.nhitsband).astype(np.int)\n nirmask = self.readImage(self.nirfile, self.maskband).astype(np.bool_)\n \n swirrho = self.readImage(self.swirfile, self.rhoband).astype(np.float_)\n swirnhits = self.readImage(self.swirfile, self.nhitsband).astype(np.int)\n swirmask = self.readImage(self.swirfile, self.maskband).astype(np.bool_)\n\n hitmask = 
np.logical_and(np.greater(nirnhits, 0), np.greater(swirnhits, 0))\n if not hitmask.any():\n # no valid hit at all!\n print \"Error, no shot has returns! Check your data\"\n sys.exit()\n xhit, yhit = np.where(hitmask)\n nirrhohit = nirrho[hitmask]/nirnhits[hitmask]\n swirrhohit = swirrho[hitmask]/swirnhits[hitmask]\n\n ndi = np.zeros_like(nirrho)\n mask = np.zeros_like(nirrho, dtype=int) + 3\n tmpflag = np.logical_and(np.invert(nirmask), np.invert(swirmask))\n mask[tmpflag] = 0\n \n ndihit = (nirrhohit - swirrhohit) / (nirrhohit + swirrhohit)\n ndi[hitmask] = ndihit\n mask[hitmask] = 1\n \n nirgapmask = np.logical_and(np.equal(nirnhits, 0), np.greater(swirnhits, 0))\n swirgapmask = np.logical_and(np.greater(nirnhits, 0), np.equal(swirnhits, 0))\n\n if (not nirgapmask.any()) and (not swirgapmask.any()):\n # no gap\n print \"No fillable gap.\"\n return ndi, mask\n\n gapmask = np.logical_or(nirgapmask, swirgapmask)\n xgap, ygap = np.where(gapmask)\n\n X = np.hstack((xhit.reshape(len(xhit), 1), yhit.reshape(len(yhit), 1))).astype(np.float32)\n T = np.hstack((xgap.reshape(len(xgap), 1), ygap.reshape(len(ygap), 1))).astype(np.float32)\n ndigap = self.fillGap(X, ndihit, T, knn)\n ndi[gapmask] = ndigap\n mask[gapmask] = 2\n\n self.ndi = ndi\n self.mask = mask\n \n return ndi, mask", "def mask_frame(image, mask):\n # For segmentation mask display\n mask[mask == 1] = 2\n mask[mask == 0] = 1\n\n # segmentation will display final output\n segmentation = image\n segmentation = cv2.cvtColor(segmentation, cv2.COLOR_BGR2RGB)\n\n segmentation[:, :, 0] = segmentation[:, :, 0] * mask\n segmentation[:, :, 1] = segmentation[:, :, 1] * mask\n return segmentation", "def generate_effective_mask(self, mask_size: tuple, polygons_ignore):\n mask = np.ones(mask_size, dtype=np.uint8)\n\n for poly in polygons_ignore:\n instance = poly.astype(np.int32).reshape(1, -1, 2)\n cv2.fillPoly(mask, instance, 0)\n\n return mask", "def fill_region(image,mask,value=1):\n\tim = image.copy().ravel()\n\tif image.ndim > 2:\n\t\tim_h, im_w, im_ch = image.shape\n\telse:\n\t\tim_ch = 1\n\t\tim_h, im_w = self.image.shape\n\t# linear indices of masked pixels\n\tind = masked_indices(mask)\n\tfor i in ind:\n\t\tfor ch in range(im_ch):\n\t\t\tim.data[i*im_ch+ch] = value\n\treturn im.reshape(image.shape)" ]
[ "0.62132704", "0.61124694", "0.6104057", "0.59376204", "0.5899385", "0.5842893", "0.5798474", "0.57937765", "0.57837266", "0.577448", "0.57716745", "0.57418275", "0.57412785", "0.5726708", "0.57038915", "0.56873864", "0.56623226", "0.5640787", "0.56124526", "0.56116915", "0.5609737", "0.5595169", "0.5593159", "0.55788964", "0.55724376", "0.5570057", "0.55618", "0.5561316", "0.554134", "0.55392325" ]
0.6978054
0
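
The fill_vert document above uses a standard OpenCV idiom: flood-fill the background from a corner, invert the result, and OR it back into the binary mask so that interior holes of the labelled vertebra are filled. Below is a standalone sketch of that idiom on a toy mask, assuming the top-left pixel is background; the label value 7 and the 9x9 shape are made-up illustration values.

import cv2
import numpy as np


def fill_label_holes(mask, label):
    """Fill interior holes of the region carrying `label` in a 2D integer mask."""
    binary = np.where(mask == label, 255, 0).astype(np.uint8)

    # Flood-fill the background starting at the (assumed empty) top-left corner.
    flood = binary.copy()
    h, w = binary.shape
    ff_mask = np.zeros((h + 2, w + 2), np.uint8)  # floodFill needs a mask 2 px larger
    cv2.floodFill(flood, ff_mask, (0, 0), 255)

    # Pixels not reached by the flood fill are the holes; merge them back in.
    holes = cv2.bitwise_not(flood)
    filled = binary | holes
    out = mask.copy()
    out[filled == 255] = label
    return out


if __name__ == "__main__":
    toy = np.zeros((9, 9), np.uint8)
    toy[2:7, 2:7] = 7        # a square labelled 7 ...
    toy[4, 4] = 0            # ... with a one-pixel hole inside
    assert fill_label_holes(toy, 7)[4, 4] == 7
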
This function inpaints a 2D image and mask
def inpaint(self, img_slice, mask_slice, min_x, max_x, min_y, max_y, views='lateral'):
    # create binary mask
    mask = np.zeros(img_slice.shape)
    mask[min_x:max_x, min_y:max_y] = 1
    # keep a copy of original to have background later
    img_orig = np.copy(img_slice)
    mask_binary = np.copy(mask)

    # rotate image if coronal
    if views=='coronal':
        img_slice = np.rot90(img_slice, axes=(1, 0)) # image is from lat,ax -> ax,lat
        mask_slice = np.rot90(mask_slice, axes=(1, 0))
        mask = np.rot90(mask, axes=(1, 0))

    # prepare binary mask for net
    mask = cv2.resize(mask, self.resize_size, interpolation=cv2.INTER_NEAREST)
    mask = torch.Tensor(mask) # gives dtype float32
    mask = mask.unsqueeze(0)
    mask = mask.unsqueeze(0)

    # prepare seg mask for net
    mask_slice[mask_slice==self.vertebra_id] = 0
    # resize to network size
    mask_seg = cv2.resize(mask_slice, self.resize_size, interpolation=cv2.INTER_NEAREST)
    mask_seg = np.uint8(np.round(mask_seg)) # just to be sure

    mask_seg = self.map_vert_to_class(mask_seg)
    mask_seg = torch.Tensor(mask_seg) # gives dtype float32
    mask_seg_one_hot = torch.nn.functional.one_hot(mask_seg.long(), num_classes=6)
    mask_seg_one_hot = mask_seg_one_hot.permute(2,0,1)
    mask_seg_one_hot = mask_seg_one_hot.unsqueeze(0)
    mask_seg = mask_seg.unsqueeze(0)
    mask_seg = mask_seg.unsqueeze(0)

    # prepare img for net
    img_slice = cv2.resize(img_slice, self.resize_size)
    img_slice = np.clip(img_slice, -1024, 3071) # clip to HU units
    img_slice = np.uint8(255*(img_slice+1024)/4095) # normalize to range 0-255
    img_slice = img_slice[:,:, None]
    img_slice = self.toTensor(img_slice)
    img_slice = img_slice.unsqueeze(0)
    corrupt_img = (1-mask)*img_slice

    if self.use_cuda:
        mask = mask.cuda()
        mask_seg = mask_seg.cuda()
        corrupt_img = corrupt_img.cuda()

    # inpaint
    if views=='lateral':
        netG = self.netGlat
    elif views=='coronal':
        netG = self.netGcor

    # get prediction
    with torch.no_grad():
        _, inpainted_mask, inpainted_img = netG(corrupt_img, mask_seg, mask)
        inpainted_mask = self.softmax(inpainted_mask)

    #inpainted_mask = torch.argmax(inpainted_mask, dim=1)
    inpainted_img = inpainted_img * mask + corrupt_img * (1. - mask)
    inpainted_mask = inpainted_mask * mask + mask_seg_one_hot * (1. - mask)
    #inpainted_mask = self.map_class_to_vert(inpainted_mask)

    # set img back to how it was
    inpainted_img = inpainted_img.squeeze().detach().cpu().numpy()
    inpainted_img = (inpainted_img)*4095 - 1024 # normalize back to HU units
    inpainted_img = cv2.resize(inpainted_img, (self.orig_ax_length, self.orig_ax_length))
    # set mask back
    inpainted_mask = inpainted_mask.squeeze().detach().cpu().numpy()
    inpainted_mask_resized = np.zeros((6, self.orig_ax_length, self.orig_ax_length))
    for i in range(6):
        if views=='coronal':
            inpainted_mask_resized[i,:,:] = np.rot90(cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length))) #, interpolation=cv2.INTER_NEAREST)
        else:
            inpainted_mask_resized[i,:,:] = cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length)) #, interpolation=cv2.INTER_NEAREST)
    inpainted_mask = inpainted_mask_resized

    if views=='coronal':
        inpainted_img = np.rot90(inpainted_img) #, axes=(1, 0))

    return inpainted_img, inpainted_mask, mask_binary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def show_mask(image, mask): \n plt.subplot(1,2,1)\n plt.title('image')\n plt.imshow(image)\n plt.subplot(1,2,2)\n plt.title('mask')\n plt.imshow(mask)\n plt.show()", "def overlay(image, mask):\n if len(image.shape) == 3:\n image = image[:, :, 0]\n if len(mask.shape) == 3:\n mask = mask[:, :, 0]\n if np.amax(image) > 100:\n image = image / 255\n\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def Inpainting_mosaic(im, mask):\n im0 = np.copy(im) # Lager kopi av bilde\n im[im < 0] = 0 # klipp til lovlige verdier\n im[im > 1] = 1\n for i in range(25): # Itererer \n im = eksplisitt(im, n=1) # Løser ved hjelp av eksplisittfunksjon\n im[np.logical_not(mask)] = im0[np.logical_not(mask)] \n return im", "def draw_mask(self, image, ship, dims):\n # Get the center x, y and the size s\n x, y, s, r, m = dims\n\n #Load files\n if ship == 'cruiseship':\n ma_path = 'ships/cruiseship_isolated_mask.png'\n id = 1\n elif ship == 'tanker':\n ma_path = 'ships/tanker_isolated_mask.png'\n id = 2\n\n #Transforming mask\n mask = cv2.imread(ma_path)\n mask_transformed = self.transform(mask.copy(), dims)\n mask_shape = np.shape(mask_transformed)\n s_x = int((mask_shape[0]+0.5)//2)\n s_y = int((mask_shape[1]+0.5)//2)\n mask_transformed = mask_transformed[0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[0],\n 0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[1],\n :]\n\n mask_transformed_th = self.threshold(mask_transformed)\n\n #Adding mask to image\n image[x-s_x:x+s_x, y-s_y:y+s_y, :] = id/255*mask_transformed_th\n\n return image", "def mask_image(image):\n pass", "def overlay_mask(img, mask, transparency=0.5):\n im_over = np.ndarray(img.shape)\n im_over[:, :, 0] = (1 - mask[:, :, 0]) * img[:, :, 0] + mask[:, :, 0] * (\n 255 * transparency + (1 - transparency) * img[:, :, 0])\n im_over[:, :, 1] = (1 - mask[:, :, 1]) * img[:, :, 1] + mask[:, :, 1] * (\n 255 * transparency + (1 - transparency) * img[:, :, 1])\n im_over[:, :, 2] = (1 - mask[:, :, 2]) * img[:, :, 2] + mask[:, :, 2] * (\n 255 * transparency + (1 - transparency) * img[:, :, 2])\n return im_over", "def flip_inpainting(input_nchw: torch.Tensor,\n mask_n1hw: torch.Tensor,\n logger: Optional[logging.Logger] = None) -> torch.Tensor:\n # Error handling for missing or wrong parameters\n # Initialize logger, if not provided.\n if not logger:\n logger = logging.getLogger(__name__)\n\n if input_nchw.is_floating_point():\n raise TypeError('Tensor must be of integer data type.')\n\n sanity_checks.verify_batch_size(input_nchw, mask_n1hw)\n\n inpainted_img_rgb_nchw: torch.Tensor = torch.zeros(0,\n dtype=torch.float,\n device=DEVICE)\n batch_size: int = utils.get_batch_size(input_nchw=input_nchw)\n\n # Loop over all images and masks in batch\n for batch_index in range(batch_size):\n mask_1hw: torch.Tensor = mask_n1hw[batch_index].to(device=DEVICE)\n input_chw: torch.Tensor = input_nchw[batch_index].to(device=DEVICE)\n\n logger.debug(\"Mask %s will flip a total of %s elements in image.\",\n batch_index,\n mask_1hw.count_nonzero().item())\n\n # Reduce number of channels in mask from 3 to 1.\n mask_arr_hw1: numpy.ndarray = tensor_to_opencv_inpainting(\n img_rgb_chw=mask_1hw, 
grayscale=True)\n img_bgr_hwc: numpy.ndarray = tensor_to_opencv_inpainting(\n img_rgb_chw=input_chw)\n\n inpainted_img_bgr_hwc: numpy.ndarray = cv2.inpaint(img_bgr_hwc,\n mask_arr_hw1,\n 3,\n cv2.INPAINT_TELEA)\n\n # Convert back inpainted image to tensor\n inpainted_img_rgb_chw: torch.Tensor = opencv_to_tensor(\n img_bgr_hwc=inpainted_img_bgr_hwc).to(device=DEVICE)\n\n # Simulate batch by adding a new dimension using unsqueeze(0)\n # Concatenate inpainted image with the rest of the batch of inpainted images.\n # Shape of inpainted_img_rgb_nchw: (batch_size, 3, height, width)\n inpainted_img_rgb_nchw = torch.cat(\n (inpainted_img_rgb_nchw, inpainted_img_rgb_chw.unsqueeze(0))).to(device=DEVICE)\n\n return inpainted_img_rgb_nchw", "def __init__(self, g_impath, f_impath):\n self.image_g = cv2.imread(g_impath)\n assert self.image_g is not None\n if f_impath is None:\n self.image_f = self.image_g\n else:\n self.image_f = cv2.imread(f_impath)\n assert self.image_f is not None\n self.f_path = f_impath\n self.g_path = g_impath\n self.mask = np.zeros_like(self.image_g)\n self.draw = False\n self.size = 5\n self.image_g_reset = self.image_g.copy()\n self.image_f_reset = self.image_f.copy()\n self.mask_reset = self.mask.copy()\n self.original_mask_copy = np.zeros(self.image_f.shape)\n self.window_name = \"Draw mask: s-save; r:reset; q:quit; l:larger painter; m:smaller painter\"\n self.window_name_move = \"Move mask: s-save; r:reset; q:quit;\"\n self.to_move = False\n self.move=False\n self.x0 = 0\n self.y0 = 0\n self.is_first = True\n self.xi = 0\n self.yi = 0", "def paintMask(self):\n if self.avatarConfiguration[\"mask\"]:\n if not os.path.isfile(MASK_UPLOAD):\n image = self.parent.getPlayer().getImageLabel()\n filePath = GG.genteguada.GenteGuada.getInstance().getDataPath(image)\n guiobjects.generateImageSize(filePath, [244, 244], IMG_UPLOAD)\n self.generateMask(\"imgUpload.png\")\n imgPath = MASK_UPLOAD\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\"))\n self.newAvatarImage(imgPath, \"mask\")", "def on_draw_over_image(self):", "def apply_mask_to_image(img, mask):\n img_size = img.shape[0]\n mask = cv2.resize(mask, dsize=(img_size, img_size))\n\n # Find contour of the mask\n imgray = mask\n ret,thresh = cv2.threshold(imgray, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw contours on image\n segmented_img = cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n return segmented_img", "def vis_mask(img, mask,width,height, col, alpha=0.4, show_border=True, border_thick= -1):\n\n img = img.astype(np.float32)\n idx = np.nonzero(mask)\n #np.PredictionBoxes(col)\n img[idx[0], idx[1], :] *= 1.0 - alpha\n img[idx[0], idx[1], :] += alpha * (400/255.0)\n\n if show_border:\n _, contours, _ = cv2.findContours(\n mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(img, contours, -1,col, border_thick, cv2.LINE_AA)\n #cv2.drawContours(c, contours, -1, 1, border_thick, cv2.LINE_AA)\n\n return img.astype(np.uint8)", "def draw_mask(img, percentage_x=100, percentage_y=100, offset_x=0, offset_y=0, rotation=0, rectangle=True):\n ydim, xdim = img.shape\n mask = np.zeros((ydim, xdim))\n\n # Convert percentages to fractions\n offset_x = (xdim * offset_x/100)\n offset_y = (ydim * offset_y/100)\n percentage_x = percentage_x/100\n percentage_y = percentage_y/100\n\n if rectangle is False:\n x_rad = 
np.floor((img.shape[1]/2) * percentage_x)\n y_rad = np.floor((img.shape[0]/2) * percentage_y)\n\n x_center = img.shape[1]//2 + offset_x\n y_center = img.shape[0]//2 - offset_y\n\n\n [x, y] = draw.ellipse(y_center, x_center, y_rad, x_rad, shape = img.shape, rotation=rotation)\n\n else:\n ysub = ydim * (1 - percentage_y)\n y1 = max(ysub/2 - offset_y, 0)\n y2 = min(ydim - ysub/2 - offset_y, ydim)\n r_coords = np.array([y1, y1, y2, y2, y1])\n\n xsub = xdim * (1 - percentage_x)\n x1 = max(xsub/2 + offset_x,0)\n x2 = min(xdim - xsub/2 + offset_x, xdim)\n c_coords = np.array([x1, x2, x2, x1, x1])\n\n x, y = draw.polygon(r_coords, c_coords)\n\n mask[x, y] = 1\n\n return(mask)", "def test(shape=(1000,2000)):\n mask = Mask()\n mask.addCircle(400,300,250)\n mask.subtractCircle(400,300,150)\n mask.addRectangle(350,250,1500,700)\n plt.imshow( mask.getMask(shape) )\n return mask", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def blending_example1():\n pic_desert = read_image(relpath(\"./externals/pic_desert.jpg\"), 2)\n pic_pool = read_image(relpath(\"./externals/pic_swim.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_desert.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n print(pic_desert.shape[2])\n [R1, G1, B1] = np.dsplit(pic_desert, 
pic_desert.shape[2])\n [R2, G2, B2] = np.dsplit(pic_pool, pic_pool.shape[2])\n R1 = np.reshape(R1, (512,1024))\n R2 = np.reshape(R2, (512,1024))\n G1 = np.reshape(G1, (512,1024))\n G2 = np.reshape(G2, (512,1024))\n B1 = np.reshape(B1, (512,1024))\n B2 = np.reshape(B2, (512,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_desert)\n ax2.imshow(pic_pool)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_desert, pic_pool, mask, new_pic", "def show_image(self, mask=np.ones((32, 32), dtype=bool)):\n image = np.copy(self.__image)\n image[~mask] = 0\n plt.imshow(image, aspect=\"auto\")\n plt.show()", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def on_draw_over_backgroundimage(self):", "def inpaint(self):\n\n self._validate_inputs()\n self._initialize_attributes()\n\n start_time = time.time()\n keep_going = True\n while keep_going:\n self._find_front()\n print(self.front.shape)\n #imwrite('front.jpg',self.front)\n if self.plot_progress:\n self._plot_image()\n\n self._update_priority()\n\n target_pixel = self._find_highest_priority_pixel()\n find_start_time = time.time()\n source_patch = self._find_source_patch(target_pixel)\n #print('Time to find best: %f seconds'\n #% (time.time()-find_start_time))\n\n self._update_image(target_pixel, source_patch)\n\n keep_going = not self._finished()\n\n print('Took %f seconds to complete' % (time.time() - start_time))\n return self.working_image", "def show_holes_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n radius=9\n out_image = img.copy()\n out_image = cv2.cvtColor(out_image, cv2.COLOR_GRAY2RGB)\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n y_center = (dy.start + dy.stop - 1)/2 \n center=(x_center,y_center)\n cv2.circle(out_image, center, radius,(111,17,108),thickness=2)\n\n plt.figure()\n plt.imshow(out_image)\n plt.autoscale(False)\n return out_image", "def img_process(fgMask):\n backSub = cv.createBackgroundSubtractorKNN()\n kernel1 = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(2,2))\n kernel2 = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(2,2))\n #kernel1 = np.ones((3,3),np.uint8)\n #kernel2 = np.ones((3,3), np.uint8)\n\n fgMask = cv.threshold(fgMask, 230, 255, cv.THRESH_BINARY)[1]\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_OPEN, kernel1,iterations = 2)\n fgMask = cv.dilate(fgMask, kernel2, iterations = 2)\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_CLOSE, kernel2, iterations = 2)\n return fgMask", "def imshow_overlay(im, mask, alpha=0.5, color='red', **kwargs):\n mask = mask > 0\n mask = ma.masked_where(~mask, mask) \n plt.imshow(im, **kwargs)\n plt.imshow(mask, 
alpha=alpha, cmap=ListedColormap([color]))", "def __mask_region(self, img, vertices):\n\n mask = np.zeros_like(img) \n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n cv2.fillConvexPoly(mask, vertices, ignore_mask_color)\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? 
(y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] = np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]", "def get_mask(self, img):\n raise NotImplementedError()", "def mask_show(image, mask, groups, name=\"image\"):\n img = cv2.addWeighted(image, 0.4, mask, 0.6, 0)\n img = sg.mark_boundaries(img, groups, color=(1,1,1))\n cv2.imshow(name, img)\n cv2.waitKey(0)", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def draw_mask(image_shape, geometry, antialias=False):\n import celiagg\n image = numpy.zeros(image_shape, dtype=numpy.uint8, order='F')\n # NB celiagg uses (h, w) C-order convention for image shapes, so give it the transpose\n canvas = celiagg.CanvasG8(image.T)\n state = celiagg.GraphicsState(drawing_mode=celiagg.DrawingMode.DrawFill, 
anti_aliased=antialias)\n fill = celiagg.SolidPaint(1,1,1)\n transform = celiagg.Transform()\n canvas.draw_shape(geometry, transform, state, fill=fill)\n return image" ]
[ "0.7301254", "0.6654888", "0.6625699", "0.63060254", "0.6295693", "0.62804675", "0.6265031", "0.62536275", "0.6225253", "0.62194", "0.6191099", "0.61354077", "0.6087833", "0.6080922", "0.60753363", "0.6053279", "0.603352", "0.6032018", "0.6021544", "0.60159034", "0.6003691", "0.5980015", "0.5974111", "0.59459347", "0.58890116", "0.5880498", "0.58711445", "0.5801452", "0.5769561", "0.5767248" ]
0.7023641
1
The reverse operation of map_vert_to_class. Here the mask includes labels 1-5 which are mapped back to the original labels in the scan
def map_class_to_vert(self, inpainted_mask):
    inpainted_mask[inpainted_mask==1] = self.vertebra_range[0]
    inpainted_mask[inpainted_mask==2] = self.vertebra_range[1]
    inpainted_mask[inpainted_mask==3] = self.vertebra_range[2]
    inpainted_mask[inpainted_mask==4] = self.vertebra_range[3]
    inpainted_mask[inpainted_mask==5] = self.vertebra_range[4]
    return inpainted_mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_vert_to_class(self, mask_seg):\n mask_seg[mask_seg==self.vertebra_range[0]] = 1\n mask_seg[mask_seg==self.vertebra_range[1]] = 2\n mask_seg[mask_seg==self.vertebra_range[2]] = 3\n mask_seg[mask_seg==self.vertebra_range[3]] = 4\n mask_seg[mask_seg==self.vertebra_range[4]] = 5\n vert_values=np.arange(1,6)\n found_vert = np.in1d(mask_seg, vert_values)\n found_vert = np.reshape(found_vert, mask_seg.shape)\n mask_seg[found_vert==False] = 0\n return mask_seg", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def label_to_mask(labels):\n # get the image size\n h, w = labels.shape\n\n # build a color to label map\n idx_to_color = {}\n for label in class_info:\n idx_to_color[class_info[label].id] = class_info[label].color\n\n # generate label matrix\n mask = np.zeros((h, w, 3), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n id = labels[y, x]\n r, g, b = idx_to_color[id]\n mask[y, x] = np.array([b, g, r])\n\n return mask", "def decode_labels(mask, num_classes=41):\n h, w = mask.shape\n outputs = np.zeros((h, w, 3), dtype=np.uint8)\n\n img = Image.new('RGB',(len(mask[0]), len(mask)))\n pixels = img.load()\n for j_, j in enumerate(mask):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs = np.array(img)\n return outputs", "def decode_segmap(label_mask, num_classes):\n label_colours = get_capsicum_labels()\n\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, num_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3)).astype(np.uint8)\n # rgb[:, :, 0] = r / 255.0\n # rgb[:, :, 1] = g / 255.0\n # rgb[:, :, 2] = b / 255.0\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb", "def mask_label_into_class_label(self, mask_labels, img_resolution = 256, bigger_than_percent=3.0):\n array_of_number_of_change_pixels = []\n\n for mask in mask_labels:\n number_of_ones = np.count_nonzero(mask.flatten()) # << loading takes care of this 0 vs non-zero\n array_of_number_of_change_pixels.append(number_of_ones)\n\n self.debugger.save_arr(array_of_number_of_change_pixels, \"BALANCING\")\n array_of_number_of_change_pixels = self.debugger.load_arr(\"BALANCING\")\n\n array_of_number_of_change_pixels = array_of_number_of_change_pixels / (\n img_resolution * img_resolution) * 100.0 # percentage of image changed\n\n class_labels = []\n for value in array_of_number_of_change_pixels:\n is_change = value > bigger_than_percent\n class_labels.append(int(is_change))\n\n return np.array(class_labels)", "def _labels_to_one_hot_class(self, labels, mask):\n classes = tf.one_hot(\n tf.cast(labels, tf.int32), self._num_classes, dtype=tf.float32)\n return tf.where(tf.expand_dims(mask, axis=-1), classes, 0.0)", "def compute_mask_class(y_true: Tensor):\n y_true = y_true.detach().cpu()\n batch_size = y_true.size(0)\n num_classes = y_true.size(1)\n if num_classes == 1:\n y_true = 
y_true.view(batch_size, -1)\n elif num_classes == 2:\n y_true = y_true[:, 1, ...].contiguous().view(batch_size, -1) # Take salt class\n else:\n raise ValueError('Unknown num_classes')\n\n img_area = float(y_true.size(1))\n percentage = y_true.sum(dim=1) / img_area\n class_index = (percentage * 4).round().byte()\n return class_index", "def compute_mask_class(y_true: Tensor):\n batch_size = y_true.size(0)\n num_classes = y_true.size(1)\n if num_classes == 1:\n y_true = y_true.view(batch_size, -1)\n elif num_classes == 2:\n y_true = y_true[:, 1, ...].contiguous().view(batch_size, -1) # Take salt class\n else:\n raise ValueError('Unknown num_classes')\n\n img_area = float(y_true.size(1))\n percentage = y_true.sum(dim=1) / img_area\n class_index = (percentage * 4).round().byte()\n return class_index", "def mask_to_label(mask):\n # get the image size\n h, w, _ = mask.shape\n\n # build a color to label map\n color_to_idx = {}\n for label in class_info:\n color_to_idx[class_info[label].color] = class_info[label].id\n\n # generate label matrix\n label = np.zeros((h, w), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n b, g, r = mask[y, x]\n color = (r, g, b)\n label[y, x] = color_to_idx[color]\n\n return label", "def mask_classes(outputs: torch.Tensor, dataset: ContinualDataset, k: int) -> None:\n outputs[:, 0:k * dataset.N_CLASSES_PER_TASK] = -float('inf')\n outputs[:, (k + 1) * dataset.N_CLASSES_PER_TASK:\n dataset.N_TASKS * dataset.N_CLASSES_PER_TASK] = -float('inf')", "def mask2categorical(Mask: tf.Tensor, labels: dict) -> tf.Tensor:\n assert type(labels) == dict, \"labels variable should be a dictionary\"\n\n X = Mask\n\n if X.dtype == \"float32\":\n X = tf.cast(X*255, dtype=\"uint8\")\n\n Y = tf.zeros(X.shape[0:2] , dtype=\"float32\")\n for i, key in enumerate(labels):\n Y = tf.where(np.all(X == labels[key], axis=-1), i, Y)\n Y = tf.cast(Y, dtype=\"uint8\")\n return Y", "def change_class_labels_back(classes,given):\n classes=np.asarray(classes)\n classes_new=np.zeros(classes.shape,dtype=object)\n for i in range(len(given)):\n classes_new[classes==i]=given[i]\n return classes_new", "def _map_class(max_iou_indices, labels):\n #add on index column\n max_iou_indices = tf.stack([tf.reshape(tf.convert_to_tensor([np.arange(0, tf.shape(all_anchors)[0])]), [1, tf.shape(max_iou_indices)[0]]),\n tf.cast(tf.expand_dims(max_iou_indices, axis=0), dtype=tf.int32)], axis=0)\n max_iou_indices = tf.transpose(tf.squeeze(max_iou_indices))\n broadcasted_labels = tf.broadcast_to(labels, [tf.shape(all_anchors)[0], tf.shape(random_labels)[0]])\n anchor_classes = tf.gather_nd(broadcasted_labels, temp)\n return anchor_classes", "def class2color(self, labels, clean_up_clusters=0, mode=None):\n clean_up_clusters *= clean_up_clusters # create an area\n colored_labels = np.zeros(labels.shape[:2] + (3,)).astype(np.uint8)\n labels = np.squeeze(labels)\n if clean_up_clusters > 0:\n labels = DropClusters.drop(labels, min_size=clean_up_clusters)\n ys, xs = np.where(labels)\n colored_labels[ys, xs, :] = self.label_color\n return colored_labels", "def decode_segmap(label_mask, n_classes, hex_color_dict, dataset, plot=False):\r\n\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n for ll in range(0, n_classes):\r\n r[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[0]\r\n g[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[1]\r\n b[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[2]\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n 
rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n\r\n return rgb", "def decode_segmap(self, label_mask, plot=False):\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "def decode_segmap(self, label_mask, plot=False):\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "def encode_segmap(self, mask):\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(self.get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask", "def gt_covert(gt):\n gt = np.array(gt)\n new_gt = np.zeros_like(gt)\n for label in labels:\n if label.id != 0 and label.trainId == 255:\n new_gt = new_gt + (gt == label.id) * 128\n elif label.id != -1:\n new_gt = new_gt + (gt == label.id) * label.trainId\n else:\n new_gt = new_gt + (gt == label.id) * 255\n return new_gt", "def categorical2mask(X, labels):\n X_shape = X.shape[0:2]\n if type(X_shape) == tuple:\n X_shape = list(X_shape)\n Y = np.zeros(X_shape + [3], dtype=\"uint8\")\n for i, key in enumerate(labels):\n print(X.shape,Y.shape)\n Y[...,0] = np.where(X==i, labels[key][0], Y[...,0])\n Y[...,1] = np.where(X==i, labels[key][1], Y[...,1])\n Y[...,2] = np.where(X==i, labels[key][2], Y[...,2])\n return Y", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def inverse_transform_labels(self, indices):\n classes = self.classes_()\n return [classes[ind] for ind in indices]", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def mask_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = 0\n else:\n if row[i] == 10:\n erase = True\n row[i] = 1\n return row\n\n ret = np.copy(labels)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y", "def nms_keep_map(class_id):\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.FAST_RCNN_NMS_IOU_THRESHOLD)\n # Map indicies\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]" ]
[ "0.7264654", "0.66162145", "0.64050084", "0.6343444", "0.6311547", "0.62583977", "0.6244545", "0.6026306", "0.6009242", "0.5989441", "0.5958324", "0.5925581", "0.5895624", "0.5866707", "0.5849876", "0.58453006", "0.5836782", "0.5834448", "0.5816841", "0.5816841", "0.5812876", "0.5774617", "0.5767635", "0.57089585", "0.57019657", "0.565494", "0.56093836", "0.5557504", "0.5557316", "0.5538545" ]
0.66599804
1
This function converts a segmentation mask to a onehot encoded version
def get_one_hot(self, mask_slice):
    mask_one_hot = self.map_vert_to_class(mask_slice).reshape(-1, 1)
    self.one_hot_encoder.fit(mask_one_hot)
    mask_one_hot = self.one_hot_encoder.transform(mask_one_hot).toarray()
    # set 1st dim to num classes
    mask_one_hot = np.transpose(mask_one_hot, (1, 0))
    mask_one_hot = mask_one_hot[:,:, None]
    mask_one_hot = mask_one_hot.reshape((6, mask_slice.shape[0], mask_slice.shape[1]))
    return mask_one_hot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_hot_encoding(mask: typing.Any, classes: typing.List[int]):\n raise TypeError(\"Mask should be either np.ndarray of torch.Tensor\")", "def binary_to_one_hot(arr: np.ndarray) -> np.ndarray:\n res = np.zeros((arr.shape[0], 2))\n res[np.where(arr == 1)[0], 0] = 1\n res[np.where(arr == 0)[0], 1] = 1\n return res", "def one_hot_encode(x):\n # TODO: Implement Function\n output = np.zeros((len(x), 10))\n \n for i, j in enumerate(x):\n output[i,j] = 1\n \n return output", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot", "def _convert_to_onehot_labels(seg_label, num_classes):\n\n batch_size = seg_label.size(0)\n onehot_labels = seg_label.new_zeros((batch_size, num_classes))\n for i in range(batch_size):\n hist = seg_label[i].float().histc(\n bins=num_classes, min=0, max=num_classes - 1)\n onehot_labels[i] = hist > 0\n return onehot_labels", "def one_hot_encode(y, out_size):\n n = len(y)\n oh = np.zeros((n, out_size))\n oh[range(n), y] = 1\n return oh", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def one_hot_encoding(y):\n\n y_oh = np.zeros((y.shape[0], y.max() - y.min() + 1))\n\n # currently only works in min is actually 0\n for j in range(0, y_oh.shape[1]):\n y_oh[np.where(y == j), j] = 1\n\n return y_oh", "def encode_one_hot(s):\n all = []\n for c in s:\n x = np.zeros((INPUT_VOCAB_SIZE)) \n index = char_indices[c]\n x[index] = 1 \n all.append(x)\n return all", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def onehot(trace):\n encoded_trace = np.zeros((len(trace), 3), dtype=int)\n encoded_trace[np.arange(len(trace)), trace] = 1\n return encoded_trace.flatten()", "def one_hot_encode(x):\n # TODO: Implement Function\n lb = preprocessing.LabelBinarizer()\n lb.fit([0,1,2,3,4,5,6,7,8,9])\n \n return lb.transform(x)", "def one_hot_encoding(data):\n\n data_encoded = pd.get_dummies(data)\n\n return data_encoded", "def to_one_hot(v):\n n = len(v)\n m = max(v) + 1\n out = np.zeros((n, m))\n out[np.arange(n), v] = 1\n return out", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def one_hot(seq):\n prot_seq = seq\n BASES = 'ARNDCQEGHILKMFPSTWYV'\n bases = np.array([base for base in BASES])\n feat = np.concatenate(\n [[(bases == base.upper()).astype(int)] if str(base).upper() in BASES else np.array([[-1] * len(BASES)]) for base\n in prot_seq])\n return feat", "def encode_one_hot2(s):\n x = np.zeros((LINE_SIZE, INPUT_VOCAB_SIZE))\n for n, c in enumerate(s):\n index = char_indices[c]\n x[n, index] = 1 \n return x", "def one_hot_encoding(labels, num_classes=10):\n num_labels = labels.shape[0]\n encoded = np.zeros((num_labels, num_classes))\n encoded[np.arange(num_labels), labels[np.arange(num_labels)]] = 1\n \n return encoded", "def one_hot_encode(x):\n # TODO: Implement Function\n x_l = list(x)\n for index in np.arange(len(x_l)):\n x_l[index] = get_one_hot_vector(x[index])[x[index]]\n return np.array(x_l)", "def one_hot_enc(self, word):\n word = self.text_to_int(word)\n word = Variable(torch.tensor(word))\n word = torch.nn.functional.one_hot(word, len(self.index_map))\n return word.transpose(0, 1)", "def _one_hot_encode(label_vector, total_num_labels):\n out = np.zeros(shape=(len(label_vector), total_num_labels))\n for i in range(len(label_vector)):\n out[i, int(label_vector[i])] = 1\n return out", "def one_hot_encode(df, col):\n return pd.get_dummies(df, columns=[col], drop_first=True)", "def sequence_to_onehot(seq):\n seq_one_hot = []\n for i in range(MAX_SEQ_LENGTH):\n if i < len(seq):\n if seq[i] == 'A':\n seq_one_hot.append([1,0,0,0])\n elif seq[i] == 'T':\n seq_one_hot.append([0,1,0,0])\n elif seq[i] == 'G':\n seq_one_hot.append([0,0,1,0])\n elif seq[i] == 'C':\n seq_one_hot.append([0,0,0,1])\n else:\n seq_one_hot.append([0,0,0,0])\n else:\n seq_one_hot.append([0,0,0,0])\n seq_one_hot = np.array(seq_one_hot).reshape(MAX_SEQ_LENGTH,4)\n return seq_one_hot", "def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()", "def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)", "def label2onehot(self, labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out", "def _labels_to_one_hot_class(self, labels, mask):\n classes = tf.one_hot(\n tf.cast(labels, tf.int32), self._num_classes, dtype=tf.float32)\n return tf.where(tf.expand_dims(mask, axis=-1), classes, 0.0)", "def to_one_hot_encoding(target_data):\n target_data = target_data.squeeze()\n n_class = len(np.unique(target_data))\n res = np.eye(n_class)[target_data.astype(int)]\n return res" ]
[ "0.7464706", "0.7032775", "0.70228434", "0.69899994", "0.69584465", "0.6958401", "0.69303614", "0.6923459", "0.6914034", "0.68933815", "0.68916476", "0.68892264", "0.68866897", "0.68721896", "0.6867728", "0.68463516", "0.6792192", "0.67787373", "0.6777861", "0.6775648", "0.67347425", "0.6734288", "0.6726394", "0.67261535", "0.67122877", "0.67037386", "0.6703652", "0.6702987", "0.66981477", "0.66975003" ]
0.7079161
1
URI for dmapi DB
def dmapi_uri(self):
    return self.get_uri(prefix="dmapi")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_uri_for(db_name):\n return '/'.join(_db_uri_parts()[:-1] + [db_name])", "def get_uri(db_params: Dict[str, str]) -> str:\r\n def format_uri(scheme: str) -> str:\r\n auth = f'''{db_params[Toml.USER]}:{db_params[Toml.PASSWORD]}'''\r\n return f'''{scheme}://{auth}@{db_params[Toml.ADDRESS]}/{db_params[Toml.DB]}'''\r\n\r\n try:\r\n uniform_dbms = get_dbms(db_params[Toml.DBMS])\r\n return format_uri(uniform_dbms.scheme())\r\n except KeyError as err:\r\n LOG.exception(err)\r\n exit()", "def get_url(self):\n return self.db_url", "def db_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"db_url\")", "def _uri(helper):\n return '/'.join((\n helper.context_meta['server_uri'],\n 'servicesNS',\n 'nobody',\n 'Splunk_TA_paloalto',\n 'storage',\n 'collections',\n 'data',\n 'minemeldfeeds'))", "def get_database_url(self):\n return self.config['dbase_path']", "def database_uri(env):\r\n return (\r\n \"{dialect}+{driver}://{user}:{password}@{host}:{port}/{database}?charset={charset}\"\r\n ).format(\r\n dialect=configfile.get(\"database\", \"dialect\"),\r\n driver=configfile.get(\"database\", \"driver\"),\r\n user=configfile.get(\"database\", \"username\"),\r\n password=configfile.get(\"database\", \"password\"),\r\n host=configfile.get(\"database\", \"host\"),\r\n port=configfile.get(\"database\", \"port\"),\r\n database=configfile.get(\"database\", \"database\"),\r\n charset=configfile.get(\"database\", \"charset\"),\r\n )", "def build_db_uri() -> str:\n if DatabaseConfig.uri:\n return DatabaseConfig.uri\n\n return (\n f\"postgresql://{DatabaseConfig.username}:{DatabaseConfig.password}\"\n f\"@{DatabaseConfig.host}:{DatabaseConfig.port}/{DatabaseConfig.database}\"\n )", "def __data_url(self):\n path = AGENT_DATA_PATH % self.from_.pid\n return \"http://%s:%s/%s\" % (self.host, self.port, path)", "def build_db_uri() -> str:\n\n return \"{DB_DRIVER}://{DB_USERNAME}:{DB_PASSWD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\".format(**{\n 'DB_DRIVER': os.environ.get('DB_DRIVER', ''),\n 'DB_HOST': os.environ.get('DB_HOST', ''),\n 'DB_PORT': os.environ.get('DB_PORT', ''),\n 'DB_NAME': os.environ.get('DB_NAME', ''),\n 'DB_USERNAME': os.environ.get('DB_USERNAME', ''),\n 'DB_PASSWD': os.environ.get('DB_PASSWD', '')\n })", "def _getURL(serviceName, options):\n system = options['System']\n port = options['Port']\n host = socket.gethostname()\n url = 'dips://%s:%s/%s/%s' % (host, port, system, serviceName)\n return url", "def _db_uri_parts():\n return app.config['SQLALCHEMY_DATABASE_URI'].split('/')", "def xmlrpc_url(self):\n return self.key_name_parts()[0]", "def database_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_url\")", "def pg_url():\n return URL(DEFAULT_PG_URL)", "def resource_uri(self):\n primary_key_value = getattr(self, self.primary_key(), None)\n return '/{}/{}'.format(self.endpoint(), primary_key_value)", "def url(self):\n scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)\n url = six.moves.urllib.parse.urlunsplit((\n scheme, netloc, path + '.dods',\n self.id + hyperslab(self.slice) + '&' +\n '&'.join(self.selection), fragment)).rstrip('&')\n\n return url", "def _adjust_connection_URL(self, text):\n dbname = self.options.db\n parts = text.split('/')\n\n # Preserve the quotes if present\n if parts[-1].endswith(\"'\"):\n dbname += \"'\"\n\n parts[-1] = dbname\n return '/'.join(parts)", "def get_domainURI(self):\n return \"{0}/cdmi_domains/radon/\".format(self.api_root)", "def get_domainURI(self):\n return 
\"{0}/cdmi_domains/radon/\".format(self.api_root)", "def _format_database(self, request: DatabaseSecretRequest,\n secret: Secret) -> str:\n username, password = secret.value\n return f'{request.engine}://{username}:{password}@' \\\n f'{request.host}:{request.port}/{request.database}?' \\\n f'{request.params}'", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def url_ExoMol():\n url=u\"http://www.exomol.com/db/\"\n return url", "def alchemy_url(db_config):\n return server.alchemy_url(db_config)", "def get_database_url():\n details = {\n # This specifies what database buildbot uses to store its state. You can leave\n # this at its default for all but the largest installations.\n 'db_url': \"mysql+pymysql://{user}:{password}@{host}/{database}?max_idle=300\".format(\n user=USER, password=PASSWORD,\n database=DATABASE, host=HOST\n ),\n 'db_poll_interval': 6,\n }\n return details", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def _uri(self):\n raise NotImplementedError", "def get_db_url(db_name, user=user, host=host, password=password):\n url = f'mysql+pymysql://{user}:{password}@{host}/{db_name}'\n return url", "async def _construct_db_url(config):\n return 'mongodb://{user}:{password}@{host}:{port}/{database}?authSource=admin'.format(\n user=config['user'],\n password=config['password'],\n database=config['database'],\n host=config['host'],\n port=config['port'],\n )", "def doQueryURI(self, query) :\n\t\tqr = self.doQuery(query)\n\t\treturn qr['results']['bindings'][0]" ]
[ "0.6400694", "0.6293949", "0.6125386", "0.59341073", "0.5838207", "0.5829279", "0.58105844", "0.5752424", "0.5700388", "0.5589648", "0.5494501", "0.5470878", "0.5438909", "0.54256606", "0.5406732", "0.5402033", "0.5394537", "0.5393316", "0.5385201", "0.5385201", "0.53774226", "0.53770375", "0.5342838", "0.5335419", "0.5310924", "0.53024936", "0.52628374", "0.5259846", "0.52569824", "0.5244098" ]
0.72291374
0
Creates or updates a Ceph keyring file.
def configure_ceph_keyring(self, key, cluster_name=None):
    keyring_absolute_path = super().configure_ceph_keyring(
        key, cluster_name
    )
    # TODO: add support for custom permissions into charms.openstack
    if os.path.exists(keyring_absolute_path):
        # NOTE: triliovault access the keyring as the nova user, so
        # set permissions so it can do this.
        os.chmod(keyring_absolute_path, 0o640)
        ceph_keyring = os.path.join(
            "/etc/ceph", os.path.basename(keyring_absolute_path)
        )
        # NOTE: triliovault needs a keyring in /etc/ceph as well as in the
        # charm specific location for qemu commands to work
        if not os.path.exists(ceph_keyring):
            os.symlink(keyring_absolute_path, ceph_keyring)
    # NOTE: ensure /var/lib/charm is world readable - this will be the
    # case with Python >= 3.7 but <= 3.6 has different behaviour
    os.chmod('/var/lib/charm', 0o755)
    return keyring_absolute_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_keyring_from_data(self, filedata: bytes, keyring_name: str) -> None:\n pubkey, keyidv2 = get_pubkey(filedata)\n if pubkey:\n self.add_pubkey_to_keyring(pubkey, keyring_name, keyidv2=keyidv2)", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def test_rcreate_refuse_to_overwrite_keyfile(self):\n keyfile = os.path.join(self.tmpdir, \"keyfile\")\n with environment_variable(BORG_KEY_FILE=keyfile):\n self.cmd(f\"--repo={self.repository_location}0\", \"rcreate\", KF_ENCRYPTION)\n with open(keyfile) as file:\n before = file.read()\n arg = (f\"--repo={self.repository_location}1\", \"rcreate\", KF_ENCRYPTION)\n if self.FORK_DEFAULT:\n self.cmd(*arg, exit_code=2)\n else:\n with pytest.raises(Error):\n self.cmd(*arg)\n with open(keyfile) as file:\n after = file.read()\n assert before == after", "def create_file(self, key=None):\n self.make_directory()\n open(self.file_path(key), 'w').close()", "def encryptor(file_name, key, plaintext):\n\twith open(file_name, 'w') as efile:\n\t\tenc = encrypt(key, plaintext)\n\t\tefile.write(enc)\n\t\tefile.close()\n\t\tetext = \"An encrypted passfile was created named key.enc for further use in this script by the user: \"\n\t\tcreateLog(etext, 'logs/macupdate.log')", "def add(cls, config: Dict) -> None:\n id_ = config[\"id\"]\n client_file = f\"/etc/ceph/ceph.{id_}.keyring\"\n\n # Create client\n cmd = [\"ceph\", \"auth\", \"get-or-create\", f\"{id_}\"]\n [cmd.append(f\"{k} '{v}'\") for k, v in config.get(\"caps\", {}).items()]\n cnt_key, err = cls.shell(args=cmd)\n\n def put_file(client, file_name, content, file_mode, sudo=True):\n file_ = client.remote_file(sudo=sudo, file_name=file_name, file_mode=file_mode)\n file_.write(content)\n file_.flush()\n file_.close()\n\n nodes_ = config.get(\"nodes\", config.get(\"node\"))\n default_version = str(cls.cluster.rhcs_version.version[0])\n use_cdn = cls.cluster.use_cdn\n if nodes_:\n if not isinstance(nodes_, list):\n nodes_ = [{nodes_: {}}]\n\n def setup(host):\n name = list(host.keys()).pop()\n _build = list(host.values()).pop()\n _node = get_node_by_id(cls.cluster, name)\n if _build.get(\"release\"):\n rhcs_version = _build[\"release\"]\n if not isinstance(rhcs_version, str):\n rhcs_version = str(rhcs_version)\n elif use_cdn:\n rhcs_version = default_version\n else:\n rhcs_version = \"default\"\n\n rhel_version = _node.distro_info[\"VERSION_ID\"][0]\n log.debug(\n f\"RHCS version : {rhcs_version} on host {_node.hostname}\\n\"\n f\"with RHEL major version as : {rhel_version}\"\n )\n enable_cmd = \"subscription-manager repos --enable=\"\n disable_all = [\n r\"subscription-manager repos --disable=*\",\n r\"yum-config-manager --disable \\*\",\n ]\n cmd = 'subscription-manager repos --list-enabled | grep -i \"Repo ID\"'\n cdn_ceph_repo = {\n \"7\": {\"4\": [\"rhel-7-server-rhceph-4-tools-rpms\"]},\n \"8\": {\n \"4\": [\"rhceph-4-tools-for-rhel-8-x86_64-rpms\"],\n \"5\": [\"rhceph-5-tools-for-rhel-8-x86_64-rpms\"],\n },\n \"9\": {\n \"5\": [\"rhceph-5-tools-for-rhel-9-x86_64-rpms\"],\n \"6\": [\"rhceph-6-tools-for-rhel-9-x86_64-rpms\"],\n },\n }\n\n rhel_repos = {\n \"7\": [\"rhel-7-server-rpms\", \"rhel-7-server-extras-rpms\"],\n \"8\": [\n \"rhel-8-for-x86_64-baseos-rpms\",\n \"rhel-8-for-x86_64-appstream-rpms\",\n ],\n \"9\": [\n \"rhel-9-for-x86_64-appstream-rpms\",\n \"rhel-9-for-x86_64-baseos-rpms\",\n ],\n }\n\n # Collecting already enabled repos\n out, _ = _node.exec_command(sudo=True, cmd=cmd, check_ec=False)\n enabled_repos 
= list()\n if out:\n out = out.strip().split(\"\\n\")\n for entry in out:\n repo = entry.split(\":\")[-1].strip()\n enabled_repos.append(repo)\n log.debug(f\"Enabled repos on the system are : {enabled_repos}\")\n\n if rhcs_version != \"default\":\n # Disabling all the repos and enabling the ones we need to install the ceph client\n for cmd in disable_all:\n _node.exec_command(sudo=True, cmd=cmd, timeout=1200)\n\n # Enabling the required CDN repos\n for repos in rhel_repos[rhel_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n for repos in cdn_ceph_repo[rhel_version][rhcs_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n # Clearing the release preference set and cleaning all yum repos\n # Observing selinux package dependency issues for ceph-base\n wa_cmds = [\"subscription-manager release --unset\", \"yum clean all\"]\n for wa_cmd in wa_cmds:\n _node.exec_command(sudo=True, cmd=wa_cmd)\n\n # Copy the keyring to client\n _node.exec_command(sudo=True, cmd=\"mkdir -p /etc/ceph\")\n put_file(_node, client_file, cnt_key, \"w\")\n\n if config.get(\"copy_ceph_conf\", True):\n # Get minimal ceph.conf\n ceph_conf, err = cls.shell(\n args=[\"ceph\", \"config\", \"generate-minimal-conf\"]\n )\n # Copy the ceph.conf to client\n put_file(_node, \"/etc/ceph/ceph.conf\", ceph_conf, \"w\")\n\n # Copy admin keyring to client node\n if config.get(\"copy_admin_keyring\"):\n admin_keyring, _ = cls.shell(\n args=[\"ceph\", \"auth\", \"get\", \"client.admin\"]\n )\n put_file(\n _node, \"/etc/ceph/ceph.client.admin.keyring\", admin_keyring, \"w\"\n )\n\n # Install ceph-common\n if config.get(\"install_packages\"):\n for pkg in config.get(\"install_packages\"):\n _node.exec_command(\n cmd=f\"yum install -y --nogpgcheck {pkg}\", sudo=True\n )\n if config.get(\"git_clone\", False):\n log.info(\"perform cloning operation\")\n role = config.get(\"git_node_role\", \"client\")\n ceph_object = cls.cluster.get_ceph_object(role)\n node_value = ceph_object.node\n utils.perform_env_setup(config, node_value, cls.cluster)\n\n out, _ = _node.exec_command(cmd=\"ls -ltrh /etc/ceph/\", sudo=True)\n log.info(out)\n\n # Hold local copy of the client key-ring in the installer node\n if config.get(\"store-keyring\"):\n put_file(cls.installer, client_file, cnt_key, \"w\")\n\n with parallel() as p:\n for node in nodes_:\n if not isinstance(node, dict):\n node = {node: {}}\n p.spawn(\n setup,\n node,\n )\n time.sleep(20)", "def create_lock_file():\n\n print(\"Creating lock file\")", "def add_key(self, device, key):\n if not self.enabled:\n return\n self.keys[device] = key\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)", "def encrypt(project_id, location_id, key_ring_id, crypto_key_id,\n plaintext_file_name, ciphertext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read data from the input file.\n with io.open(plaintext_file_name, 'rb') as plaintext_file:\n plaintext = plaintext_file.read()\n\n # Use the KMS API to encrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.encrypt(\n name=name,\n body={'plaintext': base64.b64encode(plaintext).decode('ascii')})\n response = request.execute()\n ciphertext = 
base64.b64decode(response['ciphertext'].encode('ascii'))\n\n # Write the encrypted data to a file.\n with io.open(ciphertext_file_name, 'wb') as ciphertext_file:\n ciphertext_file.write(ciphertext)\n\n print('Saved ciphertext to {}.'.format(ciphertext_file_name))", "def createAllKP():\n\tif not os.path.exists(keysDir):\n\t\tos.makedirs(keysDir)\n\tfor info in conf_HVM:\n\t\tkeyName = 'Key-'+info['region']+'-'+info['zone']\n\t\ttry:\n\t\t\tos.remove(keysDir+'/'+keyName+'.pem')\n\t\texcept OSError:\n\t\t\tpass\n\t\tprint \"Key creation :\",keyName\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\t# check if the key pair exists\n\t\tkps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]\n\t\tif kps:\n\t\t\tec2.delete_key_pair(keyName)\t\n\t\tkey = ec2.create_key_pair(keyName)\n\t\tkey.save(keysDir)", "def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def save(self, filepath, client_data_list, new_passphrase=None):\n import struct\n\n plain_text = json.dumps(\n client_data_list, sort_keys=True, indent=4, separators=(',', ': '),\n cls=ClientDataEncoder)\n header = b''.join([\n struct.pack(\"!I\", self.__magic_number),\n struct.pack(\"!I\", self.__file_version),\n struct.pack(\"!I\", self.__key_stretches),\n struct.pack(\"!I\", self.__magic_number)])\n data = b''.join([\n header,\n bytes(plain_text, 'utf-8')])\n if new_passphrase is not None:\n self.__key = self._produce_key(new_passphrase)\n self.__iv = self._produce_iv(self.__key)\n cypher_text = self._encrypt(data)\n with open(filepath, 'wb') as f:\n f.write(header)\n f.write(cypher_text)", "def writeKeyToFile(key, filename):\n logging.info(\"Writing key to file: %r\" % filename)\n flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT | getattr(os, \"O_BIN\", 0)\n fd = os.open(filename, flags, 0400)\n os.write(fd, key)\n os.fsync(fd)\n os.close(fd)", "def store_apikey_in_keyring(platform_id='public', # type: str\n base_url=None, # type: str\n keyring_entries_username=KR_DEFAULT_USERNAME, # type: str\n apikey=None, # type: str\n ):\n client = ODSClient(platform_id=platform_id, base_url=base_url, keyring_entries_username=keyring_entries_username)\n client.store_apikey_in_keyring(apikey=apikey)", "def create_key ():", "def write_key(self):\n\t key = Fernet.generate_key()\n\t with open(\"key.key\", \"wb\") as key_file:\n\t key_file.write(key)", "def put(self):\n\n args = self.put_parser.parse_args(strict=True)\n ecgfile = args.data\n\n # Save the file on disk using a uuid\n filename = \"%s%s.dat\" % (self.file_prefix, uuid())\n ecgfile.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n\n # Create a new database record for this file\n with open_session() as session:\n ecgdata = ECGData(filename)\n session.add(ecgdata)\n session.flush()\n session.commit()\n\n print(ecgdata.id)\n\n return gen_response({\"id\": ecgdata.id})", "def gpg(c):\n buf = io.BytesIO()\n tf = tarfile.TarFile(fileobj=buf, mode=\"w\")\n tf.add(\"etc/gpgkeys\", arcname=\"gpgkeys\", recursive=True)\n tf.close()\n buf.seek(0)\n\n with utils.remote_tmp_dir(conn) as tmp_dir:\n upload_path = utils.join(tmp_dir, \"gpgkeys.tar\")\n\n with conn.sftp() as sftp:\n try:\n sftp.putfo(buf, upload_path)\n finally:\n buf.close()\n\n conn.run(f\"tar -xf {upload_path} -C /etc/salt\")", "def insert_key(newKey, filename, ssh):\n ssh.exec_command(\"grep -q 
-F '%s' %s || echo %s >> %s\" % (newKey, filename, newKey, filename))", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def create_ssh_key_file(username: str, ssh_key: bytes, ip_address: str):\n\n if not os.path.exists(\"./ansible/keys\"):\n os.mkdir(\"./ansible/keys\")\n\n with open(f\"./ansible/keys/admin_{ip_address}.pem\", \"w\") as ssh_key_file:\n ssh_key_file.write(ssh_key.decode())\n\n os.system(f\"chmod 400 ./ansible/keys/admin_{ip_address}.pem\")", "def enter_dict(update_dict, path=\"current_request.txt\"):\n\n request_dict = RequestFileCom.file_to_dict(path)\n\n # Make sure no one changes the file content before finishes to update it.\n RequestFileCom.mutex.acquire()\n\n for key in update_dict:\n\n request_dict[key] = update_dict[key]\n\n request_file_string = \"\"\n\n for key in request_dict:\n\n request_file_string += key + \"::\" + request_dict[key] + \"\\n\"\n\n # Update the file.\n with open(path, \"w\") as f:\n\n f.write(request_file_string)\n\n RequestFileCom.mutex.release()\n\n request_dict = RequestFileCom.file_to_dict(path)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def store_file(filename1, filename2):\n print 'Splitting ', filename1, ' into encoded comments for keys'\n file_list = read_file_into_list(filename1)\n output_file = open(filename2, 'w')\n counter_length = len(file_list)\n counter = 0\n for chunk in file_list:\n print 'Creating key ', 
counter, ' of ', counter_length\n counter = counter + 1\n key_id = create_key(chunk)\n output_file.write(send_key(key_id)+'\\n')\n print '--> key has been created and uploaded'\n print 'File has been successfully uploaded to ', KEYSERVER", "def store_file(filename1, filename2):\n\tprint 'Splitting ', filename1, ' into encoded comments for keys'\n\n\t# watch out return value is the name of the newly packed file now\n\tfilename1 = pack_file_to_disk(filename1)\n\n\tfile_list = read_file_into_list(filename1)\n\toutput_file = open(filename2, 'w')\n\tcounter_length = len(file_list)\n\tcounter = 0\n\tfor chunk in file_list:\n\t\tprint 'Creating key ', counter, ' of ', counter_length\n\t\tcounter = counter + 1\n\t\tkey_id = create_key(chunk)\n\t\toutput_file.write(send_key(key_id)+'\\n')\n\t\toutput_file.flush()\n\t\tprint '--> key has been created and uploaded'\n\tprint 'File has been successfully uploaded to ', KEYSERVER\n\n\t# TODO adjust with args\n\t# tempfile is created at packing stage\n\tos.system('tempfile.tmp')", "def _write(self):\n raw_data = {'file_version': 1}\n raw_creds = []\n raw_data['data'] = raw_creds\n for (cred_key, cred) in self._data.items():\n raw_key = dict(cred_key)\n raw_cred = json.loads(cred.to_json())\n raw_creds.append({'key': raw_key, 'credential': raw_cred})\n self._locked_json_write(raw_data)", "def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})", "def put(self, key, value):\n self.execute_command('sudo -i bash -c \\'echo -n \"{0}\" > {1}{2}\\''\n .format(value, self._store_path, key))", "def update_lockfile(project_dir: Path, command: Dict[str, Any]) -> None:\n lock_path = project_dir / PROJECT_LOCK\n if not lock_path.exists():\n srsly.write_yaml(lock_path, {})\n data = {}\n else:\n data = srsly.read_yaml(lock_path)\n data[command[\"name\"]] = get_lock_entry(project_dir, command)\n srsly.write_yaml(lock_path, data)" ]
[ "0.5704393", "0.5466244", "0.54504627", "0.5367327", "0.5365103", "0.536254", "0.53573316", "0.53409374", "0.5322666", "0.532163", "0.52746147", "0.5234076", "0.5223036", "0.519377", "0.519335", "0.5180726", "0.5177008", "0.5154591", "0.515178", "0.51287216", "0.5128504", "0.5123393", "0.51146615", "0.50991017", "0.5096991", "0.50906086", "0.50759155", "0.50606626", "0.50602573", "0.5055669" ]
0.61470187
0
Test the policy on 100 games and output the wins, losses, and ties for all games. Additionally, display 5 of these games.
def test(policy, env):
    wins = 0
    losses = 0
    ties = 0
    num_invalid_moves = 0
    game_num = 1
    n = 100
    while n > 0:
        state = env.reset()
        done = False
        display_game = False
        if n % 20 == 0:
            display_game = True
            print("________________________________")
            print("GAME {} DISPLAYED".format(game_num))
            game_num += 1
            env.render()
        saved_rewards = []
        while not done:
            action, logprob = select_action(policy, state)
            state, status, done = env.play_against_random(action, display_game)
            reward = get_reward(status)
            saved_rewards.append(reward)
        if status == "win":
            wins += 1
        if status == "lose":
            losses += 1
        if status == "tie":
            ties += 1
        for reward in saved_rewards:
            if reward == Invalid_reward:
                num_invalid_moves += 1
        n -= 1
    print("Wins: {}, Losses: {}, Ties: {}, Invalid: {}" \
        .format(wins, losses, ties, num_invalid_moves))
    return wins, losses, ties
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_game_stats(games_won=games_won):\r\n for i in games_won: # set loop condition\r\n if games_won[i] != 1: # argument for if games is pluralized\r\n print (i + ' has won ' + str(games_won[i])+ ' games')\r\n else:\r\n print (i + ' has won ' + str(games_won[i])+ ' game')", "def print_game_stats(games_won=games_won):\n for k,v in games_won.items():\n print(k)\n print(v)\n if v == 1:\n print(f'{k} has won {v} game')\n else:\n print(f'{k} has won {v} games')", "def pretty_print_game (self, game_list):\n hand_tally = 0\n for game in game_list:\n hand_tally += int (game[2])\n\n game_result = \"won\" if hand_tally > 0 else (\"lost\" if hand_tally < 0 else \"tied\")\n\n print (\"I \" + game_result + \"!\")\n for game in game_list:\n print (game)\n print (\"-----------------------------------------------------------------\")", "def opponents_score(self):\n if self.opponent_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.opponent_runs, var1)\n else:\n var1 = self.opponent_wickets\n return str('{0}-{1}').format(self.opponent_runs, var1)", "def playManyGames(number):\n wins = 0\n losses = 0\n winRolls = 0\n lossRolls = 0\n player = Player()\n for count in range(number):\n hasWon = player.rollDice()\n rolls = 0\n if player.winner:\n wins += 1\n winRolls += player.rollsCount\n elif player.loser:\n losses += 1\n lossRolls += player.rollsCount\n print(\"The total number of wins is\", wins)\n print(\"The total number of losses is\", losses)\n print(\"The average number of rolls per win is %0.2f\" % \\\n (winRolls / wins))\n print(\"The average number of rolls per loss is %0.2f\" % \\\n (lossRolls / losses))\n print(\"The winning percentage is %0.3f\" % (wins / number))", "def playGames(self, num, sonete,secuence,mcts,verbose=False):\n eps_time = AverageMeter()\n bar = Bar('Arena.playGames', max=num)\n end = time.time()\n eps = 0\n maxeps = int(num)\n finalScore1=0\n finalScore2=0\n\n num = int(num/2)\n oneWon = 0\n twoWon = 0\n draws = 0\n gameResults=[]\n global nround\n actions=[]\n self.player1, self.player2 = self.player1, self.player1\n board = self.game.getInitBoard()\n for i in range(100):\n nround = i\n #action,sonete = self.playGame(sonete,sequences,nround,verbose=verbose)\n pi = mcts.getActionProb(sonete, temp=1)\n #actions.append(action)\n\n eps_time.update(time.time() - end)\n end = time.time()\n\n\n return actions#finalScore1, finalScore2#oneWon, twoWon, draws", "def display_stats(self):\n print(\"Simulation took: {:.2f} seconds to execute\".format(time.time() - self.start_time))\n for i, win in enumerate(self.wins):\n average = 0\n if win:\n average = float(self.tries[i]) / win\n print(\"Player {} wins: {} with (average number of rounds: {:.2f})\".format(i+1, win, average))", "def play_craps(number_of_games):\n for i in range(number_of_games):\n # Keep track of number of rolls till a win or loss occurs.\n number_of_rolls = 0\n\n die_values = roll_dice() # first roll\n number_of_rolls += 1\n\n # determine game status and point, based on first roll\n sum_of_dice = sum(die_values)\n if sum_of_dice in (7, 11): # win\n game_status = 'WON'\n elif sum_of_dice in (2, 3, 12): # lose\n game_status = 'LOST'\n else: # remember point\n game_status = 'CONTINUE'\n my_point = sum_of_dice\n\n # continue rolling until player wins or loses\n while game_status == 'CONTINUE':\n die_values = roll_dice()\n number_of_rolls += 1\n sum_of_dice = sum(die_values)\n\n if sum_of_dice == my_point: # win by making point\n game_status = 'WON'\n elif sum_of_dice == 7: # lose by rolling 7\n 
game_status = 'LOST'\n\n # Change number of rolls to 13 if it took more than that to get to a win/loss.\n # This is because we sum all wins/losses after 12 rolls into one number.\n if number_of_rolls > 13:\n number_of_rolls = 13\n\n # Keep track of win or loss\n if game_status == 'WON':\n win_frequency[number_of_rolls - 1] += 1\n else:\n loss_frequency[number_of_rolls - 1] += 1", "def showWorstStats(self) :\n Scenario.messageWorstStats()\n self.showWorstGainWon()\n self.showWorstBetUse()\n self.showNbLevelLose()", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. 
This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def tally_results(self, verbose=False):\n\t\tself.tally={\n\t\t\tself.player1.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t},\n\t\t\tself.player2.name: {\n\t\t\t\t'wins': 0,\n\t\t\t\t'draws': 0,\n\t\t\t\t'losses': 0\n\t\t\t}\n\t\t}\n\t\tfor key, value in self.winner_dict.items():\n\t\t\tself.tally[self.player1.name]['wins']+=1 if value == self.player1.name else 0\n\t\t\tself.tally[self.player1.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player1.name]['losses']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['wins']+=1 if value == self.player2.name else 0\n\t\t\tself.tally[self.player2.name]['draws']+=1 if value is None else 0\n\t\t\tself.tally[self.player2.name]['losses']+=1 if value == self.player1.name else 0\n\t\tif verbose:\n\t\t\tprint('\\n--- FINAL RESULT ---\\n')\n\t\t\ttally_pretty=pd.DataFrame(self.tally).to_markdown()\n\t\t\tprint(tally_pretty)\n\t\t\tif self.tally[self.player1.name]['wins'] == self.tally[self.player2.name]['wins']:\n\t\t\t\tprint('\\nIt\\'s a draw!\\n')\n\t\t\telse:\n\t\t\t\twinner=self.player1.name if self.tally[self.player1.name]['wins'] > self.tally[self.player2.name]['wins'] else self.player2.name\n\t\t\t\tprint('\\n{player} wins the game!\\n'.format(player=winner))", "def get_win_prob(self, state, playerid):\n evaluator = Evaluator()\n\n def get_card_class(card_int_list):\n res = [Card.new(Card.int_to_str(c)) for c in card_int_list if c != -1]\n return res\n\n def WinProbability(hand, board):\n rank = evaluator.evaluate(board, hand)\n percentage = 1.0 - evaluator.get_five_card_rank_percentage(rank)\n return percentage\n\n hand_cards = get_card_class(state.player_states[playerid].hand)\n board_cards = get_card_class(state.community_card)\n if any([True for h in hand_cards if h in board_cards]):\n Card.print_pretty_cards(hand_cards)\n Card.print_pretty_cards(board_cards)\n num_players = len([ p for p in state.player_states if not p.emptyplayer])\n\n win = 0\n round = 0\n\n board_cards_to_draw = 5 - len(board_cards) # 2\n rest_cards = self._pick_unused_card(board_cards + hand_cards)\n #print(\"rest cards\")\n #Card.print_pretty_cards(rest_cards)\n \n #choiced = random.sample(unused, card_num)\n \n for i in range(self.simulation_number):\n\n unused_cards = random.sample(rest_cards, (num_players - 1) * 2 + board_cards_to_draw)\n board_sample = unused_cards[len(unused_cards)-board_cards_to_draw:]\n unused_cards = unused_cards[:len(unused_cards)-board_cards_to_draw]\n\n opponents_hole = [unused_cards[2 * i:2 * i + 2] for i in range(num_players - 1)]\n\n try:\n opponents_score = [WinProbability(hole, board_sample) for hole in opponents_hole]\n my_rank = WinProbability(hand_cards, board_sample)\n if my_rank >= max(opponents_score):\n win += 1\n round+=1\n except Exception as inst:# Exception, e:\n #print e.message\n continue\n #print(\"Win:{}\".format(win))\n #print('round:{}'.format(round))\n if round == 0: \n if len(board_cards) > 1:\n try:\n return WinProbability(board_cards, hand_cards)\n except:\n return 0.6\n else: \n return 0.6\n win_prob = win / float(round)\n return win_prob", "def testHighscore(self):\n 
\n game = Game.objects.get(title='game0')\n gameplayeds = game.gameplayed_set\n ply_group = Group.objects.get(name='Player')\n for i in range(4):\n user = ply_group.user_set.get(username='ply{}'.format(i))\n gameplayed = gameplayeds.get(user=user)\n gameplayed.gameScore = i\n gameplayed.save()\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': 'gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(i))\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': '-gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(3 - i))", "def get_win_prob(self,hand_cards, board_cards,simulation_number,num_players):\n win = 0\n round=0\n evaluator = HandEvaluator()\n for i in range(simulation_number):\n\n board_cards_to_draw = 5 - len(board_cards) # 2\n board_sample = board_cards + self._pick_unused_card(board_cards_to_draw, board_cards + hand_cards)\n unused_cards = self._pick_unused_card((num_players - 1)*2, hand_cards + board_sample)\n opponents_hole = [unused_cards[2 * i:2 * i + 2] for i in range(num_players - 1)]\n\n try:\n opponents_score = [pow(evaluator.evaluate_hand(hole, board_sample), num_players) for hole in opponents_hole]\n # hand_sample = self._pick_unused_card(2, board_sample + hand_cards)\n my_rank = pow(evaluator.evaluate_hand(hand_cards, board_sample),num_players)\n if my_rank >= max(opponents_score):\n win += 1\n #rival_rank = evaluator.evaluate_hand(hand_sample, board_sample)\n round+=1\n except Exception, e:\n print e.message\n continue\n # The large rank value means strong hand card\n print \"Win:{}\".format(win)\n win_prob = win / float(round)\n print \"win_prob:{}\".format(win_prob)\n return win_prob", "def print_results(results, number_universe, flag_fair, players, neutral_player, max_turn):\n nb_games = len(results)\n\n # print parameters\n print(\"=== Parameters ===\")\n print(f\"Number of players : {len(players)}\")\n for p in players:\n print(f\" - player {p.name}\")\n print(f'Number of different universes : {number_universe}')\n print(f'Number of games played : {nb_games}')\n print(f'Number of games played per universe : {int(nb_games/number_universe)} ({\"\" if flag_fair else \"no \"}fair game : permutation players {\"\" if flag_fair else \"not \"}enabled)')\n print(f'Game unfinished if this number of turn is reached : {max_turn}')\n\n # preparation\n result_player = {} # {class Player:int, ..., None:int}\n list_turn = []\n nb_unfinished_games = 0\n for r in results.values():\n if r[\"winner\"] is not None: # the game has a winner\n list_turn.append(r[\"turn\"])\n else:\n nb_unfinished_games += 1\n nb = result_player.get(r[\"winner\"], 0)\n nb += 1\n result_player[r[\"winner\"]] = nb\n\n # print results\n print()\n print(\"=== Results ===\")\n if nb_unfinished_games == nb_games:\n print(\"All games are unfinished\")\n return\n print(\"End of games at turn (finished game only)\")\n print(f\" - mean : {round(mean(list_turn), 1)}\")\n print(f\" - median : {median(list_turn)}\")\n print(\"Percent of victory (number of victory)\")\n for p in players:\n nb_victory = result_player.get(p.number, 0)\n percent = round(nb_victory / nb_games * 100, 1)\n print(f\" 
- player {p.name} : {percent}% ({nb_victory})\")\n nb_victory = result_player.get(neutral_player.number, 0)\n percent = round(result_player.get(neutral_player.number, 0) / nb_games * 100, 1)\n print(f\"(- draw : {percent}% ({nb_victory}) )\")\n print(f\"(- unfinished : {round(nb_unfinished_games/nb_games*100, 1)}% ({nb_unfinished_games}) )\")\n return", "def final_strategy_test():\r\n print('-- Testing final_strategy --')\r\n print('Win rate:', compare_strategies(final_strategy))", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. 
example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def print_outcomes(MultipleGameSets, strategy_name):\n\n # mean and confidence interval text of patient survival time\n reward_mean_PI_text = Format.format_estimate_interval(\n estimate=MultipleGameSets.get_mean_total_reward(),\n interval=MultipleGameSets.get_PI_total_reward(alpha=P.ALPHA),\n deci=1)\n\n # print survival time statistics\n print(strategy_name)\n print(\" Estimate of mean rewards for gamblers and {:.{prec}%} prediction interval:\".format(1 - P.ALPHA, prec=0),\n reward_mean_PI_text)", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def test_ai_playes_randomly():\n\n\tplayed = [game.ai_turn() for _ in range(5000)]\n\n\trocks = played.count('rock')\n\tpapers = played.count('paper')\n\tscissors = played.count('scissors')\n\n\tprint('Rocks: ', rocks,'\\nPaper: ', papers,'\\nScissors: ', scissors)\n\n\tassert played.count('rock') > 200\n\tassert played.count('paper') > 200\n\tassert played.count('scissors') > 200", "def showBestStats(self) :\n Scenario.messageBestStats()\n self.showBestStatLevelReached()\n self.showNbCoupFindFirstAttempt()\n self.showBestGainWon()\n self.showBestBetUse()\n self.showNbLevelWon()", "def test(self, num_test=1000):\n\n self.num_test = num_test\n self.player_wins = 0\n self.opponent_wins = 0\n self.optimal_wins = 0\n self.optimal_losses = 0\n\n self.game.restart()\n\n for test in range(num_test):\n self.game.deal_cards()\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='exploit')\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n (self.game.set_player_action(player_action)\n .set_opponent_action(opponent_action))\n player_score, opponent_score = self.game.get_scores()\n\n if player_score > opponent_score:\n self.player_wins += 1\n elif opponent_score > player_score:\n self.opponent_wins += 1\n\n optimal_result = self.game.get_optimal_result()\n if optimal_result > 0:\n self.optimal_wins += 1\n elif optimal_result < 0:\n self.optimal_losses += 1\n\n print(\"Testing done!\")", "def playGame(self):\n BET_AMOUNT=1\n bets = 0\n win = 0\n loss=0\n result = 0\n cash=0\n for i in range(self.trials):\n cash=self.stake\n #loop continue till stake amount goes to 0 or stake amount reaches the goal\n while cash > 0 or cash == self.GOAL:\n 
bets+=1 #increments bet by 1\n result=random.randint(0,1)\n if result == 1:\n cash+=BET_AMOUNT\n win+=1\n else:\n cash-=BET_AMOUNT\n loss+=1\n\n print(\"No of times games won=\",win)\n print(\"No of times games lost=\",loss)\n print(\"Percentage of game won=\",100.0 * win / trials) #calculates winning percentage\n print(\"Percentage of game lost=\",100.0 * loss / trials) #calculates loss percentage", "def playerStandings():\n # place all players in a dictionary\n player_dict = {}\n conn, c = connect()\n c.execute(\"\"\"SELECT * FROM players;\"\"\")\n for row in c.fetchall():\n player_dict[row[0]] = [row[1], 0, 0]\n\n # count the number of win and matches in for all matches\n c.execute(\"\"\"SELECT winner, loser FROM matches;\"\"\")\n for row in c.fetchall():\n if row[0] in player_dict:\n player_dict[row[0]][1] += 1\n player_dict[row[0]][2] += 1\n if row[1] in player_dict:\n player_dict[row[1]][2] += 1\n\n # compile win counts as the key to dictionary\n win_count = {}\n for i in player_dict:\n wins = player_dict[i][1]\n if wins in win_count:\n win_count[wins].append((i, player_dict[i][0],\n wins, player_dict[i][2]))\n else:\n win_count[wins] = [(i, player_dict[i][0],\n wins, player_dict[i][2])]\n\n # compile output list\n output_list = []\n for i in sorted(win_count.keys(), reverse=True):\n for j in win_count[i]:\n output_list.append(j)\n\n return output_list", "def play_n_game(n_games, n_toss):\n results_list = []\n for _ in range(n_games):\n results_list.append(play_one_game(n_toss))\n dict_proba = {}\n for j in range (n_toss + 1):\n if results_list.count(j) != 0:\n dict_proba[j] = results_list.count(j)/n_games\n else:\n continue\n return dict_proba", "def print_outcomes(sim_output, strategy_name):\n\n # mean and confidence interval text of game reward\n reward_mean_CI_text = Format.format_estimate_interval(\n estimate=sim_output.get_ave_reward(),\n interval=sim_output.get_CI_reward(alpha=P.alpha),\n deci=1)\n\n # print game reward statistics\n print(strategy_name)\n print(\" Estimate of the mean game reward and {:.{prec}%} confidence interval:\".format(1 - P.alpha, prec=0),\n reward_mean_CI_text)", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def evaluateWinner(self):\n\t\tif self.pots[-1] == 0:\n\t\t\tself.pots.pop()\n\t\tlivePlayers = self.getLivePlayers()\t\n\t\tfor i in range(len(self.pots)):\n\t\t\tplayers = self.getPlayersInPot(i, livePlayers)\n\t\t\tevaluations = []\n\t\t\tfor x in players:\n\t\t\t\tcombined = x.hand + self.communityCards\n\t\t\t\tevaluations.append((x, self.evaluator.getRankOfSeven(\tcombined[0], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[1], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[2], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[3], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[4], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[5], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[6] )))\n\t\t\twinners = self.getWinners(evaluations, i)\n\t\t\tself.handOutMoney(winners, i)\n\t\t\tself.potwinQ.append(winners[0].name)", "def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = 
self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()" ]
[ "0.6959197", "0.69120795", "0.6784505", "0.6746052", "0.6450407", "0.63989484", "0.6303797", "0.62905324", "0.6278678", "0.62770176", "0.6225944", "0.6218578", "0.62119985", "0.61761725", "0.61730874", "0.617251", "0.61612713", "0.61560404", "0.61399037", "0.61386585", "0.61249965", "0.6104419", "0.61025745", "0.60881394", "0.60770327", "0.605299", "0.60470366", "0.6046694", "0.6020354", "0.60144436" ]
0.7144467
0
Calculate all primes up to limit using the sieve of Eratosthenes method
def eratosthenes(limit):
    if isinstance(limit, (int, float)) and limit == int(limit):
        limit = int(limit)
    else:
        raise ValueError
    primes = []
    mask = [1]*(limit+1)
    for i in range(2, limit+1):
        if mask[i]:
            primes.append(i)
            for j in range(i*i, limit+1, i):
                mask[j] = 0
    return np.asarray(primes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def sieve(upto):\n return list(prime_numbers(upto))", "def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n 
return [i for i in range(2, lim + 1) if s[i]]", "def sieve(max):\n\tprimes = [False]*max\n\tfor i in range(2, int(math.sqrt(len(primes)))):\n\t\tif primes[i] == False:\n\t\t\tfor j in range(i*i, max, i):\n\t\t\t\tprimes[j] = True\n\tcount = 0\n\tprint(\"Prime numbers under \", max, \":\", sep='')\n\tfor j in range(2, max):\n\t\tif primes[j] == False:\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tprint(j)\n\t\t\telse:\n\t\t\t\tprint(j, end='\\t')\n\tprint()", "def main():\n limit = 1000\n max_primes = 0\n max_b, max_c = 0, 0\n is_prime = sieve_of_eratosthenes_bool(limit * 100)\n primes = sieve_of_eratosthenes(limit)\n for c in primes:\n for b in range(-c, limit, 2):\n for n in count(1):\n res = n * n + b * n + c\n if res < 1 or not is_prime[res]:\n if max_primes < n:\n max_primes = n\n max_b, max_c = b, c\n print(max_primes, max_b, max_c, end='\\n')\n break\n print(max_b, max_c, max_b * max_c)", "def get_primes(n):\n\n return list(primes_sieve(n))", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False", "def sieve_of_eratosthenes(n: int) -> List[int]:\n\n prime = [True for i in range(n+1)] #initiate array named prime with all value True, ie everynumber [0,n] are prime\n p = 2\n while (p * p <= n):\n # If prime[p] is not\n # changed, then it is a prime\n if (prime[p] == True): #if any number is prime then its multiple must be composite\n # Update all multiples of p to be not prime \n for i in range(p * p, n+1, p):\n prime[i] = False\n p += 1\n\n\n '''\n till here the status of code is:\n 0:prime\n 1:prime\n 2:prime\n 3:prime\n 5:prime\n 7:prime\n 11:prime\n .\n .\n .\n\n But 0 and 1 are not prime, so we will have to count numbers from 2\n '''\n\n return [i for i, p in enumerate(prime[2:], 2) if p]", "def eratosthenes(x):\n multiples = []\n for i in range(2, x+1):\n if i not in multiples:\n print (i)\n for j in range(i*i, x+1, i):\n multiples.append(j)", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif 
sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes", "def seive_of_eratosthenes(n):\n sieve = [ True for i in range(n+1) ]\n def markOff(pv):\n for i in range(pv+pv, n+1, pv):\n sieve[i] = False\n markOff(2)\n for i in range(3, n+1):\n if sieve[i]:\n markOff(i)\n return [ i for i in range(2, n+1) if sieve[i] ]" ]
[ "0.83246547", "0.81167096", "0.7945687", "0.76976126", "0.7671113", "0.7646691", "0.7636615", "0.75908536", "0.75533134", "0.75512606", "0.7477832", "0.7464496", "0.7456727", "0.73623115", "0.7354597", "0.733797", "0.73195255", "0.7304559", "0.72923464", "0.72923464", "0.7290933", "0.72840416", "0.7281324", "0.7275013", "0.727279", "0.7262254", "0.72617006", "0.7255621", "0.724308", "0.7227427" ]
0.8271184
1
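For reference, a minimal usage sketch of the eratosthenes function above. The numpy import is an assumption here: the snippet returns np.asarray(primes) but does not import numpy itself.

import numpy as np

# assumes eratosthenes() as defined above is in scope
primes = eratosthenes(30)
print(primes)                      # expected: [ 2  3  5  7 11 13 17 19 23 29]
assert len(primes) == 10 and primes[-1] == 29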
Calculate all primes up to limit using the sieve of Eratosthenes method while conserving memory
def eratosthenes_mem(limit):
    if isinstance(limit, (int, float)) and limit == int(limit):
        limit = int(limit)
    else:
        raise ValueError
    primes = [2]
    multiples = [2]
    limit += 1
    for candidate in range(3, limit):
        if candidate not in multiples:
            primes.append(candidate)
            multiples.append(2*candidate)
        for i, m in enumerate(multiples):
            if m <= candidate:
                multiples[i] += primes[i]
    return np.asarray(primes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def sieve(upto):\n return list(prime_numbers(upto))", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes", "def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res", "def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]", "def get_primes(n):\n\n return list(primes_sieve(n))", "def main():\n limit = 1000\n max_primes = 0\n max_b, max_c = 0, 0\n is_prime = sieve_of_eratosthenes_bool(limit * 100)\n primes = sieve_of_eratosthenes(limit)\n for c in primes:\n for b in range(-c, limit, 2):\n for n in count(1):\n res = n * n + b * n + c\n if res < 1 or not is_prime[res]:\n if 
max_primes < n:\n max_primes = n\n max_b, max_c = b, c\n print(max_primes, max_b, max_c, end='\\n')\n break\n print(max_b, max_c, max_b * max_c)", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False", "def sieve(max):\n\tprimes = [False]*max\n\tfor i in range(2, int(math.sqrt(len(primes)))):\n\t\tif primes[i] == False:\n\t\t\tfor j in range(i*i, max, i):\n\t\t\t\tprimes[j] = True\n\tcount = 0\n\tprint(\"Prime numbers under \", max, \":\", sep='')\n\tfor j in range(2, max):\n\t\tif primes[j] == False:\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tprint(j)\n\t\t\telse:\n\t\t\t\tprint(j, end='\\t')\n\tprint()", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]", "def eratosthenes(x):\n multiples = []\n for i in range(2, x+1):\n if i not in multiples:\n print (i)\n for j in range(i*i, x+1, i):\n multiples.append(j)", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result", "def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n", "def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]", "def seive_of_eratosthenes(n):\n sieve = [ True for i in range(n+1) ]\n def markOff(pv):\n for i in range(pv+pv, n+1, pv):\n sieve[i] = False\n markOff(2)\n for i in range(3, n+1):\n if sieve[i]:\n markOff(i)\n return [ i for i in range(2, n+1) if sieve[i] ]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n 
if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False" ]
[ "0.8264546", "0.8237874", "0.78761345", "0.768567", "0.76232296", "0.76230603", "0.7604848", "0.75785387", "0.7548938", "0.7480768", "0.74737644", "0.7463756", "0.7346134", "0.7335119", "0.7326132", "0.7315538", "0.7275974", "0.7252524", "0.7247536", "0.7241287", "0.7239895", "0.7208653", "0.71955127", "0.7193224", "0.71929246", "0.71851695", "0.7184413", "0.71654445", "0.71654445", "0.7163752" ]
0.8354203
0
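A small sanity check, assuming both sieve variants above are in scope together with numpy as np: the memory-conserving version keeps only one running multiple per prime found instead of a mask over the whole range, so it should return exactly the same array as the plain sieve.

import numpy as np

# assumes eratosthenes() and eratosthenes_mem() as defined above are in scope
a = eratosthenes(200)
b = eratosthenes_mem(200)
assert np.array_equal(a, b)        # both should list the 46 primes up to 200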
Plots primes up to n using func(n)
def prime_dist(func, n):
    x = func(n)
    y = list(range(len(x)))
    for i in range(len(x)):
        y[i] = (i+1)*np.log(x[i])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_ylabel(r'k $\cdot$ log(p$_k$)')
    ax.set_xlabel(r'p$_k$')
    plt.suptitle('Distribution of primes', fontsize=15)
    ax.plot(x, y, lw=2, color='#FE4365')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_sieve(n, x, poly={}, lin={}, label=True, shade=True):\n v = list(range(x + 1)) # integers 0, 1, ..., x\n if n == 0:\n v = prime_range(x)\n else:\n for p in prime_divisors(n):\n v = [k for k in v if k % p != 0 or k == p]\n # eliminate non-prime multiples of p\n v = set(v)\n j = 0\n w = [(0, j)]\n for i in range(1, x + 1):\n w.append((i, j))\n if i in v:\n j += 1\n w.append((i, j))\n w.append((i, 0))\n w.append((0, 0))\n if n == 0:\n t = \"Primes\"\n pos = x, .7 * j\n elif n == 1:\n t = \"All Numbers\"\n pos = x, 1.03 * j\n else:\n P = prime_divisors(n)\n if len(P) == 1:\n t = \"Sieve by %s\" % P[0]\n else:\n t = \"Sieve by %s\" % (', '.join([str(_) for _ in P]))\n pos = x, 1.05 * j\n F = line(w[:-2], **lin)\n if shade:\n F += polygon(w, **poly)\n if label:\n F += text(t, pos, horizontal_alignment=\"right\", rgbcolor='black')\n return F", "def get_primes(n):\n\n return list(primes_sieve(n))", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def primfact(e):\n for n in range(2, e):\n for x in range(2, n):\n if n % x == 0:\n break\n else:\n print n,", "def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes", "def palin_primes(n, m):\r\n if n == m:\r\n if is_palin(str(m)):\r\n if is_prime(m, i_unchanging):\r\n print(m)\r\n\r\n elif is_palin(str(n)):\r\n if is_prime(n, i_unchanging):\r\n print(str(n))\r\n palin_primes(n + 1, m)\r\n else:\r\n palin_primes(n + 1, m)\r\n else:\r\n palin_primes(n + 1, m)", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1, bound + 1):\n if is_prime(n):\n k += 1\n v.append((n, k / n))\n return plot_step_function(v, **args)", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def list_primes(n):\n primeList = []\n for i in range(n):\n if is_prime(i):\n primeList.append(i)\n return primeList", "def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if 
is_prime(num):\n primes.append(num)\n num += 1\n return primes", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def primes(n):\n sqrtN=n**0.5\n odds=[2]\n odds+=[i for i in range(3,n) if i%2>0]\n\n for i in odds:\n if i!=0 and i<=sqrtN:\n for j in odds[odds.index(i)+1:]:\n if j%i==0:\n odds[odds.index(j)]=0\n return [i for i in odds if i!=0]", "def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]", "def print_next_prime(number):\n index = number\n while True:\n index += 1\n if is_prime(index):\n print(index)", "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def evansPrimes(n):\n assert n>1\n primes = []\n for i in range(1,n+1):\n sums = 0\n for j in range(1,i):\n sums += evansMod(i,j)*j\n if sums == 1:\n primes.append(i)\n #print(primes) #for testing only\n return primes", "def prime_factorization(n):\n\t\n\tprimes = []\n\t\n\twhile not n % 2:\n\t\tprimes.append(2)\n\t\tn //= 2\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\twhile not n % possible_factor:\n\t\t\tprimes.append(i)\n\t\t\tn //= possible_factor\n\t\n\tif n > 1:\n\t\tprimes.append(n)\n\treturn primes", "def primeGen(n):\n primes = [2, 3, 5, 7, 11]\n if n in xrange(1, len(primes) + 1):\n return primes[:n]\n else:\n banlist = []\n count = 6\n while count <= n:\n Next = (primes[-2] + primes[-1]) - primes[-3]\n if not is_prime(Next):\n count -= 1\n banlist.append(Next)\n count += 1\n primes.append(Next)\n filterout(banlist, primes)\n return primes", "def primepower_dots(xmin, xmax, fontsize=7, drop=10):\n g = Graphics()\n for n in range(max(xmin, 2), ceil(xmax) + 1):\n F = factor(n)\n if len(F) == 1:\n g += point((n, 0), pointsize=50 * log(F[0][0]), rgbcolor=(1, 0, 0))\n if fontsize > 0:\n g += text(str(n), (n, -drop), fontsize=fontsize, rgbcolor='black')\n g.xmin(xmin)\n g.xmax(xmax)\n return g", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def primes(count):\n\n prime_numbers = [2]\n next_num = 3 \n\n def is_prime(next_num):\n if next_num % 2 
== 0:\n return False \n \n for i in range(3, next_num, 2):\n if next_num % i == 0:\n return False \n return True \n\n while count > len(prime_numbers): \n if is_prime(next_num): \n prime_numbers.append(next_num)\n next_num += 1\n\n return prime_numbers" ]
[ "0.71396583", "0.6347114", "0.6325962", "0.62111086", "0.62088567", "0.6179862", "0.61741006", "0.61727583", "0.6157313", "0.615701", "0.61407703", "0.6134305", "0.61313236", "0.61262596", "0.61203164", "0.611937", "0.611937", "0.60875714", "0.5940775", "0.59295005", "0.5918219", "0.59084845", "0.5907307", "0.5883462", "0.588033", "0.58710575", "0.5867615", "0.58650446", "0.5855541", "0.5851055" ]
0.76307726
0
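A hedged sketch tying the two prime records together: prime_dist takes any callable that returns the primes up to n, so the sieve above can be passed in directly, and the resulting curve of k*log(p_k) against p_k should run close to the diagonal for large n (p_k is roughly k*log(k) by the prime number theorem). The numpy and matplotlib imports are assumptions, since the snippet uses np and plt without importing them.

import numpy as np
import matplotlib.pyplot as plt

# assumes eratosthenes() and prime_dist() as defined above are in scope
prime_dist(eratosthenes, 10000)    # plots k*log(p_k) versus p_k for primes up to 10000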
This method is in charge of loading the VTK file in order to obtain an unprojected set of particle positions
def loadVTK(self, filename, folder):
    import vtk
    print('Extracting Dataset')
    start = time.time()
    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(folder + filename)
    reader.Update()
    polydata = reader.GetOutput()
    n = polydata.GetNumberOfPoints()
    self.data = np.array([0, 0, 0])
    for i in range(0, n, 1):
        vraw = list(polydata.GetPoint(i))
        inRange = np.all([vraw[0] > self.ranges[0,0], vraw[0] < self.ranges[0,1],
                          vraw[1] > self.ranges[1,0], vraw[1] < self.ranges[1,1],
                          vraw[2] > self.ranges[2,0], vraw[2] < self.ranges[2,1]])
        if inRange:
            self.data = np.vstack((self.data, np.array(vraw)))
        if i % 50000 == 0:
            print(' Out of the ' + str(n) + ' particles in the dataset, ' + str(i) + ' (' + str(round(i*100/n, 3)) + ' %) have been processed, and ' + str(len(self.data) - 1) + ' have been stored.')
    self.data = self.data[1:, :]
    rangeStr = '_x[' + str(self.ranges[0,0]) + ',' + str(self.ranges[0,1]) + ']_y[' + str(self.ranges[1,0]) + ',' + str(self.ranges[1,1]) + ']_z[' + str(self.ranges[2,0]) + ',' + str(self.ranges[2,1]) + '].npy'
    np.save(folder + 'VoronoiData' + rangeStr, self.data)
    print('Elapsed Time: ' + str(round(time.time() - start, 3)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_particle_ic(self, file_name):\n \n\n data = np.genfromtxt(file_name, names = True)\n\n self.N_part = np.size(data['x'])\n\n self.pos = np.array([data['x'], data['y'], data['z']])\n self.pos = self.pos.T.reshape(self.N_part,3)\n self.vel = np.array([data['vx'], data['vy'], data['vz']])\n self.vel = self.vel.T.reshape(self.N_part,3)\n \n self.M_part = data['m'][0] # assuming all particles have same mass\n\n _my_print('loaded %6i particles from '%(self.N_part) + file_name)\n return", "def read_groups_particles(filename):\n \n f = open(filename,'r')\n\n Ntot = fromstring(f.read(4),int32)[0]\n Pos\t = fromstring(f.read(3*4*Ntot),float32)\n Pos.shape = (Ntot,3)\n f.close()\n \n return Pos", "def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict", "def LoadSphere():\n return vtkInterface.PolyData(spherefile)", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def vaex_vertices_from_plyfile(filename):\n xyz = vertex_dict_from_plyfile(filename)\n return vx.from_dict(xyz)", "def import_gpos( self ):\n if not path.isfile(self.from_file):\n # no file there\n self.gpos_file = array([], 'd')\n return\n import xml.dom.minidom\n doc = xml.dom.minidom.parse(self.from_file)\n names = []\n xyz = []\n for el in doc.getElementsByTagName('pos'):\n names.append(el.getAttribute('subgrid'))\n xyz.append(list(map(lambda a : float(el.getAttribute(a)), 'xyz')))\n self.gpos_file = array(xyz, 'd').swapaxes(0, 1)\n self.subgrids = array(names)", "def readXYZPos(self,phys,xyzname):\r\n XYZReader.XYZReader(self.checkPath(xyzname)).read(phys.myXYZ)\r\n\tphys.posvec.resize(phys.myXYZ.coords.size())\r\n for ii in range(0, phys.myXYZ.coords.size()*3, 3):\r\n phys.positions[ii] = phys.myXYZ.coords[ii]\r\n phys.positions[ii+1] = phys.myXYZ.coords[ii+1]\r\n phys.positions[ii+2] = phys.myXYZ.coords[ii+2]", "def read_ply_xyz(filename):\n assert(os.path.isfile(filename))\n with open(filename, 'rb') as f:\n plydata = PlyData.read(f)\n num_verts = plydata['vertex'].count\n vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)\n vertices[:,0] = plydata['vertex'].data['x']\n 
vertices[:,1] = plydata['vertex'].data['y']\n vertices[:,2] = plydata['vertex'].data['z']\n return vertices", "def read_positions():\n return np.genfromtxt(\"POSITIONS.OUT\").transpose()", "def loadVtk(self, fname):\n\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(filename)\n reader.Update()\n self._vtk = reader.GetOutput()", "def read_xyz_file(filename, num_spatial_dimensions):\n print(\"Reading data from XYZ file.\")\n\n particle_positions = []\n frame_number = 0\n line_number = 0\n frame_particles = 0\n with open(filename, 'r') as input_file:\n for line in input_file:\n if line_number == 0:\n # Check for blank line at end of file\n if line != \"\":\n frame_particles = int(line)\n particle_positions.append(np.zeros((frame_particles, num_spatial_dimensions)))\n elif line_number == 1:\n pass\n else:\n for dimension in range(num_spatial_dimensions):\n particle_positions[frame_number][line_number-2][dimension] = line.split()[1:][dimension]\n line_number += 1\n # If we have reached the last particle in the frame, reset counter for next frame\n if line_number == (frame_particles + 2):\n line_number = 0\n frame_number += 1\n\n print(\"XYZ read complete.\")\n\n return particle_positions", "def make_from_file(filehandle):\n lines = filehandle.readlines()\n label = str(lines[0].rstrip('\\n'))\n mass = float(lines[1].rstrip('\\n'))\n position = list(lines[2].rstrip('\\n').split(','))\n velocity = list(lines[3].rstrip('\\n').split(','))\n particle = Particle3D(label=label, mass=mass, position=position, velocity=velocity)\n filehandle.close()\n return particle", "def load_point_cloud(filename, min_norm_normal=1e-5, dtype=torch.float64):\n v, _, n = pcu.load_mesh_vfn(filename, dtype=np.float64)\n v, idx, _ = pcu.deduplicate_point_cloud(v, 1e-15, return_index=True) # Deduplicate point cloud when loading it\n n = n[idx]\n\n # Some meshes have non unit normals, so build a binary mask of points whose normal has a reasonable magnitude\n # We use this mask to remove bad vertices\n mask = np.linalg.norm(n, axis=-1) > min_norm_normal\n\n # Keep the good points and normals\n x = v[mask].astype(np.float64)\n n = n[mask].astype(np.float64)\n n /= np.linalg.norm(n, axis=-1, keepdims=True)\n\n return torch.from_numpy(x).to(dtype), torch.from_numpy(n).to(dtype)", "def load_velodyne_points(filename):\n points = np.fromfile(filename, dtype=np.float32).reshape(-1, 4)\n points[:, 3] = 1.0 # homogeneous\n return points", "def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. 
Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles", "def Load_PositionFile(position_filename):\n positions = pd.read_table(position_filename, delimiter=',', header=None)\n positions.columns = ['x','y']\n return positions", "def loadpts_traj(tnum, skip=40, filt=None):\n pts = []\n print('loading file: ', tnum)\n traj = md.load(DCD_PROT(tnum), top=PDB_PROT, stride=skip)\n if filt is not None:\n traj.atom_slice(filt, inplace=True)\n return traj.xyz", "def loadNodes(self, fname):\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read in the header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"SPECGRID\":\r\n self.SPECGRID = np.array(fp.readline().split()[0:3], dtype=int)\r\n if item[0] == \"COORDSYS\":\r\n self.COORDSYS = fp.readline().split()\r\n if item[0] == \"COORD\":\r\n break\r\n\r\n # Read in the coordinates\r\n self.coords = []\r\n for line in fp:\r\n if line.split()[-1] != \"/\":\r\n item = line.split()\r\n for c in item:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n 
self.coords.append(cc[-1])\r\n else:\r\n self.coords.append(c)\r\n else:\r\n if len(line.split()) > 1:\r\n item = line.split()\r\n for i in range(len(item) - 1):\r\n cc = item[i]\r\n if '*' in cc:\r\n ccc = cc.split('*')\r\n for j in range(int(ccc[0])):\r\n self.coords.append(ccc[-1])\r\n else:\r\n self.coords.append(c)\r\n break\r\n else:\r\n break\r\n\r\n # Read in ZCORN\r\n self.zcorn = []\r\n i = 0\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ZCORN\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n self.zcorn += line.split()\r\n else:\r\n self.zcorn += line.split()[0:-1]\r\n break\r\n if len(self.zcorn) > 0:\r\n break\r\n\r\n # Read in (in)active cells\r\n self.active = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ACTNUM\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n c = line.split()\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(float(cc[0])):\r\n self.active += cc[-1]\r\n else:\r\n self.active += c\r\n else:\r\n self.active += line.split()[0:-1]\r\n break\r\n\r\n self.coords = np.array(self.coords, dtype=float)\r\n print(self.coords)\r\n\r\n # In Petrel...\r\n self.ne = self.SPECGRID[0] # x i\r\n self.nn = self.SPECGRID[1] # y j\r\n self.nz = self.SPECGRID[2] # z k\r\n\r\n # build grid\r\n self.buildGrid(plot=False)\r\n self.buildActiveCells(plot=False)\r\n self.buildZGrid(plot=False)\r\n # self.calculateVolumes(plot=False)\r\n #\r\n # Convert to VTK\r\n self.GridType = \"vtkStructuredGrid\"\r\n self.Grid = vtk.vtkStructuredGrid()\r\n self.Grid.SetDimensions(self.ne+1, self.nn+1, self.nz+1)\r\n vtk_points = vtk.vtkPoints()\r\n ve = 1.\r\n\r\n for iz in range(self.nz):\r\n if iz == 0:\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZT[iz][ix,iy] )\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZB[iz][ix,iy] )\r\n self.Grid.SetPoints(vtk_points)\r\n\r\n # Add in active cells\r\n ac = vtk.vtkIntArray()\r\n ac.SetName( \"ActiveCells\" )\r\n for iac in self.ActiveCells.flatten( order='F' ):\r\n ac.InsertNextTuple1( iac )\r\n self.Grid.GetCellData().AddArray(ac)", "def load_poses(self):\n print('Loading poses for sequence ' + self.sequence + '...')\n\n pose_file = os.path.join(self.pose_path, self.sequence + '.txt')\n\n # Read and parse the poses\n try:\n self.T_w_cam0 = []\n with open(pose_file, 'r') as f:\n for line in f.readlines():\n T = np.fromstring(line, dtype=float, sep=' ')\n T = T.reshape(3, 4)\n T = np.vstack((T, [0, 0, 0, 1]))\n self.T_w_cam0.append(T)\n print('done.')\n\n except FileNotFoundError:\n print('Ground truth poses are not avaialble for sequence ' +\n self.sequence + '.')", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n 
pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def cfdReadPointsFile(self):\r\n\r\n with open(self.pointsFile,\"r\") as fpid:\r\n \r\n print('Reading points file ...')\r\n points_x=[]\r\n points_y=[]\r\n points_z=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n self.numberOfNodes = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\"\")\r\n tline=tline.replace(\")\",\"\")\r\n tline=tline.split()\r\n \r\n points_x.append(float(tline[0]))\r\n points_y.append(float(tline[1]))\r\n points_z.append(float(tline[2]))\r\n \r\n ## (array) with the mesh point coordinates \r\n self.nodeCentroids = np.array((points_x, points_y, points_z), dtype=float).transpose()", "def load_from_planetoid_files(dataset_name, path):\n\n def _sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n def _parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n def _load_file(name):\n \"\"\"Load from data file.\"\"\"\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)\n\n x = _load_file('x')\n y = _load_file('y')\n tx = _load_file('tx')\n ty = _load_file('ty')\n allx = _load_file('allx')\n ally = _load_file('ally')\n graph = _load_file('graph')\n\n filename = 'ind.{}.test.index'.format(dataset_name)\n filename = os.path.join(path, filename)\n test_idx_reorder = _parse_index_file(filename)\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_name == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph).\n # Find isolated nodes, add them as zero-vecs into the right position.\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), 
x.shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y) + 500)\n\n train_mask = _sample_mask(idx_train, labels.shape[0])\n val_mask = _sample_mask(idx_val, labels.shape[0])\n test_mask = _sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return (adj, features, y_train, y_val, y_test, train_mask, val_mask,\n test_mask, labels)", "def fill_3d_gt_poses(dataset_file, train_set):\n with open(dataset_file, 'rb') as handle:\n dataset = pickle.load(handle)\n\n if train_set:\n subjects = subjects_train\n else:\n subjects = subjects_test\n\n for subject in subjects:\n data_path = Path('data/') / subject / 'MyPoseFeatures' / 'D3_Positions_mono'\n files = list(sorted(data_path.glob('*.cdf')))\n assert len(files) > 0 # something is wrong with data paths...\n for file in files:\n cdf_file = cdflib.CDF(file)\n poses_3d = cdf_file[0].squeeze()\n assert poses_3d.shape[1] == 96\n # select 17 joints:\n joints = [0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]\n poses_3d = poses_3d.reshape(-1, 32, 3)[:, joints]\n poses_3d = poses_3d.swapaxes(1, 2).reshape(-1, 3*17)\n # select every 4th frame\n indices = np.arange(3, len(poses_3d), 4)\n poses_3d = poses_3d[indices, :]\n\n # extract action, subaction and cam from filename\n filename = str(file.stem)\n if ' ' in filename:\n action, rest_info = filename.split(\" \")\n else:\n action, rest_info = filename.split(\".\")\n\n # rename for consistency:\n # TakingPhoto -> Photo, WalkingDog -> WalkDog\n if action == 'TakingPhoto':\n action = 'Photo'\n if action == 'WalkingDog':\n action = 'WalkDog'\n\n # take care of inconsistent naming...\n if subject == 'S1':\n if action == 'Eating':\n # S1 Eating (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Sitting':\n # S1 Sitting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'SittingDown':\n # S1 SittingDown (., 2)\n rest_info = fix_dot_2(rest_info)\n\n if subject == 'S5':\n if action == 'Directions':\n # S5 Directions (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Discussion':\n # S5 Discussion (2, 3)\n rest_info = fix_2_3(rest_info)\n if action == 'Greeting':\n # S5 Greeting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Photo':\n # S5 Photo (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Waiting':\n # S5 Waiting (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S6':\n if action == 'Eating':\n # S6 Eating (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Posing':\n # S6 Posing (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Sitting':\n # S6 Sitting (1,2)\n rest_info = fix_1_2(rest_info)\n if action == 'Waiting':\n # S6 Waiting (., 3)\n rest_info = fix_dot_3(rest_info)\n\n if subject == 'S7':\n if action == 'Phoning':\n # S7 Phoning (., 2)\n rest_info = fix_dot_2(rest_info)\n if action 
== 'Waiting':\n # S7 Waiting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Walking':\n # S7 Walking (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S8':\n if action == 'WalkTogether':\n # S8 WalkTogether (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S9':\n if action == 'Discussion':\n # S9 discussion (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S11':\n if action == 'Discussion':\n rest_info = fix_1_2(rest_info)\n if action == 'Greeting':\n # S11 greeting (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Phoning':\n # S11 phoning (2,3)\n rest_info = fix_2_3(rest_info)\n if action == 'Smoking':\n # S11 smoking (., 2)\n if '2.' in rest_info:\n # replace 2. with .\n rest_info = fix_dot_2(rest_info)\n\n assert rest_info[:2] == '1.' or '.' not in rest_info\n if '.' not in rest_info:\n subact = '0'\n cam = rest_info\n else:\n subact = '1'\n cam = rest_info.split('.')[-1]\n\n if subject == 'S5' and subact == '1' and action == 'Waiting' and cam == '55011271':\n continue\n if subject == 'S11' and subact == '0' and action == 'Directions' and cam == '54138969':\n continue\n\n used_frames = len(dataset[subject][action][subact][cam]['imgpath'])\n assert used_frames <= len(poses_3d)\n poses_3d = poses_3d[:used_frames]\n dataset[subject][action][subact][cam]['3d_gt'] = poses_3d\n\n if train_set:\n out_file = c.train_file\n else:\n out_file = c.test_file\n with open(out_file, 'wb') as handle:\n pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def InterpolateSurfaceVectors():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(Centroids1,Vectors1,Centroids2)\r\n # Make the data sparser to display better.\r\n C1,V1 = SparseData(Centroids1,Vectors1,0.2)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.2)\r\n\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/NormalVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def read(self):\n stable_poses = []\n f = open(self.filepath_, \"r\")\n data = [line.split() for line in f]\n for i in range(len(data)):\n if len(data[i]) > 0 and data[i][0] == \"p\":\n p = float(data[i][1])\n r = [[data[i+1][1], data[i+1][2], data[i+1][3]], [data[i+2][0], data[i+2][1],\n data[i+2][2]], [data[i+3][0], data[i+3][1], data[i+3][2]]]\n r = np.array(r).astype(np.float64)\n x0 = np.array([data[i+4][1], data[i+4][2], data[i+4][3]]).astype(np.float64)\n stable_poses.append(sp.StablePose(p, r, x0))\n return stable_poses", "def prepare_data(self, file):\n maps = np.load(file)\n pred, gt, not_care 
= maps[-3:]\n return self.get_polygon(pred), self.get_polygon(gt), self.get_polygon(not_care)", "def load_point_cloud(self, filename):\n f = sio.loadmat(filename)\n data = f['blob'][:]\n data -= np.mean(data, 0)\n data /= np.amax(abs(data))\n label = DataHandler.get_label_from_filename(filename)\n if self.use_softmax:\n l = np.zeros([2])\n l[label] = 1\n label = l\n return data, label", "def onevtkfile():\n basedir = '/home/amit/WorkSpace/UCLA/simulations/PhaseDiagram/RawData'\n with hp.File('VTKFile.h5', 'w') as onefile:\n allvtk = np.empty((600, 500, 3, 216), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n vtkfilepath = '{}/Run{}/VTKFile-{}.h5'.format(basedir, i, j+1)\n with hp.File(vtkfilepath, 'r') as vtkfile:\n for t in range(500):\n allvtk[j, t, i, :] = vtkfile['T{}/Points'.format(2*t)][:].ravel()\n onefile.create_dataset('Points', data=allvtk, chunks=(1, 50, 3, 216), \n compression='gzip', compression_opts=9)", "def _load_serialized_mesh(filename):\n print 'Loading mesh data from NPZ file', filename\n npzfile = np.load(filename)\n\n k = npzfile['k'].item()\n initial_point = npzfile['initial_point']\n initial_face_index = npzfile['initial_face_index'].item()\n\n all_vertices = npzfile['all_vertices']\n triangles = npzfile['triangles']\n face_local_bases = npzfile['face_local_bases']\n neighbor_faces = npzfile['neighbor_faces']\n\n return [k, initial_point, initial_face_index,\n all_vertices, triangles, face_local_bases, neighbor_faces]" ]
[ "0.6929919", "0.62454665", "0.6166364", "0.61089855", "0.6038509", "0.5972155", "0.594291", "0.58736014", "0.5779551", "0.5759098", "0.57093155", "0.57034266", "0.57004243", "0.56888515", "0.56257796", "0.5618636", "0.56165904", "0.5567601", "0.5542718", "0.55128044", "0.55081797", "0.5503673", "0.55031824", "0.5475489", "0.5465841", "0.5436384", "0.54043716", "0.53981173", "0.53885883", "0.53831434" ]
0.6832213
1
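The values above close out one row: the bracketed list holds the retrieval scores of the 30 negatives as strings, followed by the positive document's own score and its stored rank. Below is a minimal sketch, not part of the dataset itself, of how such a row could be inspected once exported as a JSON record whose keys match the column names listed at the top of this dump; the file name "example_row.json" is a placeholder.

import json

# Hypothetical example: "example_row.json" stands in for a single exported row
# with keys matching the dump's column names (negative_scores, document_score,
# document_rank). Scores are stored as strings, so cast before comparing.
with open("example_row.json") as fh:
    row = json.load(fh)

doc_score = float(row["document_score"])                # e.g. 0.6832213
neg_scores = [float(s) for s in row["negative_scores"]]  # 30 entries per row

print(f"positive document score: {doc_score:.7f}")
print(f"highest negative score:  {max(neg_scores):.7f}")
print(f"negatives scoring above the document: {sum(s > doc_score for s in neg_scores)}")
print(f"stored document_rank: {row['document_rank']}")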