query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, lengths 4 to 10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---
The function uses Sqoop's export functionality to export data from Hive to a MySQL database. | def hive_to_mysql(hive_conn, username, password, host, port, export_dir,
table_name):
# the --input-fields-terminated-by parameter specifies the field delimiter used in the export directory files ('\001' is Hive's default)
os.system("sqoop export --connect jdbc:mysql://{0}:{1}/hive --username " \
"{2} --password {3} --table {4} --export-dir {5} --input-fields-terminated-by " \
"'\001'".format(host,port,username,password,table_name,export_dir)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(dest_dir, db_host, db_port, db_name, db_schema, db_username, db_password, ssl_mode,\n force, cores, memory_per_core, default_partition_col, partition_col,\n nr_partitions):\n partition_col_dict = {k: v for k, v in partition_col}\n nr_partitions_dict = {k: v for k, v in nr_partitions}\n\n dest_dir_path = Path(dest_dir)\n dest_dir_path.mkdir(exist_ok=True, parents=True)\n\n db_params = PostgresDBParams(user=db_username, host=db_host, password=db_password,\n port=db_port, db=db_name, schema=db_schema, ssl_mode=ssl_mode)\n\n with PostgresDBConnectionWrapper(db_params) as db_wrapper:\n tables = db_wrapper.list_tables()\n\n spark_cfg = spark_wrapper.default_spark_config(cores, memory_per_core, use_utc=True)\n with spark_wrapper.create_spark_session_from_config(spark_cfg) as spark:\n dumper = PostgresTableDumper(db_params, spark)\n for t in tables:\n logging.info('Dumping table %s', t)\n\n tbl_path = Path(dest_dir_path, t)\n\n if not tbl_path.exists() and not force:\n default_col = None\n\n if default_partition_col:\n cols = db_wrapper.list_columns(t)\n if default_partition_col in cols:\n default_col = default_partition_col\n else:\n logging.warning(\n \"Default partition column %s not found among columns [%s]\",\n default_partition_col, ','.join(cols))\n\n p_col = partition_col_dict.get(t, default_col)\n nr_part = nr_partitions_dict.get(t, None)\n\n dumper.dump_table(t, tbl_path, p_col, nr_part)\n else:\n logging.info('Path %s already exists, not dumping table %s',\n tbl_path, t)\n\n counts_match = row_counts_match(tbl_path, t, db_wrapper, spark)\n\n if counts_match:\n logging.info(\"Counts for %s match\", t)\n else:\n logging.error(\"Counts for %s don't match\", t)",
"def exportTable(self, localHDFSpath, hiveDB, hiveTable):\n\t\tlogging.debug(\"Executing copy_operations.exportTable()\")\n\n\t\tif localHDFSpath == None:\n\t\t\tlogging.error(\"You need to specify a local HDFS path\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\tif hiveDB == None: hiveDB = self.Hive_DB\n\t\tif hiveTable == None: hiveTable = self.Hive_Table\n\n\t\tlocalHDFSpath = (localHDFSpath + \"/\"+ hiveDB + \"/\" + hiveTable).replace('$', '').replace(' ', '')\n#\t\tremoteHDFSpath = (remoteHDFSpath + \"/\"+ hiveDB + \"/\" + hiveTable).replace('$', '').replace(' ', '')\n\n\t\tlogging.info(\"Deleting local HDFS directory before export\")\n\n\t\thdfsDeleteCommand = [\"hdfs\", \"dfs\", \"-rm\", \"-r\", \"-skipTrash\", localHDFSpath]\n\t\tsh_session = subprocess.Popen(hdfsDeleteCommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\t\thdfsDeleteoutput = \"\"\n\n\t\t# Print Stdout and stderr while distcp is running\n\t\twhile sh_session.poll() == None:\n\t\t\trow = sh_session.stdout.readline().decode('utf-8').rstrip()\n\t\t\tif row != \"\" and \"No such file or directory\" not in row:\n\t\t\t\tlogging.info(row)\n\t\t\t\thdfsDeleteoutput += row + \"\\n\"\n\t\t\t\tsys.stdout.flush()\n\n\t\t# Print what is left in output after distcp is finished\n\t\tfor row in sh_session.stdout.readlines():\n\t\t\trow = row.decode('utf-8').rstrip()\n\t\t\tif row != \"\" and \"No such file or directory\" not in row:\n\t\t\t\tlogging.info(row)\n\t\t\t\thdfsDeleteoutput += row + \"\\n\"\n\t\t\t\tsys.stdout.flush()\n\n\t\tself.common_operations.connectToHive(forceSkipTest=True)\n\n\t\tlogging.info(\"Exporting table\")\t\n#\t\tquery = \"export table `%s`.`%s` to '%s'\"%(hiveDB, hiveTable, localHDFSpath)\n\t\tquery = \"export table %s.%s to '%s'\"%(hiveDB, hiveTable, localHDFSpath)\n\t\tself.common_operations.executeHiveQuery(query)\n\n\t\tlogging.debug(\"Executing copy_operations.exportTable() - Finished\")",
"def export_table_to_cloudstorage(fc,description,fileNamePrefix):\n \n task = ee.batch.Export.table.toCloudStorage(\n collection = ee.FeatureCollection(fc),\n description = description,\n bucket = GCS_BUCKET,\n fileNamePrefix = GCS_OUTPUT_PATH + fileNamePrefix,\n fileFormat = \"CSV\"\n )\n task.start()",
"def do_export_schema(self):\n export_schema = self.get_arg_value(\"export_schema\")\n\n if export_schema:\n row = {\"schemas\": self.final_schemas}\n self.write_rows(rows=row)\n del row",
"def export_db(self, export_location: Path) -> None:\n raise NotImplementedError",
"def exportTable(self, tablename, fileformat, path, filename, cols=None):\n\n # Path to the output file\n fpath = os.path.join(path, filename)\n\n # Read data:\n if cols is list:\n options = ','.join(cols)\n else:\n options = cols\n\n df = self.readDBtable(tablename, selectOptions=options)\n\n # ######################\n # Export results to file\n if fileformat == 'pkl':\n df.to_pickle(fpath)\n\n else:\n df.to_excel(fpath)\n\n return",
"def writeToJDBC(df,tableName,spark):\n #df.table(tableName).write.jdbc(config.jdbcUrl,tableName,config.connectionProperties)\n #df = df.na.fill(0)\n mode= \"overwrite\"\n #print(\"jdbcURL: \",config.jdbcUrl,\"\\ntable Name :\",tableName,\"\\nmode:\",mode,\"\\nconnection property\",config.connectionProperties,\"\\n\")\n try:\n \n df.write.jdbc(url=config.jdbcUrl, table=tableName, mode=mode, properties=config.connectionProperties)\n print(\"Inserting data into PostgreSQL...\", \"\\n\")\n except Exception as e:\n print(e)",
"def exportTable(self):\n try:\n self.createTable() #Create a table\n self.insertData() #Insert the daily settings\n print('Database has been exported to ' + self.destination + '\\\\'+ self.database + '\\n') #Export the table\n except:\n print('Enigma table already exists for this database. Please choose another database.') #Otherwise inform the user that the table exists\n self.reset() #Prompt a new input for the database name\n self.exportTable() #Try and export the new database using recursion",
"def from_df_to_hdfs(f_output_table,\n f_out_file,\n f_path=None):\n if f_path is None:\n f_path = \"/user/{0}/\".format(get_user())\n if f_out_file.split(\".\")[-1] == \"csv\":\n f_output_table.write.save(\n path= f_path + f_out_file,\n mode=\"overwrite\",\n format=\"com.databricks.spark.csv\",\n header=\"true\")\n else:\n f_output_table.write.saveAsTable(f_out_file, mode=\"overwrite\")",
"def copyImportSchemaToDestinations(self, tableID=None, hiveDB=None, hiveTable=None, connectionAlias=None, copyDAGnoSlave=False, deployMode=False):\n\t\tlocalSession = self.configDBSession()\n\n\t\tif self.copyDestinations == None:\t\n\t\t\tif deployMode == False:\n\t\t\t\tlogging.warning(\"There are no destination for this table to receive a copy\")\n\t\t\telse:\n\t\t\t\tlogging.warning(\"There are no destination for this deployment\")\n\t\t\treturn\n\n\t\tif tableID == None and hiveDB == None and hiveTable == None and connectionAlias == None:\n\t\t\t# This happens during a normal import\n\t\t\ttableID = self.import_config.table_id\n\t\t\thiveDB = self.import_config.Hive_DB\n\t\t\thiveTable = self.import_config.Hive_Table\n\t\t\tconnectionAlias = self.import_config.connection_alias\n\t\t\tprintDestination = True\n\t\telse:\n\t\t\t# This happens during a \"manage --copyAirflowImportDAG\" or during deployment. \n\t\t\t# And then the destination is specified in cmd and not needed to be printed\n\t\t\tprintDestination = False\n\n\t\tfor destAndMethod in self.copyDestinations:\n\t\t\tdestination = destAndMethod.split(';')[0]\n\t\t\tmethod = destAndMethod.split(';')[1]\n\t\t\tif self.connectRemoteDBImportInstance(instance = destination):\n\t\t\t\tif printDestination == True:\n\t\t\t\t\tlogging.info(\"Copy schema definitions for %s.%s to instance '%s'\"%(hiveDB, hiveTable, destination))\n\t\t\t\telse:\n\t\t\t\t\tif deployMode == False:\n\t\t\t\t\t\tlogging.info(\"Copy schema definitions for %s.%s\"%(hiveDB, hiveTable))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogging.info(\"Deploying schema definitions for %s.%s\"%(hiveDB, hiveTable))\n\t\t\t\tremoteSession = self.remoteInstanceConfigDBSession()\n\n\t\t\t\tjdbcConnections = aliased(configSchema.jdbcConnections)\n\t\t\t\timportTables = aliased(configSchema.importTables)\n\t\t\t\timportColumns = aliased(configSchema.importColumns)\n\t\t\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\t\t\t# Check if if we are going to sync the credentials for this destination\n\t\t\t\tresult = (localSession.query(\n\t\t\t\t\t\tdbimportInstances.sync_credentials\n\t\t\t\t\t)\n\t\t\t\t\t.select_from(dbimportInstances)\n\t\t\t\t\t.filter(dbimportInstances.name == destination)\n\t\t\t\t\t.one())\n\n\t\t\t\tif result[0] == 1:\n\t\t\t\t\tsyncCredentials = True\n\t\t\t\telse:\n\t\t\t\t\tsyncCredentials = False\n\n\t\t\t\t# Check if the table exists on the remote DBImport instance\n\t\t\t\tresult = (remoteSession.query(\n\t\t\t\t\t\timportTables\n\t\t\t\t\t)\n\t\t\t\t\t.filter(importTables.hive_db == hiveDB)\n\t\t\t\t\t.filter(importTables.hive_table == hiveTable)\n\t\t\t\t\t.count())\n\n\t\t\t\tif result == 0:\n\t\t\t\t\t# Table does not exist in target system. 
Lets create a skeleton record\n\t\t\t\t\tnewImportTable = configSchema.importTables(\n\t\t\t\t\t\thive_db = hiveDB,\n\t\t\t\t\t\thive_table = hiveTable,\n\t\t\t\t\t\tdbalias = connectionAlias,\n\t\t\t\t\t\tsource_schema = '',\n\t\t\t\t\t\tsource_table = '')\n\t\t\t\t\tremoteSession.add(newImportTable)\n\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t# Get the table_id from the table at the remote instance\n\t\t\t\tremoteImportTableID = (remoteSession.query(\n\t\t\t\t\t\timportTables.table_id,\n\t\t\t\t\t\timportTables.dbalias\n\t\t\t\t\t)\n\t\t\t\t\t.select_from(importTables)\n\t\t\t\t\t.filter(importTables.hive_db == hiveDB)\n\t\t\t\t\t.filter(importTables.hive_table == hiveTable)\n\t\t\t\t\t.one())\n\n\t\t\t\tremoteTableID =\tremoteImportTableID[0]\n\t\t\t\tjdbcConnection = remoteImportTableID[1]\n\n\n\t\t\t\t##################################\n\t\t\t\t# Update jdbc_connections\n\t\t\t\t##################################\n\n\t\t\t\tself.copyJdbcConnectionToDestination(jdbcConnection=jdbcConnection, deployMode=deployMode, destination=destination)\n\n\t\t\t\t##################################\n\t\t\t\t# Update import_colums \n\t\t\t\t##################################\n\n\t\t\t\t# Read the entire import_table row from the source database\n\t\t\t\tsourceAllColumnDefinitions = pd.DataFrame(localSession.query(configSchema.importColumns.__table__)\n\t\t\t\t\t.filter(configSchema.importColumns.table_id == tableID)\n\t\t\t\t\t.order_by(configSchema.importColumns.column_order)\n\t\t\t\t\t)\n\n\t\t\t\ttargetAllColumnDefinitions = pd.DataFrame(remoteSession.query(configSchema.importColumns.__table__)\n\t\t\t\t\t.filter(configSchema.importColumns.table_id == remoteTableID)\n\t\t\t\t\t.order_by(configSchema.importColumns.column_order)\n\t\t\t\t\t)\n\n\t\t\t\tif not sourceAllColumnDefinitions.empty:\n\t\t\t\t\t# sourceAllColumnDefinitions might be empty if the table data only exists in import_tables and not in import_columns\n\n\t\t\t\t\tif targetAllColumnDefinitions.empty:\n\t\t\t\t\t\t# If the target DF is empty, it means that the table does not exist in the target system. So to be able to continue with the merge, we need the columns \n\t\t\t\t\t\t# to be presented. 
So we set them to the same as the sourceDefinition\n\t\t\t\t\t\ttargetAllColumnDefinitions = pd.DataFrame(data=None, columns=sourceAllColumnDefinitions.columns)\n\n\t\t\t\t\tsourceAllColumnDefinitions.rename(columns={'table_id':'source_table_id', 'column_id':'source_column_id'}, inplace=True)\t\n\t\t\t\t\ttargetAllColumnDefinitions.rename(columns={'table_id':'target_table_id', 'column_id':'target_column_id'}, inplace=True)\t\n\t\t\t\t\t#sourceAllColumnDefinitions = sourceAllColumnDefinitions.replace({np.nan: None})\n\t\t\t\t\t#targetAllColumnDefinitions = targetAllColumnDefinitions.replace({np.nan: None})\n\n#\t\t\t\t\tpd.set_option('display.max_columns', None)\n#\t\t\t\t\tprint(\"DEBUG 01\")\n#\t\t\t\t\tprint(sourceAllColumnDefinitions)\n#\t\t\t\t\tprint(\"===============================================\")\n#\t\t\t\t\tprint(targetAllColumnDefinitions)\n#\t\t\t\t\tprint(\"===============================================\")\n\t\t\t\t\t# Get the difference between source and target column definitions\n\t\t\t\t\tcolumnDifference = pd.merge(sourceAllColumnDefinitions, targetAllColumnDefinitions, on=None, how='outer', indicator='Exist')\t\t\t\n#\t\t\t\t\tprint(columnDifference)\n#\t\t\t\t\tprint(\"DEBUG 02\")\n\t\t\t\t\tcolumnDifferenceLeftOnly = columnDifference[columnDifference.Exist == \"left_only\"]\n\t\t\t\t\tcolumnDifferenceLeftOnly = columnDifferenceLeftOnly.replace({np.nan: None})\n\n\t\t\t\t\tfor columnIndex, columnRow in columnDifferenceLeftOnly.iterrows():\n\t\t\t\t\t\tsourceColumnName = columnRow[\"source_column_name\"]\n\n\t\t\t\t\t\t# Check if column exists in target database\n\t\t\t\t\t\tif len(targetAllColumnDefinitions.loc[targetAllColumnDefinitions['source_column_name'] == sourceColumnName]) == 0:\n\t\t\t\t\t\t\tlogging.debug(\"Source Column Name '%s' does not exists in target\"%(sourceColumnName))\n\t\t\t\t\t\t\tnewImportColumn = configSchema.importColumns(\n\t\t\t\t\t\t\t\ttable_id = remoteTableID,\n\t\t\t\t\t\t\t\tcolumn_name = columnRow['column_name'],\n\t\t\t\t\t\t\t\thive_db = hiveDB,\n\t\t\t\t\t\t\t\thive_table = hiveTable,\n\t\t\t\t\t\t\t\tsource_column_name = columnRow['source_column_name'],\n\t\t\t\t\t\t\t\tcolumn_type = '',\n\t\t\t\t\t\t\t\tsource_column_type = '',\n\t\t\t\t\t\t\t\tlast_update_from_source = str(columnRow['last_update_from_source']))\n\t\t\t\t\t\t\tremoteSession.add(newImportColumn)\n\t\t\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t\t\t# Get the table_id from the table at the remote instance\n\t\t\t\t\t\tremoteImportColumnID = (remoteSession.query(\n\t\t\t\t\t\t\t\timportColumns.column_id\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t.select_from(importColumns)\n\t\t\t\t\t\t\t.filter(importColumns.table_id == remoteTableID)\n\t\t\t\t\t\t\t.filter(importColumns.source_column_name == columnRow['source_column_name'])\n\t\t\t\t\t\t\t.one())\n\n\t\t\t\t\t\tremoteColumnID = remoteImportColumnID[0]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Create dictonary to be used to update the values in import_table on the remote Instance\n\t\t\t\t\t\tupdateDict = {}\n\t\t\t\t\t\tfor name, values in columnRow.iteritems():\n\n\t\t\t\t\t\t\tif name in (\"source_table_id\", \"source_column_id\", \"source_column_name\", \"target_table_id\", \"target_column_id\", \"hive_db\", \"hive_table\", \"Exist\"):\n\t\t\t\t\t\t\t\tcontinue\n\t\n#\t\t\t\t\t\t\tprint(\"%s = %s\"%(name, values))\n\t\t\t\t\t\t\tvalue = str(values)\n\t\t\t\t\t\t\tif value == \"None\" and name != \"anonymization_function\":\n\t\t\t\t\t\t\t\t# The 'anonymization_function' column contains the text 'None' if it doesnt anonymize anything. 
\n\t\t\t\t\t\t\t\t# It's a Enum, so it's ok. But we need to handle it here\n\t\t\t\t\t\t\t\tvalue = None\n\t\n\t\t\t\t\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\t\t\t\t\t\t# Update the values in import_table on the remote instance\n\t\t\t\t\t\t(remoteSession.query(configSchema.importColumns)\n\t\t\t\t\t\t\t.filter(configSchema.importColumns.column_id == remoteColumnID)\n\t\t\t\t\t\t\t.update(updateDict))\n\t\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t##################################\n\t\t\t\t# Update import_tables\n\t\t\t\t##################################\n\n\t\t\t\t# Read the entire import_table row from the source database\n\t\t\t\tsourceTableDefinition = pd.DataFrame(localSession.query(configSchema.importTables.__table__)\n\t\t\t\t\t.filter(configSchema.importTables.table_id == tableID)\n\t\t\t\t\t)\n\n\t\t\t\t# Table to update with values from import_table source\n\t\t\t\tremoteTableDefinition = (remoteSession.query(configSchema.importTables.__table__)\n\t\t\t\t\t.filter(configSchema.importTables.table_id == remoteTableID)\n\t\t\t\t\t.one()\n\t\t\t\t\t)\n\n\t\t\t\t# Create dictonary to be used to update the values in import_table on the remote Instance\n\t\t\t\tupdateDict = {}\n\t\t\t\tjdbcConnection = \"\"\n\t\t\t\tfor name, values in sourceTableDefinition.iteritems():\n\t\t\t\t\tif name in (\"table_id\", \"hive_db\", \"hive_table\", \"copy_finished\", \"copy_slave\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tvalue = str(values[0])\n\t\t\t\t\tif value == \"None\":\n\t\t\t\t\t\tvalue = None\n\n\t\t\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\t\t\t\tif deployMode == False:\n\t\t\t\t\tif copyDAGnoSlave == True:\n\t\t\t\t\t\tupdateDict[\"copy_slave\"] = 0\n\t\t\t\t\t\tupdateDict[\"copy_finished\"] = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tupdateDict[\"copy_slave\"] = 1\n\t\t\t\t\t\tif method == \"Synchronous\":\n\t\t\t\t\t\t\tupdateDict[\"copy_finished\"] = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')) \n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tupdateDict[\"copy_finished\"] = None\n\t\t\t\telse:\n\t\t\t\t\tupdateDict[\"copy_slave\"] = 0\n\t\t\t\t\tupdateDict[\"copy_finished\"] = None\n\n\t\t\t\t# Update the values in import_table on the remote instance\n\t\t\t\t(remoteSession.query(configSchema.importTables)\n\t\t\t\t\t.filter(configSchema.importTables.table_id == remoteTableID)\n\t\t\t\t\t.update(updateDict))\n\t\t\t\tremoteSession.commit()\n\t\t\t\tremoteSession.close()\n\n\t\t\telse:\n\t\t\t\tlogging.warning(\"Connection failed! No data will be copied to instance '%s'\"%(destination))\n\n\t\tlocalSession.close()",
"def _do_query_extract(self, extract_data):\n import tempfile\n import uuid\n import os\n import sqlite3\n import unicodecsv as csv\n\n p = extract_data['_partition'] # Set in _make_partition_dict\n\n file_name = extract_data.get('name', None)\n \n if file_name:\n file_ = self.bundle.filesystem.path('extracts', file_name)\n else:\n file_ = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) )\n\n if extract_data.get('query', False):\n query = extract_data['query']\n else:\n\n source_table = extract_data.get('source_table', False)\n \n if not source_table:\n source_table = p.table.name\n \n extract_table = extract_data.get('extract_table', False)\n \n if not extract_table:\n extract_table = source_table\n \n query = self.bundle.schema.extract_query(source_table,extract_table )\n\n where = extract_data.get('extract_where', False)\n \n if where:\n query = query + \" WHERE \"+where\n\n self.bundle.log(\"Running CSV extract from a query\")\n self.bundle.log(\" Partition: {}\".format(p.name))\n self.bundle.log(\" Source table: {}\".format(source_table))\n self.bundle.log(\" Extract Table: {}\".format(extract_table))\n self.bundle.log(\" Query: {}\".format(query.replace('\\n',' ')))\n self.bundle.log(\" Name: {}\".format(extract_data['name'])) \n self.bundle.log(\" Output: {}\".format(file_)) \n\n #self.bundle.log(query)\n\n conn = sqlite3.connect(p.database.path)\n\n lr = self.bundle.init_log_rate(100000,\"Extract to {}\".format(file_name))\n\n with open(file_, 'w') as f:\n conn.row_factory = sqlite3.Row\n \n try:\n rows = conn.execute(query)\n except:\n print query\n raise\n \n \n first = rows.fetchone()\n \n if not first:\n raise Exception(\"Got no data from query: {}\".format(query))\n \n writer = csv.writer(f)\n\n writer.writerow(first.keys())\n writer.writerow(tuple(first))\n \n for row in rows:\n lr()\n writer.writerow(tuple(row))\n\n return file_",
"def export(ft: FusionTableHandler, client: bigquery.Client, allTables=True, tableIds: list = None):\n schemas = dict()\n if allTables:\n all_tables = []\n request = ft.table.list(fields=\"items(name,tableId,description,columns(name,columnId,description,type,formatPattern))\")\n while request is not None:\n response = request.execute()\n all_tables.extend(response.get('items', []))\n request = ft.table.list_next(request, response)\n\n schemas.update(decode_fusionTable_schema(all_tables))\n elif not tableIds:\n return\n else:\n raise NotImplementedError()\n\n jobs = []\n for (tableId, tableRef) in create_tables(client, schemas).items():\n rows = download_table_data(ft, tableId, tableRef)\n job: bigquery.LoadJob = upload_table_data(client, tableRef, write_table_data(tableId, rows))\n job.add_done_callback(lambda job, ftId=tableId: print(f'Load job {\"finished\" if not job.error_result else \"failed\"} for FT {ftId}'))\n jobs.append(job)\n\n while True:\n if all(job.running() == False for job in jobs):\n print('Done exporting')\n break\n elif any(job.error_result for job in jobs):\n for job in jobs:\n if job.running():\n job.cancel()",
"def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession",
"def export(self, filename):\n columns = self.cursor.execute(f'''pragma table_info(job)''').fetchall()\n\n columns_to_export = [col[1] for col in columns\n if self._validate_column(col[1])]\n\n self._export_from_db(columns_to_export, filename)\n self.con.close()",
"def export_records_as_tabular_data(spark, ct_id):\n\n # hydrate CombineBackgroundTask\n ct = CombineBackgroundTask.objects.get(pk=int(ct_id))\n\n # reconstitute fm_export_config_json\n fm_config = json.loads(ct.task_params['fm_export_config_json'])\n\n # clean base path\n output_path = \"file:///%s\" % ct.task_params['output_path'].lstrip(\n 'file://').rstrip('/')\n\n # write DataFrame to S3\n if ct.task_params.get('s3_export', False) and ct.task_params.get('s3_export_type', None) == 'spark_df':\n\n # dynamically set credentials\n spark.sparkContext._jsc.hadoopConfiguration().set(\n \"fs.s3a.access.key\", settings.AWS_ACCESS_KEY_ID)\n spark.sparkContext._jsc.hadoopConfiguration().set(\n \"fs.s3a.secret.key\", settings.AWS_SECRET_ACCESS_KEY)\n\n # determine column subset\n col_subset = ['*']\n\n # loop through keys and export\n rdds = []\n for folder_name, job_ids in ct.task_params['job_dict'].items():\n\n # handle single job_id\n if len(job_ids) == 1:\n rdds.extend([get_job_as_df(spark, job_ids[0]).select(\n ['document', 'combine_id', 'record_id']).rdd])\n\n # handle multiple jobs\n else:\n rdds.extend(\n [get_job_as_df(spark, job_id).select(['document', 'combine_id', 'record_id']).rdd for job_id in\n job_ids])\n\n # union all\n batch_rdd = spark.sparkContext.union(rdds)\n\n # convert rdd\n kvp_batch_rdd = _convert_xml_to_kvp(batch_rdd, fm_config)\n\n # repartition to records per file\n kvp_batch_rdd = kvp_batch_rdd.repartition(\n math.ceil(kvp_batch_rdd.count() / settings.TARGET_RECORDS_PER_PARTITION))\n\n # convert to dataframe\n kvp_batch_df = spark.read.json(kvp_batch_rdd)\n\n # write to bucket as jsonl\n kvp_batch_df.write.mode('overwrite').json(\n 's3a://%s/%s' % (ct.task_params['s3_bucket'], ct.task_params['s3_key']))\n\n # write to disk\n else:\n\n # loop through potential output folders\n for folder_name, job_ids in ct.task_params['job_dict'].items():\n\n # handle single job_id\n if len(job_ids) == 1:\n\n # get Job records as df\n batch_rdd = get_job_as_df(spark, job_ids[0]).select(\n ['document', 'combine_id', 'record_id']).rdd\n\n # handle multiple jobs\n else:\n\n rdds = [get_job_as_df(spark, job_id).select(['document', 'combine_id', 'record_id']).rdd for job_id in\n job_ids]\n batch_rdd = spark.sparkContext.union(rdds)\n\n # convert rdd\n kvp_batch_rdd = _convert_xml_to_kvp(batch_rdd, fm_config)\n\n # repartition to records per file\n kvp_batch_rdd = kvp_batch_rdd.repartition(\n math.ceil(kvp_batch_rdd.count()/int(ct.task_params['records_per_file'])))\n\n # handle json\n if ct.task_params['tabular_data_export_type'] == 'json':\n _write_tabular_json(\n spark, kvp_batch_rdd, output_path, folder_name, fm_config)\n\n # handle csv\n if ct.task_params['tabular_data_export_type'] == 'csv':\n _write_tabular_csv(spark, kvp_batch_rdd,\n output_path, folder_name, fm_config)",
"def store_hive_table(data, directory, file_name):\n table_name = directory + \".\" + file_name\n data.write.saveAsTable(table_name)",
"def utility():\n import clite\n \n try:\n arguments = clite.CLIte(['--login', '--script', '--save'])\n except (clite.CLIteHelpRequestedError, clite.CLIteMandatoryError):\n print UTILITY_HELP_STR\n return\n \n host, db, user, pswd = load_login(arguments['--login'])\n PH = PostHaste(host, db, user, pswd)\n PH.open(arguments['--script'])\n PH.run()\n \n try:\n dataframe = PH.as_named_DataFrame()\n except ValueError:\n #no schema defined in script\n dataframe = PH.as_DataFrame()\n except IndexError:\n dataframe = 'None'\n \n print dataframe\n \n if arguments['--save'] == 'None':\n return \n \n dataframe.to_csv(arguments['--save'])",
"def export_table():\n\n from yaml import dump\n parser = argparse.ArgumentParser(\n description='export a table to a yaml file')\n parser.add_argument('--table', dest='table',\n help='the table to export')\n args, env = initialize_command(parser)\n\n table = _find_table(args.table)\n columns = table.c.keys()\n data = []\n # iter through the table\n for row in table.select().execute():\n c = {}\n assert len(columns) == len(row),\\\n 'The number of columns should match the length of the row'\n for i in range(len(columns)):\n column = table.c[columns[i]]\n cell = row[i]\n c[column.name] = cell\n data.append(c)\n print dump(data)\n\n # close the env\n env['closer']()",
"def execute(self, context): \n aws_hook = AwsHook(self.aws_credentials)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(self.redshift_conn_id)\n execution_date = context['execution_date']\n \n self.log.info(f\"Truncating {self.table}\")\n redshift.run(f\"TRUNCATE TABLE {self.table}\")\n \n \n self.log.info(f\"Inserting data into {self.table}\")\n s3_path = f\"s3://{self.s3_bucket}/{self.s3_key}\"\n\n if self.s3_key == \"log_data\":\n year = execution_date.year\n month = execution_date.month\n \n s3_path = '/'.join([s3_path, str(year), str(month)])\n \n formatted_sql = StageToRedshiftOperator.copy_sql.format(\n self.table,\n s3_path,\n credentials.access_key,\n credentials.secret_key,\n self.file_format,\n self.format_path\n )\n \n redshift.run(formatted_sql)",
"def export_sql(meta, data, output):\n\n tables = [table for table in meta.sorted_tables if table.name in data]\n preparer = IdentifierPreparer(meta.bind.dialect)\n prepare_column = lambda column: preparer.format_column(column, name=column.name)\n output_file = open(output, 'w')\n\n for table in tables:\n columns = ', '.join([ prepare_column(column) for column in table.columns.values() ])\n for row in data[table.name].values():\n values = list(map(_transform, list(row.values())))\n insert = \"INSERT INTO %s (%s) VALUES (%s);\\n\" % (\n preparer.format_table(table, name=table.name),\n columns,\n ', '.join(values)\n )\n output_file.write(insert)\n\n output_file.close()",
"def copyExportSchemaToDestination(self, filterDBalias, filterSchema, filterTable, destination, deployMode=False):\n\t\tlocalSession = self.configDBSession()\n\t\texportTables = aliased(configSchema.exportTables)\n\t\texportColumns = aliased(configSchema.exportColumns)\n\t\tjdbcConnections = aliased(configSchema.jdbcConnections)\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tif self.connectRemoteDBImportInstance(instance = destination):\n\t\t\tremoteSession = self.remoteInstanceConfigDBSession()\n\n\t\t\t# Check if if we are going to sync the credentials for this destination\n\t\t\tresult = (localSession.query(\n\t\t\t\t\tdbimportInstances.sync_credentials\n\t\t\t\t)\n\t\t\t\t.select_from(dbimportInstances)\n\t\t\t\t.filter(dbimportInstances.name == destination)\n\t\t\t\t.one())\n\n\t\t\tif result[0] == 1:\n\t\t\t\tsyncCredentials = True\n\t\t\telse:\n\t\t\t\tsyncCredentials = False\n\n\t\t\tfilterDBalias = filterDBalias.replace('*', '%')\n\t\t\tfilterSchema = filterSchema.replace('*', '%')\n\t\t\tfilterTable = filterTable.replace('*', '%')\n\n\t\t\tresult = pd.DataFrame(localSession.query(\n\t\t\t\t\texportTables.table_id,\n\t\t\t\t\texportTables.hive_db,\n\t\t\t\t\texportTables.hive_table,\n\t\t\t\t\texportTables.dbalias,\n\t\t\t\t\texportTables.target_schema,\n\t\t\t\t\texportTables.target_table\n\t\t\t\t)\n\t\t\t\t.filter(exportTables.dbalias.like(filterDBalias))\n\t\t\t\t.filter(exportTables.target_schema.like(filterSchema))\n\t\t\t\t.filter(exportTables.target_table.like(filterTable))\n\t\t\t\t)\n\n\t\t\tfor index, row in result.iterrows():\n\t\t\t\tif deployMode == False:\n\t\t\t\t\tlogging.info(\"Copy schema definitions for %s.%s\"%(row['hive_db'], row['hive_table']))\n\t\t\t\telse:\n\t\t\t\t\tlogging.info(\"Deploying schema definitions for %s.%s\"%(row['hive_db'], row['hive_table']))\n\n\t\t\t\t##################################\n\t\t\t\t# Update jdbc_connections\n\t\t\t\t##################################\n\n\t\t\t\t# Check if the jdbcConnection exists on the remote DBImport instance\n\t\t\t\tresult = (remoteSession.query(\n\t\t\t\t\t\tjdbcConnections\n\t\t\t\t\t)\n\t\t\t\t\t.filter(jdbcConnections.dbalias == row['dbalias'])\n\t\t\t\t\t.count())\n\n\t\t\t\tif result == 0:\n\t\t\t\t\tnewJdbcConnection = configSchema.jdbcConnections(\n\t\t\t\t\t\tdbalias = row['dbalias'],\n\t\t\t\t\t\tjdbc_url = '')\n\t\t\t\t\tremoteSession.add(newJdbcConnection)\n\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t# Read the entire import_table row from the source database\n\t\t\t\tsourceJdbcConnection = pd.DataFrame(localSession.query(configSchema.jdbcConnections.__table__)\n\t\t\t\t\t.filter(configSchema.jdbcConnections.dbalias == row['dbalias'])\n\t\t\t\t\t)\n\n\t\t\t\t# Table to update with values from import_table source\n\t\t\t\tremoteJdbcConnection = (remoteSession.query(configSchema.jdbcConnections.__table__)\n\t\t\t\t\t.filter(configSchema.jdbcConnections.dbalias == row['dbalias'])\n\t\t\t\t\t.one()\n\t\t\t\t\t)\n\n\t\t\t\t# Create dictonary to be used to update the values in import_table on the remote Instance\n\t\t\t\tupdateDict = {}\n\t\t\t\tfor name, values in sourceJdbcConnection.iteritems():\n\t\t\t\t\tif name == \"dbalias\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif syncCredentials == False and name in (\"credentials\", \"private_key_path\", \"public_key_path\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tvalue = str(values[0])\n\t\t\t\t\tif value == \"None\":\n\t\t\t\t\t\tvalue = None\n\n\t\t\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\n\t\t\t\t# Update the values in 
import_table on the remote instance\n\t\t\t\t(remoteSession.query(configSchema.jdbcConnections)\n\t\t\t\t\t.filter(configSchema.jdbcConnections.dbalias == row['dbalias'])\n\t\t\t\t\t.update(updateDict))\n\t\t\t\tremoteSession.commit()\n\n\t\t\t\t##################################\n\t\t\t\t# Update export_tables\n\t\t\t\t##################################\n\n\t\t\t\t# Check if the table exists on the remote DBImport instance\n\t\t\t\tresult = (remoteSession.query(\n\t\t\t\t\t\texportTables\n\t\t\t\t\t)\n\t\t\t\t\t.filter(exportTables.dbalias == row['dbalias'])\n\t\t\t\t\t.filter(exportTables.target_schema == row['target_schema'])\n\t\t\t\t\t.filter(exportTables.target_table == row['target_table'])\n\t\t\t\t\t.count())\n\n\t\t\t\tif result == 0:\n\t\t\t\t\t# Table does not exist in target system. Lets create a skeleton record\n\t\t\t\t\tnewExportTable = configSchema.exportTables(\n\t\t\t\t\t\tdbalias = row['dbalias'],\n\t\t\t\t\t\ttarget_schema = row['target_schema'],\n\t\t\t\t\t\ttarget_table = row['target_table'],\n\t\t\t\t\t\thive_db = row['hive_db'],\n\t\t\t\t\t\thive_table = row['hive_table'])\n\t\t\t\t\tremoteSession.add(newExportTable)\n\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t# Get the table_id from the table at the remote instance\n\t\t\t\tremoteExportTableID = (remoteSession.query(\n\t\t\t\t\t\texportTables.table_id\n\t\t\t\t\t)\n\t\t\t\t\t.select_from(exportTables)\n\t\t\t\t\t.filter(exportTables.dbalias == row['dbalias'])\n\t\t\t\t\t.filter(exportTables.target_schema == row['target_schema'])\n\t\t\t\t\t.filter(exportTables.target_table == row['target_table'])\n\t\t\t\t\t.one())\n\n\t\t\t\tremoteTableID =\tremoteExportTableID[0]\n\n\t\t\t\t# Read the entire export_table row from the source database\n\t\t\t\tsourceTableDefinition = pd.DataFrame(localSession.query(configSchema.exportTables.__table__)\n\t\t\t\t\t.filter(configSchema.exportTables.table_id == row['table_id'])\n\t\t\t\t\t)\n\n\t\t\t\t# Table to update with values from import_table source\n\t\t\t\tremoteTableDefinition = (remoteSession.query(configSchema.exportTables.__table__)\n\t\t\t\t\t.filter(configSchema.exportTables.table_id == remoteTableID)\n\t\t\t\t\t.one()\n\t\t\t\t\t)\n\n\t\t\t\t# Create dictonary to be used to update the values in import_table on the remote Instance\n\t\t\t\tupdateDict = {}\n\t\t\t\tfor name, values in sourceTableDefinition.iteritems():\n\t\t\t\t\tif name in (\"table_id\", \"dbalias\", \"target_schema\", \"target_table\", \"sqoop_last_execution\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tvalue = str(values[0])\n\t\t\t\t\tif value == \"None\":\n\t\t\t\t\t\tvalue = None\n\n\t\t\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\n\t\t\t\t# Update the values in import_table on the remote instance\n\t\t\t\t(remoteSession.query(configSchema.exportTables)\n\t\t\t\t\t.filter(configSchema.exportTables.table_id == remoteTableID)\n\t\t\t\t\t.update(updateDict))\n\t\t\t\tremoteSession.commit()\n\n\t\t\t\t##################################\n\t\t\t\t# Update export_colums \n\t\t\t\t##################################\n\n\t\t\t\t# Read the entire export_columns row from the source database\n\t\t\t\tsourceAllColumnDefinitions = pd.DataFrame(localSession.query(configSchema.exportColumns.__table__)\n\t\t\t\t\t.filter(configSchema.exportColumns.table_id == row['table_id'])\n\t\t\t\t\t)\n\n\t\t\t\tfor columnIndex, columnRow in sourceAllColumnDefinitions.iterrows():\n\n\t\t\t\t\t# Check if the column exists on the remote DBImport instance\n\t\t\t\t\tresult = 
(remoteSession.query(\n\t\t\t\t\t\t\texportColumns\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.filter(exportColumns.table_id == remoteTableID)\n\t\t\t\t\t\t.filter(exportColumns.column_name == columnRow['column_name'])\n\t\t\t\t\t\t.count())\n\n\t\t\t\t\tif result == 0:\n\t\t\t\t\t\t# Create a new row in exportColumns if it doesnt exists\n\t\t\t\t\t\tnewExportColumn = configSchema.exportColumns(\n\t\t\t\t\t\t\ttable_id = remoteTableID,\n\t\t\t\t\t\t\tcolumn_name = columnRow['column_name'],\n\t\t\t\t\t\t\tlast_update_from_hive = str(columnRow['last_update_from_hive']))\n\t\t\t\t\t\tremoteSession.add(newExportColumn)\n\t\t\t\t\t\tremoteSession.commit()\n\n\t\t\t\t\t# Get the table_id from the table at the remote instance\n\t\t\t\t\tremoteExportColumnID = (remoteSession.query(\n\t\t\t\t\t\t\texportColumns.column_id\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.select_from(exportColumns)\n\t\t\t\t\t\t.filter(exportColumns.table_id == remoteTableID)\n\t\t\t\t\t\t.filter(exportColumns.column_name == columnRow['column_name'])\n\t\t\t\t\t\t.one())\n\t\n\t\t\t\t\tremoteColumnID = remoteExportColumnID[0]\n\n\t\t\t\t\t# Read the entire export_columnis row from the source database\n\t\t\t\t\tsourceColumnDefinition = pd.DataFrame(localSession.query(configSchema.exportColumns.__table__)\n\t\t\t\t\t\t.filter(configSchema.exportColumns.column_id == columnRow['column_id'])\n\t\t\t\t\t\t)\n\n\t\t\t\t\t# Table to update with values from export_columns source\n\t\t\t\t\tremoteColumnDefinition = (remoteSession.query(configSchema.exportColumns.__table__)\n\t\t\t\t\t\t.filter(configSchema.exportColumns.column_id == remoteColumnID)\n\t\t\t\t\t\t.one()\n\t\t\t\t\t\t)\n\n\t\t\t\t\t# Create dictonary to be used to update the values in export_table on the remote Instance\n\t\t\t\t\tupdateDict = {}\n\t\t\t\t\tfor name, values in sourceColumnDefinition.iteritems():\n\t\t\t\t\t\tif name in (\"table_id\", \"column_id\", \"column_name\"):\n\t\t\t\t\t\t\tcontinue\n\t\n\t\t\t\t\t\tvalue = str(values[0])\n\t\t\t\t\t\tif value == \"None\":\n\t\t\t\t\t\t\tvalue = None\n\t\n\t\t\t\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\t\t\t\t\t# Update the values in export_table on the remote instance\n\t\t\t\t\t(remoteSession.query(configSchema.exportColumns)\n\t\t\t\t\t\t.filter(configSchema.exportColumns.column_id == remoteColumnID)\n\t\t\t\t\t\t.update(updateDict))\n\t\t\t\t\tremoteSession.commit()\n\n\n\n\t\t\tremoteSession.close()\n\t\telse:\n\t\t\tlogging.warning(\"Connection failed! No data will be copied to instance '%s'\"%(destination))\n\n\t\tlocalSession.close()",
"def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)",
"def _dump_table(table: Model, directory: Path, format_: str):\n try:\n table.select().tuples()\n table.fields()\n dataset = tablib.Dataset(*table.select().tuples(), headers=table.fields())\n except:\n print(table._meta.database.get_columns(table.table_name()))\n\n if directory is not None:\n print(f\" Dumping {table.table_name()}...\")\n out_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n out_file.write_text(dataset.export(format_))\n print(\" Done.\")\n print(\"=====================\")\n else:\n print(dataset.export(\"csv\"))",
"def export_to(short_name):\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n title = app_title(app, gettext(\"Export\"))\r\n loading_text = gettext(\"Exporting data..., this may take a while\")\r\n\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n def respond():\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n app=app,\r\n owner=owner)\r\n\r\n def gen_json(table):\r\n n = db.session.query(table)\\\r\n .filter_by(app_id=app.id).count()\r\n sep = \", \"\r\n yield \"[\"\r\n for i, tr in enumerate(db.session.query(table)\r\n .filter_by(app_id=app.id).yield_per(1), 1):\r\n item = json.dumps(tr.dictize())\r\n if (i == n):\r\n sep = \"\"\r\n yield item + sep\r\n yield \"]\"\r\n\r\n def format_csv_properly(row):\r\n keys = sorted(row.keys())\r\n values = []\r\n for k in keys:\r\n values.append(row[k])\r\n return values\r\n\r\n\r\n def handle_task(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def handle_task_run(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def get_csv(out, writer, table, handle_row):\r\n for tr in db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .yield_per(1):\r\n handle_row(writer, tr)\r\n yield out.getvalue()\r\n\r\n def respond_json(ty):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n try:\r\n table = tables[ty]\r\n except KeyError:\r\n return abort(404)\r\n return Response(gen_json(table), mimetype='application/json')\r\n\r\n def create_ckan_datastore(ckan, table, package_id):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n new_resource = ckan.resource_create(name=table,\r\n package_id=package_id)\r\n ckan.datastore_create(name=table,\r\n resource_id=new_resource['result']['id'])\r\n ckan.datastore_upsert(name=table,\r\n records=gen_json(tables[table]),\r\n resource_id=new_resource['result']['id'])\r\n\r\n def respond_ckan(ty):\r\n # First check if there is a package (dataset) in CKAN\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n msg_1 = gettext(\"Data exported to \")\r\n msg = msg_1 + \"%s ...\" % current_app.config['CKAN_URL']\r\n ckan = Ckan(url=current_app.config['CKAN_URL'],\r\n api_key=current_user.ckan_api)\r\n app_url = url_for('.details', short_name=app.short_name, _external=True)\r\n\r\n try:\r\n package, e = ckan.package_exists(name=app.short_name)\r\n if e:\r\n raise e\r\n if package:\r\n # Update the package\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_update(app=app, user=owner, url=app_url,\r\n resources=package['resources'])\r\n\r\n ckan.package = package\r\n resource_found = False\r\n for r in package['resources']:\r\n if r['name'] == ty:\r\n ckan.datastore_delete(name=ty, resource_id=r['id'])\r\n ckan.datastore_create(name=ty, resource_id=r['id'])\r\n ckan.datastore_upsert(name=ty,\r\n records=gen_json(tables[ty]),\r\n resource_id=r['id'])\r\n resource_found = True\r\n break\r\n if not resource_found:\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n else:\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_create(app=app, 
user=owner, url=app_url)\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n #new_resource = ckan.resource_create(name=ty,\r\n # package_id=package['id'])\r\n #ckan.datastore_create(name=ty,\r\n # resource_id=new_resource['result']['id'])\r\n #ckan.datastore_upsert(name=ty,\r\n # records=gen_json(tables[ty]),\r\n # resource_id=new_resource['result']['id'])\r\n flash(msg, 'success')\r\n return respond()\r\n except requests.exceptions.ConnectionError:\r\n msg = \"CKAN server seems to be down, try again layer or contact the CKAN admins\"\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n except Exception as inst:\r\n if len(inst.args) == 3:\r\n t, msg, status_code = inst.args\r\n msg = (\"Error: %s with status code: %s\" % (t, status_code))\r\n else: # pragma: no cover\r\n msg = (\"Error: %s\" % inst.args[0])\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n finally:\r\n return respond()\r\n\r\n def respond_csv(ty):\r\n # Export Task(/Runs) to CSV\r\n types = {\r\n \"task\": (\r\n model.task.Task, handle_task,\r\n (lambda x: True),\r\n gettext(\r\n \"Oops, the application does not have tasks to \\\r\n export, if you are the owner add some tasks\")),\r\n \"task_run\": (\r\n model.task_run.TaskRun, handle_task_run,\r\n (lambda x: type(x.info) == dict),\r\n gettext(\r\n \"Oops, there are no Task Runs yet to export, invite \\\r\n some users to participate\"))}\r\n try:\r\n table, handle_row, test, msg = types[ty]\r\n except KeyError:\r\n return abort(404)\r\n\r\n out = StringIO()\r\n writer = UnicodeWriter(out)\r\n t = db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .first()\r\n if t is not None:\r\n if test(t):\r\n writer.writerow(sorted(t.info.keys()))\r\n\r\n return Response(get_csv(out, writer, table, handle_row),\r\n mimetype='text/csv')\r\n else:\r\n flash(msg, 'info')\r\n return respond()\r\n\r\n export_formats = [\"json\", \"csv\"]\r\n if current_user.is_authenticated():\r\n if current_user.ckan_api:\r\n export_formats.append('ckan')\r\n\r\n ty = request.args.get('type')\r\n fmt = request.args.get('format')\r\n if not (fmt and ty):\r\n if len(request.args) >= 1:\r\n abort(404)\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n ckan_name=current_app.config.get('CKAN_NAME'),\r\n app=app,\r\n owner=owner)\r\n if fmt not in export_formats:\r\n abort(415)\r\n return {\"json\": respond_json, \"csv\": respond_csv, 'ckan': respond_ckan}[fmt](ty)",
"def mysql_import():\n # first make another copy of the db\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql\")\n # then import from the backup\n run(\"mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql\")",
"def export(self):\n f = open(self.database, 'w')\n for line in self.conn.iterdump():\n f.write(line)\n self.c.close()",
"def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])",
"def test_sql_to_csv():\n csv_outfile = 'optwrf_database.csv'\n db_conn = conn_to_db('optwrf.db')\n sql_to_csv(csv_outfile, db_conn)\n close_conn_to_db(db_conn)\n assert os.path.exists(csv_outfile) == 1",
"def export_tables(output=None):\n # Get list of tables\n tables = Base.metadata.tables\n\n if output:\n # Export tables to JSON\n tables = table_models_map.keys()\n for t in tables:\n print(\"Exporting %s ...\" % t)\n \n result = [i for i in db_session.query(table_models_map[t]['model']).all()]\n serialized = table_models_map[t]['serializer'](result, many=True)\n\n # Write to JSON file\n with open(output + \"/\" + t + \".json\", 'w') as outfile:\n json.dump(serialized.data, outfile, sort_keys=True, indent=2)\n\n else:\n print(\"[!] output folder not specified. Aborted.\")",
"def saveastable(file, warehouse_dir):\n \n file1_path = os.path.join(files_2017_path,file)\n file2_path = os.path.join(files_2018_path,file)\n df1 = spark.read.load(\n file1_path,\n format='csv',\n sep=',',\n inferSchema=True,\n header=True\n )\n\n df2 = spark.read.load(\n file2_path,\n format='csv',\n sep=',',\n inferSchema=True,\n header=True\n )\n\n df = df1.unionAll(df2)\n \n tablename = os.path.splitext(i)[0]\n tblwarehouse_dir = os.path.join(warehouse_dir,tablename)\n df.write.saveAsTable(tablename, mode = 'overwrite', path = tblwarehouse_dir )\n print(\" Table created for - \",tablename)"
] | [
"0.6315057",
"0.6245598",
"0.6017175",
"0.5787101",
"0.5758232",
"0.573403",
"0.56615156",
"0.5621154",
"0.5613762",
"0.56111676",
"0.55346966",
"0.55025053",
"0.54620385",
"0.5452798",
"0.5428737",
"0.5426115",
"0.54060113",
"0.5380885",
"0.5377392",
"0.5359161",
"0.5295265",
"0.52518547",
"0.5224329",
"0.52114356",
"0.52054673",
"0.52042836",
"0.51816016",
"0.5175044",
"0.51697856",
"0.51612103"
] | 0.78318924 | 0 |
Create new map item | def create(self, mapItem: MapItem) -> int:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_map(self):\n self.wizard = NewMap(self)",
"def add(self, item):\n self._dict[item] = item",
"def add_map(new_prot, new_target, map_path, map_type):\n hotspot_map = HotspotMap.objects.get_or_create(\n map_type=map_type, target_id=new_target, prot_id=new_prot\n )[0]\n hotspot_map.map_info.save(os.path.basename(map_path), File(open(map_path, encoding='utf-8')))\n return hotspot_map",
"def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"",
"def new_map(self):\n self.map = Map()\n self.player.roomId = 0\n return self.map",
"def _add_non_object(self, item_type, item_dict):\n\n # Map item.\n if item_type == 'clock':\n # Map clock.\n self._add_clock_to_map(self.prepend_key, item_dict)\n\n elif item_type == 'module':\n # Map module.\n self._add_module_to_map(self.prepend_key, item_dict)\n\n elif item_type == 'class':\n # Map class.\n self._add_class_to_map(self.prepend_key, item_dict)\n\n elif item_type in self.NO_MAP:\n # No mapping.\n pass\n\n else:\n s = 'No add method for {} item type.'.format(item_type)\n raise TypeError(s)\n\n # Add to beginning of model.\n self.model_dict[self.prepend_key] = item_dict\n\n # Update prepend key.\n self._update_prepend_key()",
"def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'",
"def create(self, key, value):\n raise NotImplementedError",
"def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return",
"def new():\n return ResearchMap()",
"def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)",
"def create_new_map(self):\n return GameMap(self, mapgenfuncs.empty_box, width=self.width, height=self.height)",
"def addObject(self, name, object):\n self.map[name] = object",
"def test_new_item(self):\n new_item = geocode_table(name=\"address_content\",\n longitude=42.123456,\n latitude=0.123456,\n distance=123456)\n assert new_item.name == \"address_content\"\n assert new_item.longitude == 42.123456\n assert new_item.latitude == 0.123456\n assert new_item.distance == 123456",
"def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)",
"def __init__(self):\n self.map = {}",
"def post(self):\n check_content_type('application/json')\n map_object = Map()\n app.logger.info('Payload = %s', api.payload)\n map_object.deserialize(api.payload)\n map_object.save()\n app.logger.info('Map with new key [%s] saved!', map_object.key)\n return map_object.serialize(), status.HTTP_201_CREATED",
"def create_entry(entry):\n Entry.create(**entry)\n return entry",
"def create_map(self):\n self.map = MapContainer(\n parent=self,\n style={\n 'top': self.margin[0],\n 'right': self.margin[1],\n 'bottom': self.margin[2],\n 'left': self.margin[3],\n 'aspect': 1.0,\n 'align': 'center',\n 'vertical-align': 'center' \n },\n map_size=self.map_size\n )\n self.add_node(self.map)",
"def add_item(self,itm):\n itms = self.get_items_list()\n if len(itms) != self.items: self.items = len(itms)\n if self.items >= self.rooms * MAX_ITEMS_PER_ROOM:\n return None\n k = itm\n x = 0\n while k in itms:\n x += 1\n k = '%s_%d'%(itm,x)\n itm_rec = SuiGallery.make_item_record(itm)\n itm_rec['z'] = self.items;\n itms[k] = itm_rec\n self.put_items_list(itms)\n self.items += 1\n return {'items':self.items,'k':k,'id':itm,'x':itm_rec['x'],'y':itm_rec['y'],'z':itm_rec['z']}",
"def create_map(json_game_map):\n room_hash = {}\n\n for room in constants.ROOMS:\n # Set name, description, and neighbors\n room_hash[room] = Room.Room()\n room_hash[room].set_name(room)\n room_hash[room].set_short_description(constants.ROOMS[room]['short_description'])\n room_hash[room].set_long_description(constants.ROOMS[room]['long_description'])\n room_hash[room].set_north(constants.ROOMS[room]['north'])\n room_hash[room].set_south(constants.ROOMS[room]['south'])\n room_hash[room].set_east(constants.ROOMS[room]['east'])\n room_hash[room].set_west(constants.ROOMS[room]['west'])\n room_hash[room].set_locked(constants.ROOMS[room]['locked'])\n\n # Set features in the room\n for feature in constants.ROOMS[room]['features']:\n new_feature = Feature.Feature()\n new_feature.set_name(constants.ROOMS[room]['features'][feature]['name'])\n new_feature.set_description(constants.ROOMS[room]['features'][feature]['description'])\n room_hash[room].add_feature(new_feature)\n\n # If it is not a loaded game\n if not json_game_map:\n # Set items in the room\n for item in constants.ROOMS[room]['items']:\n new_item = Item.Item()\n new_item.set_name(constants.ROOMS[room]['items'][item]['name'])\n new_item.set_description(constants.ROOMS[room]['items'][item]['description'])\n if \"hidden\" in constants.ROOMS[room]['items'][item]:\n if constants.ROOMS[room]['items'][item][\"hidden\"] == \"true\":\n new_item.set_hidden(True)\n room_hash[room].add_item(new_item)\n \n #Set monsters in the room\n for monster in constants.ROOMS[room]['monsters']:\n if constants.ROOMS[room]['monsters'] != \"None\":\n new_monster = Monster.Monster()\n new_monster.set_name(constants.ROOMS[room]['monsters'][monster]['name'])\n new_monster.set_lvl(constants.ROOMS[room]['monsters'][monster]['lvl'])\n new_monster.set_description(constants.ROOMS[room]['monsters'][monster]['description'])\n room_hash[room].add_monster(new_monster)\n\n # If it is a loaded game\n else:\n # Set items in the room\n for item in json_game_map[room]:\n if item == \"visited\":\n room_hash[room].set_visited(json_game_map[room][item])\n elif item == \"locked\":\n room_hash[room].set_locked(json_game_map[room][item])\n #Set undefeated monster in the room\n elif item == \"Lich\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Armored Skeleton\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Animated Armor\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Skeleton\":\n new_monster = Monster.Monster()\n new_monster.set_name(json_game_map[room][item]['Name'])\n new_monster.set_lvl(json_game_map[room][item]['Lvl'])\n 
new_monster.set_description(json_game_map[room][item]['Description'])\n new_monster.set_defeated_status(json_game_map[room][item]['Defeated'])\n room_hash[room].add_monster(new_monster)\n elif item == \"Features\":\n for feature in json_game_map[room][item]:\n room_hash[room].get_features()[feature].set_interacted_with(json_game_map[room][item][feature]['Interacted With'])\n else:\n new_item = Item.Item()\n new_item.set_name(json_game_map[room][item]['Name'])\n new_item.set_description(json_game_map[room][item]['Description'])\n if \"Hidden\" in json_game_map[room][item]:\n if json_game_map[room][item][\"Hidden\"]:\n new_item.set_hidden(True)\n room_hash[room].add_item(new_item)\n\n return room_hash",
"def create_item(world: World, item_id: str, x: int, y: int, *args):\n item_id = ITEMS[item_id]\n if item_id == \"coin\":\n item = Coin()\n elif item_id == \"star\":\n item = Star()\n else:\n item = DroppedItem(item_id)\n\n world.add_item(item, x * BLOCK_SIZE, y * BLOCK_SIZE)",
"def addObjectMap(self,fromMod,toMod,objectMap):\n if self.objectMaps == None: self.loadObjectMaps()\n self.objectMaps[(fromMod,toMod)] = objectMap",
"def post(self, key):\n app.logger.info(\"Request to Retrieve a map_object with key [%s]\", key)\n map_object = Map.append(key, api.payload)\n # map_object = Map.find(key)\n # if not map_object:\n # raise NotFound(\"Map with key '{}' was not found.\".format(key))\n # # map_object.add_map_item(api.payload)\n # # return map_object.serialize(), status.HTTP_200_OK\n return map_object, status.HTTP_200_OK",
"def New(*args, **kargs):\n obj = itkMapContainerULLQEMPF3GQEULLULLBBT.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkMapContainerULLQEMPF2GQEULLULLBBT.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map",
"def make_item_record(cls,itm,x=350,y=200,z=1,sx=1,sy=1,ms=''):\n return {'id':int(itm),'x':x,'y':y,'z':z,'sx':sx,'sy':sy,'ms':ms}",
"def Dictionary_create(nMarkers, markerSize):\n pass",
"def item_duplicate():\n return {'name':'chair',\n 'value':300}"
] | [
"0.663403",
"0.6446372",
"0.62781554",
"0.61970216",
"0.618926",
"0.6137825",
"0.6078885",
"0.5993192",
"0.5991251",
"0.5985686",
"0.59658325",
"0.5953617",
"0.59227896",
"0.590874",
"0.58784914",
"0.58716595",
"0.5857414",
"0.5817465",
"0.5817132",
"0.5809343",
"0.580722",
"0.5802213",
"0.57949954",
"0.57786465",
"0.57757896",
"0.57742053",
"0.5770733",
"0.57685965",
"0.5754974",
"0.5753449"
] | 0.7733658 | 0 |
Update map item in database | def update(self, mapItem: MapItem):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_item(self, table, item):",
"def update(self, mapper_info: dict):\n self.update_from_dict(\n [\n \"form_id\",\n \"form_name\",\n \"form_revision_number\",\n \"process_key\",\n \"process_name\",\n \"status\",\n \"comments\",\n \"modified_by\",\n ],\n mapper_info,\n )\n self.commit()",
"def test_map_update_updates(self):\r\n partition = uuid4()\r\n cluster = 1\r\n TestQueryUpdateModel.objects.create(\r\n partition=partition, cluster=cluster,\r\n text_map={\"foo\": '1', \"bar\": '2'})\r\n TestQueryUpdateModel.objects(\r\n partition=partition, cluster=cluster).update(\r\n text_map__update={\"bar\": '3', \"baz\": '4'})\r\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})",
"def test_map_update_updates(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": '3', \"baz\": '4'})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})",
"def test_mapfield_update(self):\n\n class Member(EmbeddedDocument):\n gender = StringField()\n age = IntField()\n\n class Club(Document):\n members = MapField(EmbeddedDocumentField(Member))\n\n Club.drop_collection()\n\n club = Club()\n club.members[\"John\"] = Member(gender=\"M\", age=13)\n club.save()\n\n Club.objects().update(set__members={\"John\": Member(gender=\"F\", age=14)})\n\n club = Club.objects().first()\n assert club.members[\"John\"].gender == \"F\"\n assert club.members[\"John\"].age == 14",
"def updateItem(self, object):\n pass",
"def put(self, item): \n self.__db.rpush(self.key, item)",
"def update(self, key, value):\n if key in self.map:\n self.map[key] = value",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def _update_database_map(self, path):\n if path:\n filename = path + '/APD_MAP.txt'\n else:\n filename = 'APD_MAP.txt'\n filepointer = open(filename, 'w')\n for invariom, molecule in self.map.items():\n filepointer.write(invariom + ':' + molecule + '\\n')\n filepointer.close()",
"def update_items(self, items_us, id):\n\n items_db = self.execute(TABELLE['items']['select']['by_id'], (id,))\n\n for key in items_us.keys():\n items_db[key.lower()] += items_us[key]\n\n # print(items_db)\n\n self.execute(TABELLE['items']['update'], (\n items_db['c'],\n items_db['nc'],\n items_db['r'],\n items_db['ur'],\n items_db['l'],\n items_db['e'],\n items_db['u'],\n items_db['ue'],\n id\n ))",
"def model_update(self, db):\n db.session.commit()",
"def update_item(self, id: str, user: User, **kwargs) -> None:",
"def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r",
"def upsert_location(self, location):",
"def update(id, data):\n db = core.connect()\n theShift = db[id]\n theShift.update(data)\n theShift[\"modified\"] = utils.utctime()\n db[id] = theShift\n return db[id]",
"def update(self, key, value):\n self._lock.acquire()\n self._db[key] = value\n self._lock.release()\n logging.debug(\"inserted key %s with value %s into db\", key, value)",
"def _update_single_item(self, location, update):\r\n\r\n # See http://www.mongodb.org/display/DOCS/Updating for\r\n # atomic update syntax\r\n result = self.collection.update(\r\n {'_id': location.to_deprecated_son()},\r\n {'$set': update},\r\n multi=False,\r\n upsert=True,\r\n # Must include this to avoid the django debug toolbar (which defines the deprecated \"safe=False\")\r\n # from overriding our default value set in the init method.\r\n safe=self.collection.safe\r\n )\r\n if result['n'] == 0:\r\n raise ItemNotFoundError(location)",
"def put(self, key):\n app.logger.info('Request to Update a map_object with key [%s]', key)\n check_content_type('application/json')\n map_object = Map.get_value_with_key(key)\n if not map_object:\n # api.abort(404, \"Map with key '{}' was not found.\".format(key))\n raise NotFound('Map with key [{}] was not found.'.format(key))\n # data = request.get_json()\n data = api.payload\n app.logger.info(data)\n map_object.deserialize(data)\n map_object.key = key\n map_object.save()\n return map_object.serialize(), status.HTTP_200_OK",
"def put(self, args, item):\n db.session.merge(self.Meta.model(**args))\n db.session.commit()\n\n return self.Meta.model.query.get(item.id)",
"def _update_map(self):\n # Creates an instance of DungeonMap using the id of Dungeon\n self._map = DungeonMap(self._id)\n # Calls function to update rooms\n self._map._update_rooms()",
"def update(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.update(self.__class__.__name__, data['id'], data)\n\n self.__dict__.update(saved_data)",
"def _modify_item(item, update_dict):\n for k in update_dict:\n item[k] = str(update_dict[k])\n\n return item",
"def _do_upsert(self, conn, item, spider):\n id = self._get_id(item)\n now = datetime.utcnow().replace(microsecond=0).isoformat(' ')\n\n conn.execute(\"\"\"SELECT EXISTS(\n SELECT 1 FROM products WHERE id = %s\n )\"\"\", (id, ))\n ret = conn.fetchone()[0]\n\n if ret:\n conn.execute(\"\"\"\n UPDATE products\n SET url=%s, title=%s, picture=%s, price=%s, brand=%s, store=%s, id_store=%s, updated=%s, tag1=%s, tag2=%s, tag3=%s, tag4=%s, tag5=%s\n WHERE id=%s\n \"\"\", (item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5'], id))\n spider.log(\"Item updated in db: %s %r\" % (id, item))\n else:\n conn.execute(\"\"\"\n INSERT INTO products (id, url, title, picture, price, brand, store, id_store, updated, tag1, tag2, tag3, tag4, tag5)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\", (id, item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5']))\n spider.log(\"Item stored in db: %s %r\" % (id, item))",
"def update(self):\n self.__execute(self.pkgin_bin, \"update\")",
"def set(aMap,key,value):\n\tbucket=get_bucket(aMap,key)\n\ti,k,v=get_slot(aMap,key)\n\t\n\tif i>=0:\n\t\t#key 存在,替换\n\t\tbucket[i]=(key,value)\n\telse:\n\t\tbucket.append((key,value))",
"def test_map_update_remove(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"bar\"},\n text_map__update={\"foz\": '4', \"foo\": '2'}\n )\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '2', \"foz\": '4'})\n\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"foo\", \"foz\"}\n )\n self.assertEqual(\n TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map,\n {}\n )",
"def test_update_saved_app_map_search(self):\n pass",
"def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)"
] | [
"0.6819395",
"0.6547382",
"0.6486562",
"0.64180374",
"0.6259836",
"0.6173413",
"0.61376715",
"0.6136338",
"0.6133015",
"0.6133015",
"0.61063683",
"0.6041662",
"0.59641296",
"0.5876489",
"0.5864035",
"0.58607227",
"0.5849195",
"0.58311784",
"0.58310145",
"0.5828876",
"0.58115005",
"0.5787902",
"0.57853776",
"0.57820493",
"0.5766254",
"0.574339",
"0.574198",
"0.5736602",
"0.57177436",
"0.57106423"
] | 0.77123857 | 0 |
Delete a map item from the database along with all of its translations | def delete(self, mapitem_id: int):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete():\n\t# Create session\n\ts = Session()\n\ts.query(Lookup).delete(synchronize_session=False)\n\ts.commit()",
"def test_delete_saved_app_map_search(self):\n pass",
"def delete(self, key):\n app.logger.info('Request to Delete a map_object with key [%s]', key)\n map_object = Map.get_value_with_key(key)\n if map_object:\n map_object.delete()\n return 'Map deleted', status.HTTP_204_NO_CONTENT",
"def basemap_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()",
"def delete_mapping(project, img):\n with BMI(_username, _password, project) as bmi:\n ret = bmi.umount_image(img)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo('Success')\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()",
"def test_map_update_remove(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"bar\"},\n text_map__update={\"foz\": '4', \"foo\": '2'}\n )\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '2', \"foz\": '4'})\n\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove={\"foo\", \"foz\"}\n )\n self.assertEqual(\n TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map,\n {}\n )",
"def delete(self):\n items = ShopcartItem.find_by_shopcartid(self.id)\n\n for item in items:\n item.delete()\n\n db.session.delete(self)\n db.session.commit()",
"def removeObjectMap(self,fromMod,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n del self.objectMaps[(fromMod,toMod)]",
"def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)",
"def del_all(self, items):\n for item in items:\n item.key.delete()\n logger.debug(\"Deleted all the items\")",
"def test_map_remove_rejects_non_sets(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition,\n cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'}\n )\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(\n text_map__remove=[\"bar\"]\n )",
"def post_delete(self, *args, **kw):\n id_atributo = int(args[0])\n transaction.begin()\n attr = AtributosPorTipoItem.por_id(id_atributo)\n DBSession.delete(attr)\n transaction.commit()\n flash(\"Atributo Eliminado\")\n redirect(\"./\")",
"def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)",
"def remove(self, item):\n del self._dict[item]",
"def test_delete_voltage_map_item(self):\n pass",
"def remove(self) -> None:\n self.map.remove_ent(self)",
"def DBDeleteLangRecords( lang ):\n log.info(\"Deleting old '%s' records...\", lang)\n return DBExecute(DBConjugations, \"DELETE FROM conjugations WHERE LanguageCode = ?\", lang)",
"def delete(self):\r\n self.domain.delete_item(self)",
"def test_delete_saved_app_map_search_for_user(self):\n pass",
"def delete_article(cls, key):\n article_key = \"article:\" + str(key)\n hashmap = db.delete(article_key)",
"def delete(self, key):\n self.map.pop(key, None)",
"def delete_item(self):\n for item in self.selection():\n origin_name = self.set(item)[\"1\"]\n origin_url = self.set(item)[\"2\"]\n for row in s.item:\n if row[\"item\"] == origin_name and row[\"url\"] == origin_url:\n s.item.remove(row)\n self.delete(item)",
"def delete():",
"def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')",
"def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))",
"def remove_translated_ids(id, event=True):",
"def delete(self, id):\n lm = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel)).get(id)\n if lm:\n lm_dict = lm.get_dict()\n backup_morpheme_language_model(lm_dict)\n Session.delete(lm)\n Session.commit()\n lm.remove_directory()\n return lm\n else:\n response.status_int = 404\n return {'error': 'There is no morpheme language model with id %s' % id}",
"def remove_database_entries(genome_ids):\n \n for genome_id in genome_ids:\n genome_name = str(session.query(Genome).filter_by(id=genome_id).first().name) #need to set the genome's name before removing things so that we may use it later\n genome_id = int(genome_id) #ensuring no unicode buffer errors\n try:\n session.query(GeographicLocation).filter_by(genome_id=genome_id).delete()\n session.query(S_16).filter_by(genome_id=genome_id).delete()\n session.query(Prokka).filter_by(genome_id=genome_id).delete()\n session.query(Toxin).filter_by(genome_id=genome_id).delete()\n session.query(Contig).filter_by(genome_id=genome_id).delete()\n session.query(Genome).filter_by(id=genome_id).delete()\n flash(\"Genome '\"+genome_name+\"' removed from the database\")\n except:\n session.rollback()\n flash(\"Failed to remove genome '\"+genome_name+\"' from the database\")\n try:\n session.commit()\n except:\n session.rollback()\n flash(\"Error removing genomes\")\n return None"
] | [
"0.60695505",
"0.60244024",
"0.5943307",
"0.58938456",
"0.5815377",
"0.58143026",
"0.57671386",
"0.5745031",
"0.57079214",
"0.57064176",
"0.569948",
"0.5697754",
"0.5660007",
"0.56400824",
"0.5629807",
"0.56289786",
"0.5627679",
"0.56257826",
"0.56040335",
"0.5601899",
"0.5598531",
"0.5596995",
"0.5590957",
"0.5575108",
"0.5552221",
"0.55352235",
"0.55326366",
"0.5531798",
"0.5486822",
"0.5479223"
] | 0.7070288 | 0 |
Get map item from database | def get(self, mapitem_id: int) -> MapItem:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapdata():\n return getmapdata(db, MyTable)",
"def get_db_item(self, key, item_key):\n return self.get_db_items(key).get(item_key)",
"def get_map_item(self, idx, col=0, absolute=False):\n\n return self.itemDataMap[self.itemIndexMap[idx] if not absolute else idx][self.get_real_col(col)]",
"def __getitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).one()",
"def _get_mapping_record(self):\n return self.__mapping_record",
"def __getitem__(self, key):\n for db in self.db:\n if db.name == key:\n return db\n raise IndexError",
"def retrieve_from_db(self):\n pass",
"def __getitem__(self, param):\n return self._maps[param]",
"def get(self, key):\n if key in self._db:\n return self._db[key]\n else:\n return None",
"def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()",
"def __getitem__(self, business_id):\n return self.db.get(business_id, None)",
"def get(self, key):\n if self.db is None:\n self._init()\n return self.db[key]",
"def _get_map_record(self):\n return self.mapper.map_record(self.binding_record)",
"def __getitem__(self, item):\n return self.row[item]",
"def get(self, key: str) -> Any:\n return self.db.get(key)",
"def _dictfetchone(self):\n data = self._dictfetchall()\n if data:\n return data[0]\n return {}",
"def get(aMap,key,default=None):\n\ti,k,v=get_slot(aMap,key,default=default)",
"def read(self, key):\n if key not in self.db:\n raise LookupError(\"No record for key \\\"%s\\\" exists.\" % key)\n return self.db[key]",
"def db_row(self):\n return self._model_cls.query.get(self._pk)",
"def search_db(self, key, item):\n db = self.check_db()\n data = [record for record in db if record[key] == item]\n if data:\n return data[0]\n else:\n return False",
"def find(cls, key):\r\n return cls.query().get(key)",
"def get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default=default)\n\treturn v",
"def get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default)\n\treturn v",
"def check_item(self, item, key, db):\n data = [record for record in db if record[key] == item]\n return data",
"def get(self, **args ):\n # Make sure its a valid argument\n for key in args.keys():\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n query = STD.select('*')\n query = query.where( args )\n item = query.list()\n\n # If a list return make sure there is only one item\n if isinstance(item, collections.Iterable):\n if len(item) > 1:\n raise NotUnique(\"More than one items found\")\n if len(item) == 0:\n print \"No items found\"\n return None\n else:\n item = item[0]\n return item",
"def cell_map_from_database(self) -> None:\n for row in self.session.query(DatamapItem).all():\n self.cell_map.append(\n Cell(\n datamap_id=row.id,\n cell_key=row.key,\n cell_value=None,\n template_sheet=row.bicc_sheet,\n bg_colour=None,\n fg_colour=None,\n number_format=None,\n verification_list=None,\n cell_reference=row.bicc_cellref))",
"def getItem(self, column, position):\n return self.data[column, position]",
"def get(cls, pk):\n return DBSession().query(cls).get(pk)",
"def get_map(self):\n return self.map",
"def get_map_from_id(self, id_m):\n return self.id_to_map_dict[id_m]"
] | [
"0.6950481",
"0.6852836",
"0.65755594",
"0.65000963",
"0.64465505",
"0.6368746",
"0.6318184",
"0.6285017",
"0.62202585",
"0.61964667",
"0.61231244",
"0.605938",
"0.604496",
"0.60364556",
"0.60216033",
"0.5915674",
"0.591564",
"0.5902572",
"0.58843106",
"0.5884206",
"0.58806974",
"0.585112",
"0.5831473",
"0.58285236",
"0.58207387",
"0.5819575",
"0.58075565",
"0.5799886",
"0.5797793",
"0.5789109"
] | 0.74543875 | 0 |
Get list of map items for selected lang | def get_all(self, lang: str = None):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_item_concept_mapping(self, lang):\n concepts = self.filter(active=True, lang=lang)\n return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))",
"def get_langs(id):",
"def list(self):\n for key, value in self.languages.iteritems():\n print key, value",
"def wikiLanguages():\n return languages",
"def __getitem__(self, lang):\n return self.__registry[lang]",
"def get_languages(request):\n if request.is_ajax():\n publications = Publication.objects.all()\n language_list = []\n for pub in publications:\n languages = pub.languages.all()\n for lang in languages:\n language_list.append({\"pub\": pub.pk,\"pub_name\":pub.name,\"lang\": u\"%s\" % lang.pk,\"name_lang\":lang.alias})\n data = simplejson.dumps(language_list)\n return HttpResponse(data)",
"def languages():\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/getLangs')\n return r.json['dirs']",
"def movies_lang(dataset, index_, lang_):\r\n movies_=[]\r\n for row in dataset.values():\r\n if(row[index_] == lang_):\r\n movies_.append(row[13])\r\n explore_data(movies_,0,5,False)\r\n return movies_",
"def grepo(request):\n return {\n \"GREPO_LANGUAGES\": Language.objects.all().values_list(\"name\", flat=True)\n }",
"def get_concept_item_mapping(self, concepts=None, lang=None):\n if concepts is None:\n concepts = self.filter(active=True)\n if lang is not None:\n concepts = concepts.filter(lang=lang)\n if lang is None:\n languages = set([concept.lang for concept in concepts])\n if len(languages) > 1:\n raise Exception('Concepts has multiple languages')\n lang = list(languages)[0]\n item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query)\n for concept in concepts], lang)\n return dict(zip([c.pk for c in concepts], item_lists))",
"def list_of_langs(data):\n lang_codes = []\n for lang_data in data:\n lang_codes.append(lang_data.get('value'))\n return lang_codes",
"def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp",
"def get_report_translations(request):\n\n id_report = request.GET.get('id_report',None)\n if id_report is not None:\n languages = []\n lang = Report.objects.filter(id_report = id_report)\n for el in lang:\n if el.language not in languages:\n languages.append(el.language)\n\n json_resp = {}\n # print(languages)\n json_resp['languages'] = languages\n return JsonResponse(json_resp)",
"def by_xling_id_get_langs(self, xling_id, langs):\n all_answers = self.by_xling_id[xling_id]\n filtered_answers = []\n for lang in langs:\n selected_answer = None\n for a in all_answers:\n if a.language == lang:\n selected_answer = a\n filtered_answers.append(selected_answer)\n return filtered_answers",
"def allLocales(self):\n return util.parseLocales(urlopen(self.all_url).read())",
"def snippets_by_language(request, slug):\n language = get_object_or_404(Language, slug__exact=slug)\n return list_detail.object_list(request,\n queryset=Snippet.objects.get_by_language(slug),\n extra_context={ 'object': language },\n template_name='cab/language_detail.html',\n **base_generic_dict)",
"def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def getPossibleLangs(self):\n lst = {}\n for e in self._values:\n for lang in e.getLangCodes():\n lst[ lang ] = 1\n return lst.keys()",
"def select_translate(self, target_id: int, type: int, lang: str) -> dict:\n sql = \"SELECT name,value FROM translates WHERE target_id = \" + str(target_id)\n sql += \" AND type = '\" + str(type) + \"'\"\n sql += \" AND lang = '\" + lang + \"'\"\n\n result = self.cursor.execute(sql).fetchall()\n\n data = {}\n for row in result:\n data[row['name']] = row['value']\n\n return data",
"async def get_multilingual(filename: str):\n query_result = {\"langList\": []}\n database = get_db()\n query_displayname = database.AQLQuery(\n query=main_queries.QUERY_MULTILINGUAL_LANGS,\n bindVars={\n \"filename\": filename\n },\n rawResults=True\n )\n query_result = {\"langList\": query_displayname.result[0]}\n return query_result",
"def get_translated_ids(id):",
"def get_languages(self):\n language_list = []\n url = '%s%s/languages.xml' % (self.URL_API, self.API_KEY)\n data = urllib.urlopen(url)\n root = cElementTree.parse(data).getroot()\n for language in root.iter('Language'):\n language_list.append(language.find('abbreviation').text)\n return language_list",
"def get_all_menu():",
"def getData(language=None):",
"def getAvailableLanguages(self):\n url = \"http://www.youtube.com/api/timedtext?v=%s&type=list\" % self.video_id\n xml = urllib2.urlopen(url)\n tree = ET.parse(xml)\n root = tree.getroot()\n languages = {}\n for child in root:\n languages[child.attrib[\"lang_code\"]] = child.attrib[\"lang_translated\"]\n return languages",
"def scrap_one( lang: str, page: Page ) -> list:\n log.info( \"(%s, %s)\", lang, page )\n\n lm = importlib.import_module(\"conjugator.\" + lang)\n\n try:\n items = lm.scrap( page )\n\n except json.decoder.JSONDecodeError as e:\n log.error( \"(%s): HTTP-response: parse error: \", page.label, exc_info=1 )\n return []\n\n return items",
"def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })",
"def getLocales(self):\n pass",
"def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = _('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A 
entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language",
"def getFilteredLanguages(self, data):\n def filterLanguage(dataItem):\n return dataItem['type'] == 'language'\n return list(filter(filterLanguage, data))"
] | [
"0.6933556",
"0.6791755",
"0.6202333",
"0.60564715",
"0.6018329",
"0.59928876",
"0.59139585",
"0.58982617",
"0.58469373",
"0.5843785",
"0.5796447",
"0.57480514",
"0.57101274",
"0.5703919",
"0.5663914",
"0.5649781",
"0.5592568",
"0.55881494",
"0.55463994",
"0.5546072",
"0.55238163",
"0.5521125",
"0.5507484",
"0.54990417",
"0.5477939",
"0.5466426",
"0.54645246",
"0.5464463",
"0.54534775",
"0.5449289"
] | 0.6805568 | 1 |
Test an execution plan with downloadable git files | def test_execution_plan_type_downloable_git(self, mock_makedir, mock_path,
mock_git):
mock_makedir.return_value = None
mock_path.return_value = True
mock_git.clone.return_value = None
template = self.get_template_downloable_git()
files = files_manager.FilesManager(self.get_template_downloable(5))
files._download_url_file(template.Files['mycoockbook'], "script") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_download_deployment_run_test_report(self):\n pass",
"def test_download_file(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n assets_calculated_sha = 'notasha'\n sha_dict = {}\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == os.path.basename(TEST_FILENAME):\n\n # the uploaded asset\n request = requests.get(check_asset.browser_download_url)\n open(TEST_DOWNLOAD, 'wb').write(request.content)\n\n # recalc hash of downloaded file\n assets_calculated_sha = Arguments.get_hash(TEST_DOWNLOAD)\n\n elif check_asset.name == sha_filename:\n\n # the sha hash file\n request = requests.get(check_asset.browser_download_url)\n sha_dict = request.json()\n\n assert assets_calculated_sha == sha_dict[os.path.basename(TEST_FILENAME)]",
"def calc_test(commits, author):\n\topen('modifications.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 5 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\t\t\t# getting every blob from a given commit\n\t\tquery = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +\n\t\t\t# splitting it and discarding the newlines and the commit's hash\n\t\t\t'awk -v RS=\"[;\\\\n]\" 1 | tail -n+2); do ' +\n\t\t\t# We look up the content's of each blob, and discard the STDERR,\n\t\t\t# in the case of trying to look up a blob that does not exist in the database\n\t\t\t'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +\n\t\t\t# We search for the use of a unit testing library, using the above regex, and\n\t\t\t# keeping the first result only, since that is enough to know that the commit contains\n\t\t\t# a unit testing file, to make the execution faster\n\t\t\t'egrep -m 1 \"' + final_reg + '\"')\n\t\tif bash(query): # if contains unit testing lib\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\t# at this point we could search the parent's tree for the existence of tests, but this\n\t\t\t# would require recursively looking at every directory and parsing every file in the tree, so, due\n\t\t\t# to the complexity, we skip it and consider it a modification instead of a possible introduction\n\n\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\tprint 'modification'\n\t\t\tf.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit",
"def test_download1(self):\n pass",
"def test_download_file_no_sha(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n pass_test = True\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == sha_filename:\n\n pass_test = False\n\n assert pass_test",
"def test_download2(self):\n pass",
"def test_download(self):\n pass",
"def test_main():\n\n temp_dir = \"./deepreg_download_temp_dir\"\n branch = Repo(\".\").head.object.hexsha\n\n main(args=[\"--output_dir\", temp_dir, \"--branch\", branch])\n\n # Check downloading all req'd folders into temp, verify that they are the same as in main branch.\n config_dcmp = dircmp(\"./config\", os.path.join(temp_dir, \"config\"))\n assert not has_diff_files(config_dcmp)\n\n data_dcmp = dircmp(\"./data\", os.path.join(temp_dir, \"data\"))\n assert not has_diff_files(data_dcmp)\n\n demos_dcmp = dircmp(\"./demos\", os.path.join(temp_dir, \"demos\"))\n assert not has_diff_files(demos_dcmp)\n\n shutil.rmtree(temp_dir)",
"def test_execution_plan_type_svn(self, mock_makedir, mock_subproc_popen):\n process_mock = mock.Mock()\n attrs = {'communicate.return_value': ('ouput', 'ok'),\n 'poll.return_value': 0}\n process_mock.configure_mock(**attrs)\n mock_subproc_popen.return_value = process_mock\n\n template = self.get_template_svn()\n files = files_manager.FilesManager(template)\n files._download_url_file(template.Files['file'], \"script\")",
"def test_get_file_executors(self):\n pass",
"def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")",
"def test_execute_with_multi_file_builds(self):\n review, review_files = self.run_tool_execute(\n checkout_dir=self.checkout_dir,\n filename='Makefile',\n file_contents=(\n b'all: test1.o test2.o\\n'\n b'\\n'\n b'.c.o:\\n'\n b'\\tgcc -c $<\\n'\n ),\n other_files={\n 'test1.c': (\n b'#include <stdlib.h>\\n'\n b'\\n'\n b'int null_deref() {\\n'\n b' int* i = NULL;\\n'\n b' return *i;\\n'\n b'}\\n'\n b'\\n'\n b'void mem_leak() {\\n'\n b' int* p = (int*)malloc(sizeof(int));\\n'\n b'}\\n'\n ),\n 'test2.c': (\n b'#include <fcntl.h>\\n'\n b'#include <stdio.h>\\n'\n b'#include <stdlib.h>\\n'\n b'#include <unistd.h>\\n'\n b'\\n'\n b'void fp_leak() {\\n'\n b' open(\"foo.txt\", O_WRONLY);\\n'\n b'}\\n'\n ),\n },\n tool_settings={\n 'build_type': 'make',\n })\n\n self.assertEqual(review.comments, [\n {\n 'filediff_id': review_files['test1.c'].id,\n 'first_line': 5,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'pointer `i` last assigned on line 4 could be null and '\n 'is dereferenced at line 5, column 12.\\n'\n '\\n'\n 'Column: 12\\n'\n 'Severity: ERROR\\n'\n 'Error code: Null Dereference'\n ),\n },\n {\n 'filediff_id': review_files['test1.c'].id,\n 'first_line': 9,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'The value written to &p (type int*) is never used.\\n'\n '\\n'\n 'Column: 5\\n'\n 'Severity: ERROR\\n'\n 'Error code: Dead Store'\n ),\n },\n {\n 'filediff_id': review_files['test2.c'].id,\n 'first_line': 7,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'resource acquired by call to `open()` at line 7, column '\n '5 is not released after line 7, column 5.\\n'\n '\\n'\n 'Column: 5\\n'\n 'Severity: ERROR\\n'\n 'Error code: Resource Leak'\n ),\n },\n ])\n self.assertEqual(review.general_comments, [])\n\n self.assertSpyCalledWith(\n execute,\n [\n self.tool_exe_path,\n 'run',\n '--no-progress-bar',\n '--',\n 'make',\n ],\n ignore_errors=True,\n with_errors=True)",
"def test_get_file_with_git_and_base_commit_id(self):\n self._test_get_file(\n tool_name='Git',\n revision='123',\n base_commit_id='456',\n expected_revision='123')",
"def test_get_file_with_git_and_revision(self):\n self._test_get_file(\n tool_name='Git',\n revision='123',\n base_commit_id=None,\n expected_revision='123')",
"def test_fetch_blocking_valid():\n res_dir = 'tmp/test'\n ident = _id()\n res = proj.fetch('test', ident)\n assert res.result['val'] > 0\n entry_dir = os.path.join(res_dir, ident)\n assert os.path.isdir(entry_dir)\n paths = ['storage/hello.txt', 'result.json', 'run.log', 'error.log']\n for p in paths:\n assert os.path.exists(os.path.join(entry_dir, p))\n with open(os.path.join(res.paths['storage'], 'hello.txt')) as fd:\n content = fd.read()\n assert content == 'hello world'\n with open(res.paths['status']) as fd:\n status = fd.read()\n assert status == 'complete'\n with open(res.paths['start_time']) as fd:\n start_time = int(fd.read())\n with open(res.paths['end_time']) as fd:\n end_time = int(fd.read())\n assert start_time <= end_time\n with open(res.paths['result']) as fd:\n assert json.load(fd)['val'] > 0\n with open(res.paths['log']) as fd:\n assert 'this should go into run.log' in fd.read()",
"def test_single_repo_lots_of_tests(self):\n self.Mokes.add_repo_to_pi()\n self.Mokes.make_lots_of_tests(\n 45, self.db.session, self.Mokes.greekLit,\n coverage_ends_at=75.0, datetime_starts_at=datetime.datetime(2017, 4, 5, 7, 4, 22, tzinfo=None)\n )\n\n page1 = BeautifulSoup(self.client.get(\"/repo/PerseusDl/canonical-greekLit\").data.decode(), 'html.parser')\n self.assertEqual(\n page1.select('a.next')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=2\",\n \"There should be a next link\"\n )\n self.assertEqual(\n page1.select('a.last')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=3\",\n \"There should be a last link\"\n )\n self.assertEqual(\n len(page1.select('a.prev')), 0,\n \"There should not be a prev link\"\n )\n self.assertEqual(\n len(page1.select('a.first')), 0,\n \"There should not be a firstLink\"\n )\n tests = page1.select(\"#body tbody tr\")\n self.assertEqual(len(tests), 20, \"There should be 20 tests\")\n\n last_test = tests[0]\n self.assertEqual(\n len(last_test.select('a[href=\"/repo/PerseusDl/canonical-greekLit/47\"]')), 1,\n \"There should be a link to the last test\"\n )\n self.assertEqual(\n len(last_test.select('a[href=\"https://github.com/PerseusDL/canonical-latinLit/commit/'\n 'fb644351560d8296fe6da332236b1f8d61b2828a#all_commit_comments\"]')),\n 1,\n \"There should be a link to the commit on GitHub\"\n )\n self.assertIn(\"<td>75.0</td>\", str(last_test), \"There should be the coverage shown\")\n\n ###############\n #\n # Second Page\n #\n ###############\n page2 = BeautifulSoup(self.client.get(\"/repo/PerseusDl/canonical-greekLit?page=2\").data.decode(), 'html.parser')\n self.assertEqual(\n page2.select('a.prev')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=1\",\n \"There should be a Previous link\"\n )\n self.assertEqual(\n page2.select('a.first')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=1\",\n \"There should be a First link\"\n )\n self.assertEqual(\n page2.select('a.next')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=3\",\n \"There should be a Next link\"\n )\n self.assertEqual(\n page2.select('a.last')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=3\",\n \"There should be a last Link\"\n )\n tests = page2.select(\"#body tbody tr\")\n self.assertEqual(len(tests), 20, \"There should be 20 tests\")\n\n last_test = tests[0]\n self.assertEqual(\n len(last_test.select('a[href=\"/repo/PerseusDl/canonical-greekLit/27\"]')), 1,\n \"There should be a link to the last test\"\n )\n self.assertEqual(\n len(last_test.select('a[href=\"https://github.com/PerseusDL/canonical-latinLit/commit/'\n 'f6e1126cedebf23e1463aee73f9df08783640400#all_commit_comments\"]')),\n 1,\n \"There should be a link to the commit on GitHub\"\n )\n self.assertIn(\"<td>65.0</td>\", str(last_test), \"There should be the coverage shown\")\n\n ###############\n #\n # Third Page\n #\n ###############\n page3 = BeautifulSoup(self.client.get(\"/repo/PerseusDl/canonical-greekLit?page=3\").data.decode(), 'html.parser')\n self.assertEqual(\n page3.select('a.prev')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=2\",\n \"There should be a Previous link\"\n )\n self.assertEqual(\n page3.select('a.first')[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit?page=1\",\n \"There should be a First link\"\n )\n self.assertEqual(\n len(page3.select('a.next')), 0,\n \"There should not be a Next link\"\n )\n self.assertEqual(\n len(page3.select('a.last')), 0,\n \"There should not be a last Link\"\n )\n tests = page3.select(\"#body tbody tr\")\n 
self.assertEqual(len(tests), 5, \"There should be 5 tests\")\n\n last_test = tests[0]\n self.assertEqual(\n len(last_test.select('a[href=\"/repo/PerseusDl/canonical-greekLit/7\"]')), 1,\n \"There should be a link to the last test\"\n )\n self.assertEqual(\n len(last_test.select('a[href=\"https://github.com/PerseusDL/canonical-latinLit/commit/'\n 'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4#all_commit_comments\"]')),\n 1,\n \"There should be a link to the commit on GitHub\"\n )\n self.assertIn(\"<td>55.0</td>\", str(last_test), \"There should be the coverage shown\")",
"def test_fun(file_path, urls):\n assert os.path.exists(\"src/01_download/urls.txt\"), \"Urls text file not\\\n found in location\"\n assert os.path.exists(\"data/raw/census_2001.csv\"), \"Census file not\\\n found in location\"\n print(\"Tests ran succesfully\")",
"def test_get_run(self):\n pass",
"def test_retrieve_files_with_pre_hook(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n os.makedirs('/tmp/remote_pacha/localhost/pacha_pre')\n touch_script = open('/tmp/remote_pacha/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/remote_pacha/localhost/pre_got_executed.txt''')\n touch_script.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/pre_got_executed.txt'))",
"def main():\n parser = argparse.ArgumentParser(description='Fetch master build artifacts.')\n parser.add_argument('--token', type=str, help='API token to use')\n parser.add_argument(\n '--job', type=str, help='From what job to fetch artifacts from')\n parser.add_argument(\n '--artifact-download-dir',\n type=str,\n default='.',\n help='Where to download the artifacts')\n parser.add_argument(\n '--build-output-dir',\n type=str,\n default='.',\n help='Generated build files directory to use to compare for bloat')\n parser.add_argument(\n '--report-file',\n type=str,\n default='report.txt',\n help='From what job to fetch artifacts from')\n parser.add_argument(\n '--github-api-token',\n type=str,\n help='Github API token to upload the report as a comment')\n parser.add_argument(\n '--github-repository', type=str, help='Repository to use for PR comments')\n parser.add_argument(\n '--github-comment-pr-number',\n type=str,\n default=None,\n help='To what PR to comment in github')\n parser.add_argument(\n '--log-level',\n default=logging.INFO,\n type=lambda x: getattr(logging, x),\n help='Configure the logging level.')\n args = parser.parse_args()\n\n # Ensures somewhat pretty logging of what is going on\n logging.basicConfig(\n level=args.log_level,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n coloredlogs.install()\n\n if not args.token or not args.job:\n logging.error(\n 'Required arguments missing. Please specify at least job and token.')\n return\n\n try:\n ci_fetch_artifacts.fetchArtifactsForJob(args.token, args.job,\n args.artifact_download_dir)\n except Exception as e:\n logging.warning('Failed to fetch artifacts: %r', e)\n\n compareResults = generateBloatReport(\n args.report_file,\n args.artifact_download_dir,\n args.build_output_dir,\n title=\"Bloat report for job '%s'\" % args.job)\n\n if args.github_api_token and args.github_repository and args.github_comment_pr_number:\n sendFileAsPrComment(args.job, args.report_file, args.github_api_token,\n args.github_repository,\n int(args.github_comment_pr_number), compareResults)",
"def check(self):\n json = JsonBackend(\"../src/builder/projects.json\")\n json.load()\n\n TM_ITSELF = 1\n expected_files = TM_ITSELF + sum(p.downloadable is True\n for p in json.projects)\n self.downloads_for_project('tots', expected_files)\n\n expected_files = TM_ITSELF + sum(p.softcatala is True and\n p.downloadable is True\n for p in json.projects)\n\n self.downloads_for_project('softcatala', expected_files)\n\n expected_files = 1\n for project_dto in json.projects:\n if not project_dto.downloadable:\n continue\n\n self.downloads_for_project(project_dto.name, expected_files)\n self.check_project_link(project_dto.projectweb)",
"def test_basic_execution(self):",
"def test_compile_local_files(self, tester_login):\n filenames = os.listdir(COMPILE_TESTER_DIR)\n test_files = [os.path.join(COMPILE_TESTER_DIR, name) for name in filenames]\n projects = [self.upload_project('#uploadFolderZip form', fname,\n os.path.splitext(os.path.basename(fname))[0]) for fname\n in test_files]\n flag = True\n while flag:\n uploaded_sketches = self.get_elements(By.CSS_SELECTOR, '#project_list > li')\n if len(uploaded_sketches) >= len(projects):\n flag = False\n break\n time.sleep(1)\n self.compile_all_sketches(COMPILE_TESTER_STAGING_URL,\n '#user_projects tbody a',\n iframe=False,\n compile_type='sketch',\n create_report=True, logfile=COMPILE_TESTER_LOGFILE_STAGING)\n for name in projects:\n self.delete_project(name.replace(\" \", \"-\"))",
"def test_issue1(self):\r\n full_path = os.path.join(TEST_FILES_PATH, 'issue1.bb')\r\n steps, vars = ExecuteScriptFile(full_path, {})\r\n\r\n self.assertEquals(\r\n len(steps[-1].output.split(\"+\")) > 3,\r\n True)",
"def test_git_pull(self):\r\n\r\n self._setstaff_login()\r\n\r\n response = self._add_edx4edx()\r\n response = self._add_edx4edx()\r\n self.assertIn(_(\"The course {0} already exists in the data directory! \"\r\n \"(reloading anyway)\").format('edx4edx_lite'),\r\n response.content.decode('utf-8'))\r\n self._rm_edx4edx()",
"def main():\r\n args = getargs()\r\n dir_name = args.dir_name\r\n url = args.url\r\n fetch_junit(dir_name, url)",
"def test_9_dryruns(self):\n\n f = fmri.PkgFmri(self.published[3], None)\n\n rpth = tempfile.mkdtemp(dir=self.test_root)\n self.pkgrepo(\"create {0}\".format(rpth))\n expected = [\"pkg5.repository\"]\n self.pkgrecv(self.durl1, \"-n -d {0} {1}\".format(rpth, f))\n self.assertEqualDiff(expected, os.listdir(rpth))\n\n self.pkgrecv(self.durl1, \"-r -n -d {0} {1}\".format(rpth, f))\n self.assertEqualDiff(expected, os.listdir(rpth))\n\n self.pkgrecv(self.durl1, \"--clone -n -p '*' -d {0}\".format(rpth))\n self.assertEqualDiff(expected, os.listdir(rpth))\n\n arc_path = os.path.join(self.test_root, \"test.p5p\")\n self.pkgrecv(self.durl1, \"-a -n -d {0} \\*\".format(arc_path))\n self.assert_(not os.path.exists(arc_path))\n\n # --raw actually populates the destination with manifests even\n # with -n, so just check that it exits 0.\n self.pkgrecv(self.durl1, \"--raw -n -d {0} {1}\".format(\n self.tempdir, f))\n\n # --raw actually populates the destination with manifests even\n # with -n, so just check that it exits 0.\n self.pkgrecv(self.durl1, \"--raw -r -n -d {0} {1}\".format(\n self.tempdir, f))",
"def main(args=getargs()):\n start, startrefsp = get_details(args.initial_change_id)\n end, endrefsp = get_details(args.end_change_id)\n for change, chash, refspec in [\n (args.initial_change_id, start, startrefsp),\n (args.end_change_id, end, endrefsp),\n ]:\n checkout(change, refspec)\n clog = get_commit_log()\n if clog[0] != chash:\n sys.exit(\n f\"Something went wrong, expected {chash}, got {clog},\"\n \"bailing out\"\n )\n if not args.test_dir.exists() or args.test_dir.is_file():\n sys.exit(f\"{args.test_dir} does not exist or not dir\")\n tests = glob.glob(str(args.test_dir.joinpath(\"*.py\")))\n pairs = [(start, end)]\n if args.recursive:\n pairs = [\n (start, i)\n for i in list(reversed(get_commit_log((start, end))))[1:]\n ]\n failures = runtests(\n pairs, tests, args.target1, args.target2, args.target3, args.target4\n )\n if failures:\n sys.exit(\n f\"Failures detected, failed tests (and exit statuses): {failures}\"\n )",
"def verify_files():\n toverify = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='unverified'\")\n\n numverified = 0\n for file in toverify:\n\n actualsize = pipeline_utils.get_file_size(file['filename'])\n\n expectedsize = file['size']\n\n last_attempt_id = jobtracker.query(\"SELECT id \" \\\n \"FROM download_attempts \" \\\n \"WHERE file_id=%s \" \\\n \"ORDER BY id DESC \" % file['id'], \\\n fetchone=True)\n \n queries = []\n if actualsize == expectedsize:\n dlm_cout.outs(\"Download of %s is complete and verified.\" % \\\n os.path.split(file['filename'])[-1])\n # Everything checks out!\n queries.append(\"UPDATE files \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n\n\t # Mark the beam as downloaded in the main database\n\t #mark_beam_downloaded(os.path.split(file['filename'])[-1]))\n\n numverified += 1\n else:\n dlm_cout.outs(\"Verification of %s failed. \\n\" \\\n \"\\tActual size (%d bytes) != Expected size (%d bytes)\" % \\\n (os.path.split(file['filename'])[-1], actualsize, expectedsize))\n \n # Boo... verification failed.\n queries.append(\"UPDATE files \" \\\n \"SET status='failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='verification_failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n jobtracker.query(queries)\n return numverified",
"def test_call_sh_files(self):\n def structs():\n \"\"\"Mockups for called scripts.\"\"\"\n server = MockUCSHttpServer('server')\n struct_r = U.UCSRepoPool(major=MAJOR, minor=MINOR, part=PART, patchlevel=PATCH, arch=ARCH)\n preup_r = struct_r.path('preup.sh')\n postup_r = struct_r.path('postup.sh')\n struct_c = U.UCSRepoPool(major=MAJOR, minor=MINOR, part='%s/component' % (PART,), patch='c', arch=ARCH)\n preup_c = struct_c.path('preup.sh')\n postup_c = struct_c.path('postup.sh')\n\n yield (server, struct_r, 'preup', preup_r, 'r_pre')\n yield (server, struct_r, 'postup', postup_r, 'r_post')\n yield (server, struct_c, 'preup', preup_c, 'c_pre')\n yield (server, struct_c, 'postup', postup_c, 'c_post')\n tmp = NamedTemporaryFile()\n\n gen = self.u.call_sh_files(structs(), tmp.name, 'arg')\n\n # The Updater only yields the intent, the content is only available after the next step\n self.assertEqual(('update', 'pre'), gen.next()) # download\n self.assertEqual([], MockPopen.mock_get())\n self.assertEqual(('preup', 'pre'), gen.next()) # pre\n self.assertEqual([], MockPopen.mock_get())\n self.assertEqual(('preup', 'main'), gen.next())\n self.assertEqual(('pre', 'arg', 'c_pre'), MockPopen.mock_get()[0][1:])\n self.assertEqual(('preup', 'post'), gen.next())\n self.assertEqual(('arg', 'r_pre'), MockPopen.mock_get()[0][1:])\n self.assertEqual(('update', 'main'), gen.next()) # update\n self.assertEqual(('post', 'arg', 'c_pre'), MockPopen.mock_get()[0][1:])\n self.assertEqual(('postup', 'pre'), gen.next()) # post\n self.assertEqual([], MockPopen.mock_get())\n self.assertEqual(('postup', 'main'), gen.next())\n self.assertEqual(('pre', 'arg', 'c_post'), MockPopen.mock_get()[0][1:])\n self.assertEqual(('postup', 'post'), gen.next())\n self.assertEqual(('arg', 'r_post'), MockPopen.mock_get()[0][1:])\n self.assertEqual(('update', 'post'), gen.next())\n self.assertEqual(('post', 'arg', 'c_post'), MockPopen.mock_get()[0][1:])\n self.assertRaises(StopIteration, gen.next) # done\n self.assertEqual([], MockPopen.mock_get())"
] | [
"0.65409565",
"0.6530164",
"0.6279986",
"0.6256195",
"0.6231449",
"0.6157987",
"0.6156309",
"0.61558706",
"0.61408335",
"0.60977995",
"0.604785",
"0.594729",
"0.5861912",
"0.5824399",
"0.57773626",
"0.5769683",
"0.57622266",
"0.5680721",
"0.5665056",
"0.5652566",
"0.5651526",
"0.5648709",
"0.5644176",
"0.56060153",
"0.5599011",
"0.5581468",
"0.5567995",
"0.5555391",
"0.55391043",
"0.5530262"
] | 0.6872425 | 0 |
Test an execution plan with svn files. | def test_execution_plan_type_svn(self, mock_makedir, mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ouput', 'ok'),
'poll.return_value': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
template = self.get_template_svn()
files = files_manager.FilesManager(template)
files._download_url_file(template.Files['file'], "script") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_file_with_svn_and_revision(self):\n self._test_get_file(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123')",
"def test_get_file_with_svn_and_base_commit_id(self):\n self._test_get_file(\n tool_name='Subversion',\n revision='123',\n base_commit_id='456',\n expected_revision='123')",
"def run(ctx, test_plan, only):\n\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n if handler.validate():\n handler = RunCommandHandler(ctx.obj['qa_dir'],\n vcs_adapter=__vcs_factory__.create_cvs_adapter(ctx.obj['vcs']),\n test_plan=test_plan,\n report_dir=ctx.obj['report_dir'],\n debug=ctx.obj['debug'])\n\n handler.run_test_cases(only=only)\n\n else:\n exit(1)",
"def main():\r\n\r\n checkArgs()\r\n\r\n #setup some variables for cron setup\r\n cron_setup = False\r\n log_dir = \"\"\r\n\r\n # -c means we have to setup the crontab with a new cron job\r\n if sys.argv[1] == '-c':\r\n cron_setup = True\r\n sys.argv = sys.argv[1:]\r\n checkArgs(cron_setup)\r\n log_dir = sys.argv[2]\r\n \r\n if not( os.path.isdir(log_dir) ):\r\n help_exit(\"given log_foldername is not a directory\")\r\n\r\n #set up all the variables about directory information\r\n current_dir = os.getcwd()\r\n \r\n #ensures that the backslashes and forward slashes are proper for the OS\r\n target_dir = os.path.normpath(sys.argv[1])\r\n\r\n #make sure svn is up to date and start up preparetest\r\n os.chdir(os.path.normpath(current_dir + \"/../trunk\"))\r\n os.system(\"svn up\")\r\n os.chdir(current_dir)\r\n preparetest.main()\r\n\r\n if not( os.path.isdir(target_dir) ):\r\n help_exit(\"given foldername is not a directory\")\r\n\r\n #change to target directory and clean up the target directory folder\r\n os.chdir(target_dir)\r\n\r\n files_to_remove = glob.glob(\"*\")\r\n\r\n for f in files_to_remove:\r\n if os.path.isdir(f):\r\n shutil.rmtree(f)\r\n else:\r\n os.remove(f)\r\n\r\n os.chdir(current_dir)\r\n\r\n #the next few lines is necessary unless the file is manually copied over\r\n preparetest.copy_to_target(\"../trunk/integrationtests/repy_nm_unit_tests/*\", target_dir)\r\n preparetest.copy_to_target(\"../trunk/integrationtests/common/*\", target_dir)\r\n\r\n #check to see if cron setup was requested, if yes run cron_setup\r\n if cron_setup:\r\n\r\n #create the absolute path for the log file and the file needed for the \r\n #cron job\r\n cron_tab_dir=os.path.normpath(current_dir + \"/\" + target_dir)\r\n cron_log_dir=os.path.normpath(current_dir + \"/\" + log_dir)\r\n \r\n cron_line=\"45 * * * * export GMAIL_USER='[email protected]' && export GMAIL_PWD='repyrepy' && /usr/bin/python \" + cron_tab_dir + \"/rununittests.py >> \" + cron_log_dir + \"/cron_log.rununittests 2>&1\" + os.linesep\r\n\r\n #setup the cron job\r\n setup_crontab.add_crontab(cron_line, \"rununittests\")",
"def svn_command(s):\n out = launchsvn(s, show=opts[\"show-changes\"] or opts[\"dry-run\"],\n pretend=opts[\"dry-run\"],\n split_lines=False)\n if not opts[\"dry-run\"]:\n print(out)",
"def run_test(self, testcase, name, options):\n name = options.suite+'_'+name\n cmd = options.solver+' '\n if not options.cat_options is None:\n cmd += options.cat_options+' '\n cmd += options.file\n print( \"Running test suite '%s' test '%s' command '%s'\" % \\\n (options.suite, name, cmd))\n pyutilib.subprocess.run(cmd, outfile=options.currdir+'test_'+name+\".out\")\n testcase.failUnlessFileEqualsBaseline(\n options.currdir+'test_'+name+\".out\",\n options.currdir+'test_'+name+\".txt\")",
"def _main():\r\n @enum.unique\r\n class Verbosity(enum.IntEnum):\r\n NOTHING = 0\r\n SIMPLE = 1\r\n DETAIL = 2\r\n\r\n CURRENT_PATH = Path('./file/report.txt')\r\n EXEC_DATETIME = datetime.today()\r\n FILE_MOD_DATETIME = datetime.fromtimestamp(CURRENT_PATH.stat().st_mtime)\r\n LOG_PATH = CURRENT_PATH.with_name(\r\n CURRENT_PATH.stem + f'_{EXEC_DATETIME.date()}.log')\r\n \r\n with open(CURRENT_PATH, mode='w') as fw:\r\n fw.write(\r\n f'[Date of script modification] {str(FILE_MOD_DATETIME)}\\n'\r\n f'[Date of this test execution] {str(EXEC_DATETIME)}\\n'\r\n '\\n')\r\n\r\n unittest.main(\r\n module=\"tests.test_login\", \r\n testRunner=unittest.TextTestRunner(\r\n stream=fw,\r\n descriptions=False,\r\n verbosity=Verbosity.DETAIL))",
"def comparePlans(arguments):\n if os.path.exists(arguments[1]) and os.path.exists(arguments[2]):\n planFrom = os.path.basename(arguments[1]), os.path.dirname(arguments[1])\n planTo = os.path.basename(arguments[2]), os.path.dirname(arguments[2])\n fileTo = open(arguments[2], \"r\")\n compareFiles(findBaseline(planFrom[0], planFrom[1]), fileTo)\n fileTo.close()\n elif not os.path.exists(arguments[1]):\n sys.stderr.write('Error: cannot find ' + arguments[1])\n else: \n sys.stderr.write('Error: cannot find ' + arguments[2])",
"def test_get_query_list_from_file(): # ***Incomplete test\n ##########################\n # Arrange.\n infp = \"infp\"\n\n ##########################\n # Act.\n #x = get_query_list_from_file(infp)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_basic_execution(self):",
"def test_run_any_search(): # ***Incomplete test\n ##########################\n # Arrange.\n queryfile = \"queryfile\"\n\n ##########################\n # Act.\n #x = run_any_search(queryfile)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_execute_review_7(self):\n review.execute_review(self.alchemist, self.test_dir,\n self.review_test_dir.name,\n s_report=True)\n\n self.assertTrue(self.review_test_dir.is_dir())\n\n summary_report_file = self.review_test_dir.joinpath(\"SummaryReport.txt\")\n self.assertTrue(summary_report_file.is_file())",
"def test_execute_with_single_file_builds(self):\n review, review_file = self.run_tool_execute(\n checkout_dir=self.checkout_dir,\n filename='Hello.java',\n file_contents=(\n b'class Hello {\\n'\n b' int test() {\\n'\n b' String s = null;\\n'\n b' return s.length();\\n'\n b' }\\n'\n b'}\\n'\n ),\n tool_settings={\n 'build_type': 'javac',\n })\n\n self.assertEqual(review.comments, [\n {\n 'filediff_id': review_file.id,\n 'first_line': 4,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'object `s` last assigned on line 3 could be null and '\n 'is dereferenced at line 4.\\n'\n '\\n'\n 'Severity: ERROR\\n'\n 'Error code: Null Dereference'\n ),\n },\n ])\n self.assertEqual(review.general_comments, [])\n\n self.assertSpyCalledWith(\n execute,\n [\n self.tool_exe_path,\n 'run',\n '--no-progress-bar',\n '--',\n 'javac',\n 'Hello.java',\n ],\n ignore_errors=True,\n with_errors=True)",
"def test(self, cmdline):\n\n if tm.UPDATE_BEFORE_TEST:\n print \"Updating directory of source ...\"\n mu.update_dir(tm.SOURCE_DIR)\n\n args = mu.get_second_arg(cmdline).strip().split()\n if len(args) == 0:\n print \"Invalid command, test [sourcename] ([maxThread] ([pageLimit]))\"\n return\n elif len(args) == 1:\n self.sourcename, = args\n self.max_thread = '5'\n self.page_limit = '2'\n elif len(args) == 2:\n self.sourcename, self.max_thread = args\n self.page_limit = '2'\n elif len(args) == 3:\n self.sourcename, self.max_thread, self.page_limit = args\n\n print \"Searching directory of %s ...\" % self.sourcename\n self.sourcedir = mu.search_for_source(self.sourcename)\n if not self.sourcedir:\n print \"Directory of %s doesn't exist.\\n\" % self.sourcename\n return\n\n self.sourcetype = self.get_source_type()\n if self.sourcetype == 'blog':\n process = BlogProcess(self.sourcename, self.sourcedir)\n config_files = ('%s.xq' % self.sourcename, 'config.xml', 'globalConfig.xml', 'subSourceConfig.xml')\n elif self.sourcetype == 'forum':\n process = ForumProcess(self.sourcename, self.sourcedir, string.atoi(self.max_thread), self.page_limit)\n config_files = ('%s-url.xq' % self.sourcename, '%s-thread.xq' % self.sourcename, 'finished.xml', 'webForumConfiguration.xml')\n self.test_source(process, self.sourcedir, config_files)",
"def test_run_all_searches(): # ***Incomplete test\n ##########################\n # Arrange.\n query_file_list = \"query_file_list\"\n\n ##########################\n # Act.\n #x = run_all_searches(query_file_list)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_get_file_exists_with_svn_and_base_commit_id(self):\n self._test_get_file_exists(\n tool_name='Subversion',\n revision='123',\n base_commit_id='456',\n expected_revision='123',\n expected_found=True)",
"def test_execute_with_multi_file_builds(self):\n review, review_files = self.run_tool_execute(\n checkout_dir=self.checkout_dir,\n filename='Makefile',\n file_contents=(\n b'all: test1.o test2.o\\n'\n b'\\n'\n b'.c.o:\\n'\n b'\\tgcc -c $<\\n'\n ),\n other_files={\n 'test1.c': (\n b'#include <stdlib.h>\\n'\n b'\\n'\n b'int null_deref() {\\n'\n b' int* i = NULL;\\n'\n b' return *i;\\n'\n b'}\\n'\n b'\\n'\n b'void mem_leak() {\\n'\n b' int* p = (int*)malloc(sizeof(int));\\n'\n b'}\\n'\n ),\n 'test2.c': (\n b'#include <fcntl.h>\\n'\n b'#include <stdio.h>\\n'\n b'#include <stdlib.h>\\n'\n b'#include <unistd.h>\\n'\n b'\\n'\n b'void fp_leak() {\\n'\n b' open(\"foo.txt\", O_WRONLY);\\n'\n b'}\\n'\n ),\n },\n tool_settings={\n 'build_type': 'make',\n })\n\n self.assertEqual(review.comments, [\n {\n 'filediff_id': review_files['test1.c'].id,\n 'first_line': 5,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'pointer `i` last assigned on line 4 could be null and '\n 'is dereferenced at line 5, column 12.\\n'\n '\\n'\n 'Column: 12\\n'\n 'Severity: ERROR\\n'\n 'Error code: Null Dereference'\n ),\n },\n {\n 'filediff_id': review_files['test1.c'].id,\n 'first_line': 9,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'The value written to &p (type int*) is never used.\\n'\n '\\n'\n 'Column: 5\\n'\n 'Severity: ERROR\\n'\n 'Error code: Dead Store'\n ),\n },\n {\n 'filediff_id': review_files['test2.c'].id,\n 'first_line': 7,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'resource acquired by call to `open()` at line 7, column '\n '5 is not released after line 7, column 5.\\n'\n '\\n'\n 'Column: 5\\n'\n 'Severity: ERROR\\n'\n 'Error code: Resource Leak'\n ),\n },\n ])\n self.assertEqual(review.general_comments, [])\n\n self.assertSpyCalledWith(\n execute,\n [\n self.tool_exe_path,\n 'run',\n '--no-progress-bar',\n '--',\n 'make',\n ],\n ignore_errors=True,\n with_errors=True)",
"def run_silent(self, opts):\n\n if not opts.add_to_svn and not opts.export_locally:\n print \"This execution will probably not have any effect. Make sure at least one of \"\\\n \"the options -l and -a are activated.\"\n\n svn_OK = False\n if opts.add_to_svn:\n svn_OK = check_svn()\n\n # look for and enter project file\n if not self.enter_project_file():\n raw_input(\"Execution aborted!\\nPress any key to close.\")\n return\n\n # which files are in directories\n self.enter_directories()\n\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n if opts.project_file_link:\n self._pr_intersect()\n\n if opts.meta_results_file_link:\n if not self.enter_meta_results_file():\n raw_input(\"Execution aborted!\\nPress any key to close.\")\n return\n self._ms_intersect()\n\n if opts.summary_reports_must_be_ok:\n self.check_analysis_status()\n self._OK_intersect()\n\n if opts.clean_up_missing_references:\n project_file_references_valid, meta_results_file_references_valid =\\\n self._check_pr_and_ms_references()\n\n if not project_file_references_valid:\n self._clean_up_project_file()\n if not meta_results_file_references_valid:\n self._clean_up_meta_results_file()\n\n # go through all summary.report.json files and extract linked files\n srl = SummaryReportsLinks(self.result_files)\n self.files_linked_from_sum_reps = srl.get_files()\n self.folders_linked_from_sum_reps = srl.get_folders()\n\n include_dashboard = not opts.do_not_export_dashboard\n if svn_OK:\n self.add_to_svn(include_dashboard)\n\n if opts.export_locally:\n self.export_locally(include_dashboard)",
"def tplans_content(ident, args):\n\n struct = {\n \"root\": [\n (\"descr\", True),\n (\"descr_long\", False),\n (\"hooks\", False),\n (\"evars\", False),\n (\"testsuites\", True)\n ],\n \"suites\": [\n (\"name\", True),\n (\"alias\", False),\n (\"hooks\", False),\n (\"hooks_pr_tcase\", False),\n (\"evars\", False),\n (\"evars_pr_tcase\", False)\n ]\n }\n\n violations = []\n\n\n tplans = _index(args.testplans_root, \"TPLAN\")\n\n for tp_fname in tplans:\n tp_fpath = os.sep.join([args.testplans_root, tp_fname])\n\n suites = []\n hooks = []\n\n tplan = None\n try:\n with open(tp_fpath) as tp_fd:\n tplan = yaml.load(tp_fd)\n\n except IOError as exc:\n continue\n except Exception as exc:\n continue\n\n for k in list(set(tplan.keys()) - set([k for k, _ in struct[\"root\"]])):\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"invalid key: %r\" % k\n ))\n\n for k in (k for k, req in struct[\"root\"] if req):\n if k not in tplan.keys():\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"missing required key: %r\" % k\n ))\n\n hooks += tplan[\"hooks\"] if \"hooks\" in tplan else []\n suites += []\n\n if \"testsuites\" not in tplan:\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"missing key 'testsuites'\"\n ))\n continue\n\n for suite in tplan[\"testsuites\"]:\n for k in list(set(suite.keys()) - set([k for k, _ in struct[\"suites\"]])):\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"invalid key: %r\" % k\n ))\n\n for k in (k for k, req in struct[\"suites\"] if req):\n if k not in suite.keys():\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"missing required key: %r\" % k\n ))\n\n if \"name\" in suite:\n suites.append(suite[\"name\"])\n\n if \"hooks\" in suite:\n hooks += suite[\"hooks\"]\n\n if \"hooks_pr_tcase\" in suite:\n hooks += suite[\"hooks_pr_tcase\"]\n\n # Check for existence of suites\n suites = list(set(suites))\n for suite_name in suites:\n suite_fpath = os.sep.join([\n args.testsuites_root,\n \"%s.suite\" % suite_name\n ])\n\n if not os.path.exists(suite_fpath):\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"testsuite: %r, does not exist\" % suite_fpath\n ))\n\n # Check for existence of hooks\n hooks = list(set(hooks))\n for hook_name in hooks:\n\n exists = []\n for tmpl in [\"%s.sh\", \"%s_enter.sh\", \"%s_exit.sh\"]:\n hook_fpath = os.sep.join([\n args.hooks_root,\n tmpl % hook_name\n ])\n exists.append(os.path.exists(hook_fpath))\n\n if not sum(exists):\n violations.append(MESSAGES[ident] % (\n tp_fname,\n \"hook: %r, does not exist\" % hook_name\n ))\n\n return violations",
"def runSingleTest(filePath):\n print filePath\n try:\n runTest(filePath)\n print 'OK'\n except DiffError, e:\n print e.diff()",
"def test_get_file_exists_with_svn_and_revision(self):\n self._test_get_file_exists(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123',\n expected_found=True)",
"def test_download_deployment_run_test_report(self):\n pass",
"def test_subversion_binary_file(host):\n assert host.file(PACKAGE_BINARY).is_file",
"def execute(self):\n\n self._status = 'Running'\n\n for test_plan in self._test_plans:\n try:\n test_plan.execute()\n except Failure as e:\n self._status = 'Fail'\n self._message = ('The \"{0}\" test plan in the test run \"{1}\" failed with the '\n 'message: \"{2}\"'.format(test_plan.name, self.name, e.msg))\n except FatalError as e:\n self._status = 'Fail'\n self._message = ('The \"{0}\" test plan in the test run \"{1}\" encountered the fatal '\n 'error: \"{2}\"'.format(test_plan.name, self.name, e.msg))\n raise FatalError(self._message)\n\n if self._status == 'Fail':\n raise Failure(self._message)\n\n self._status = 'Pass'",
"def test_with_file(self, file_path):\n result = self.run(file_path=file_path)\n return self._handle_test_result(result)",
"def do_test(self, line):\n #hostnames = [\"US1004511WP\", \"DESKTOP-90N8EBG\"]\n hostnames = [\"DESKTOP-90N8EBG\"]\n #hostnames = [\"US1004511WP\"]\n #hostnames = [\"SPEEDYTURTLEW10\"]\n\n param = [\"--dest\", \"C:\\\\tools\\\\scripts\\\\leet_dev\"]\n pg = self._leet.get_plugin(\"get_collection\")\n #param = [\"--source\", \"C:\\Windows\\\\system32\\\\cmd.exe\", \"--dest\", \"C:\\\\tools\\\\scripts\\\\cb_test\"]\n #param = [\"--source\", \"C:\\\\song.txt\", \"--dest\", \"C:\\\\tools\\\\scripts\\\\leet_dev\"]\n #pg = self._leet.get_plugin(\"file_download\")\n # param = [\"--path\", \"C:\\\\maintenance\"]\n # pg = self._leet.get_plugin(\"dirlist\")\n pg.parse_parameters(param)\n\n self._leet.schedule_jobs(pg, hostnames)",
"def test_split_reports_with_execution(self):\n self._test_reports_helper({\"--split-reports\": \"\",\n \"--profile-execution\": \"\"},\n [\"compile.txt\", \"execution.txt\"])",
"def test_run_dir(delimiter, mode, path, expected):\n\n p = Worker(TEST_DEFAULT_SCHEMA, delimiter, mode, None)\n p.run(path)\n\n statements = [s for file in p.results for s in file['statements']]\n statements_expected = [s for file in expected for s in file['statements']]\n\n # Assert statements parsed are correct\n assert sorted(statements, key=lambda k: k['operation'] + k['procedure']) == \\\n sorted(statements_expected, key=lambda k: k['operation'] + k['procedure'])",
"def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)",
"def checkin_trunk(case_dict, svn_cmd, message, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"checkin_trunk\")\n\n os.chdir(case_dict[\"archive_temp_dir\"])\n svn_repo = \"{0}/trunk\".format(case_dict[\"svn_repo_url\"])\n msg = '\"{0}\"'.format(message)\n cmd = [\n \"svn\",\n svn_cmd,\n \"--username\",\n username,\n \"--password\",\n password,\n \".\",\n \"--message\",\n msg,\n ]\n\n if svn_cmd in [\"import\"]:\n # create the trunk dir\n msg = '\"create trunk\"'\n cmd = [\n \"svn\",\n \"mkdir\",\n \"--parents\",\n svn_repo,\n \"--username\",\n username,\n \"--password\",\n password,\n \"--message\",\n msg,\n ]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n \"mkdir\",\n \"--parents\",\n svn_repo,\n \"--username\",\n username,\n \"--password\",\n \"******\",\n \"--message\",\n msg,\n ]\n msg = _call_template.substitute(\n function=\"checkin_trunk\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n\n # create the trunk_tags dir\n tags = \"{0}/trunk_tags\".format(case_dict[\"svn_repo_url\"])\n msg = '\"create trunk_tags\"'\n cmd = [\n \"svn\",\n \"mkdir\",\n tags,\n \"--username\",\n username,\n \"--password\",\n password,\n \"--message\",\n msg,\n ]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n \"mkdir\",\n tags,\n \"--username\",\n username,\n \"--password\",\n \"******\",\n \"--message\",\n msg,\n ]\n msg = _call_template.substitute(\n function=\"checkin_trunk\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n\n msg = '\"{0}\"'.format(message)\n cmd = [\n \"svn\",\n svn_cmd,\n \"--username\",\n username,\n \"--password\",\n password,\n \".\",\n svn_repo,\n \"--message\",\n msg,\n ]\n\n # check-in the trunk to svn\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n svn_cmd,\n \"--username\",\n username,\n \"--password\",\n \"******\",\n \".\",\n \"--message\",\n msg,\n ]\n msg = _call_template.substitute(\n function=\"checkin_trunk\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)"
] | [
"0.60671014",
"0.59154356",
"0.5870971",
"0.57437944",
"0.56704193",
"0.56515604",
"0.5607383",
"0.5580017",
"0.55432963",
"0.55121726",
"0.55041003",
"0.5493844",
"0.5489692",
"0.54305446",
"0.5429826",
"0.5413528",
"0.5403241",
"0.53945506",
"0.5375887",
"0.53562385",
"0.5347507",
"0.53441393",
"0.5336701",
"0.53305006",
"0.5308597",
"0.5307566",
"0.52802336",
"0.52789307",
"0.5274638",
"0.5274014"
] | 0.75792676 | 0 |
Helper function to convert padding format for pad operator. | def transform_padding(pad_width):
num_pad_values = len(pad_width)
onnx_pad_width = [0]*num_pad_values
start_index = 0
# num_pad_values will always be multiple of 2
end_index = int(num_pad_values/2)
for idx in range(0, num_pad_values):
if idx % 2 == 0:
onnx_pad_width[start_index] = pad_width[idx]
start_index += 1
else:
onnx_pad_width[end_index] = pad_width[idx]
end_index += 1
return onnx_pad_width | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_padding(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n input_padding = op.input(\"Paddings\")\n if input_padding:\n padding = g.get_node(input_padding[0])\n padding = infer_value(padding, g.get_params()).numpy().tolist()\n else:\n padding = op.attr(\"paddings\")\n padding = op.attr(\"paddings\")\n value = op.attr(\"value\")\n data_format = op.attr(\"data_format\")\n mode = op.attr(\"mode\")\n assert mode != \"circular\", \"Don't support mod='circular' for PaddlePaddle's padding\"\n if mode == \"replicate\":\n mode = \"edge\"\n\n pad_len = len(padding)\n new_paddings = [0] * (pad_len + 4)\n for i in range(0, pad_len, 2):\n index = -1 - i\n if data_format[:2] != \"NC\":\n index = -3 - i\n new_paddings[index] = padding[i + 1]\n new_paddings[index - 1] = padding[i]\n\n new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]\n\n out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_padding(padding, expected_length=4):\n explicit_paddings = []\n if padding == \"EXPLICIT\":\n raise ValueError(\"'EXPLICIT' is not a valid value for `padding`. To use \"\n \"explicit padding, `padding` must be a list.\")\n if isinstance(padding, (list, tuple)):\n for i, dim_paddings in enumerate(padding):\n if not isinstance(dim_paddings, (list, tuple)):\n raise ValueError(\"When `padding` is a list, each element of `padding` \"\n \"must be a list/tuple of size 2. Received: \"\n f\"padding={padding} with element at index {i} of type \"\n f\"{type(dim_paddings)}\")\n if len(dim_paddings) != 2:\n raise ValueError(\"When `padding` is a list, each element of `padding` \"\n \"must be a list/tuple of size 2. Received: \"\n f\"padding={padding} with element at index {i} of size \"\n f\"{len(dim_paddings)}\")\n explicit_paddings.extend(dim_paddings)\n if len(padding) != expected_length:\n raise ValueError(\n f\"When padding is a list, it must be of size {expected_length}. \"\n f\"Received: padding={padding} of size {len(padding)}\")\n padding = \"EXPLICIT\"\n return padding, explicit_paddings",
"def convert_padding_type(padding_type):\n padding_type = padding_type.casefold()\n if 'constant' in padding_type:\n padding_type = cv2.BORDER_CONSTANT\n elif 'reflect' in padding_type:\n padding_type = cv2.BORDER_REFLECT\n elif 'reflect_101' in padding_type:\n padding_type = cv2.BORDER_REFLECT_101\n elif 'replicate' in padding_type:\n padding_type = cv2.BORDER_REPLICATE\n else:\n padding_type = cv2.BORDER_REFLECT\n print(\"pad type:\",padding_type)\n return padding_type",
"def int_padding(length, val, direction=\">\"):\n return '{0:0{direction}{fill}}'.format(val, direction=direction, fill=length)",
"def str_padding(length, val):\n return '{0:<{fill}}'.format(val, fill=length)",
"def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)",
"def pad(pfile):\n h, b, t = pfile.split('.') # [\"P06144\", \"7\", \"4754\"]\n\n if len(t) == 3:\n t = '0' + t\n elif len(t) == 2:\n t = '00' + t\n elif len(t) == 1:\n t = '000' + t\n else:\n pass\n\n return '.'.join([h, b, t])",
"def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs",
"def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)",
"def pad_string(text, pad):\n\tpad_str = ''\n\tresult = ''\n\tif (type(pad) is int):\n\t\tpad_str = zen_settings['variables']['indentation'] * pad\n\telse:\n\t\tpad_str = pad\n\t\t\n\tnl = get_newline()\n\tlines = text.split(nl)\n\tresult = result + lines[0]\n\tfor line in lines[1:]:\n\t\tresult += nl + pad_str + line\n\t\t\n\treturn result",
"def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:\n return str(val).rjust(width, fillchar)",
"def pkcsPadding():\n test_data = [(20, 'This is a Saturday'),(16, 'NO PAIN NO GAIN!')]\n\n for padlength,data in test_data:\n print padlength, repr(data), repr(pkcs7_pad(padlength, data))",
"def convert_pad(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mxnet_pad_width = convert_string_to_list(attrs.get(\"pad_width\"))\n onnx_pad_width = transform_padding(mxnet_pad_width)\n\n pad_mode = attrs.get(\"mode\")\n\n if pad_mode == \"constant\":\n pad_value = float(attrs.get(\"constant_value\")) \\\n if \"constant_value\" in attrs else 0.0\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode='constant',\n value=pad_value,\n pads=onnx_pad_width,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode=pad_mode,\n pads=onnx_pad_width,\n name=name\n )\n\n return [node]",
"def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))",
"def float_padding(length, val, decimals=2):\n return '{0:0>{fill}.{precision}f}'.format(float(val), fill=length, precision=decimals)",
"def pad(number, width=0):\n return str(number).zfill(width)",
"def _dynamic_padding(self, batch_data, pad_id):\n pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = min(self.max_q_len, max(batch_data['question_length']))\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def pad_pattern():\n pattern = is_op(\"nn.pad\")(wildcard(), is_constant())\n return pattern",
"def add_padding(text1: str) -> str:\n\n pad_len = 8 - (len(text1) % 8)\n return text1 + (pad_len * '\\0')",
"def parse_padding(padding: PaddingType) -> Tuple4IntType:\n if padding is False or None:\n padding = 0\n assert isinstance(padding, PaddingInstance)\n\n if isinstance(padding, NumberInstance):\n assert padding >= 0, 'padding cannot be a negative number'\n return int(padding), int(padding), int(padding), int(padding)\n else:\n assert 1 <= len(padding) <= 4, 'padding must be a tuple of 2, 3 or 4 elements'\n for i in range(len(padding)):\n assert isinstance(padding[i], NumberInstance), \\\n 'all padding elements must be integers or floats'\n assert padding[i] >= 0, \\\n 'all padding elements must be equal or greater than zero'\n if len(padding) == 1:\n return int(padding[0]), int(padding[0]), int(padding[0]), int(padding[0])\n elif len(padding) == 2:\n return int(padding[0]), int(padding[1]), int(padding[0]), int(padding[1])\n elif len(padding) == 3:\n return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[1])\n else:\n return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[3])",
"def _pad8(s):\n return '%08d' % int(s)",
"def _padboth(width, s):\n fmt = \"{0:^%ds}\" % width\n return fmt.format(s)",
"def _get_padding(w, h):\n dim_diff = np.abs(h - w)\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n return (0, pad1, 0, pad2) if h <= w else (pad1, 0, pad2, 0)",
"def padding(input_value, value):\n padding_value = str(input_value)\n for i in range(value - len(str(input_value))):\n padding_value += \" \"\n return padding_value",
"def pad(text, width, pad_character=\" \"):\n\n length = len(text)\n if width < 0 and length < -width:\n return text + (-width - length) * pad_character\n elif width > 0 and length < width:\n return (width - length) * pad_character + text\n else:\n return text",
"def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x",
"def padAlignment(align, applyPadding=True):\n if type(align) in [dict, np.ndarray, list]:\n align = pd.Series(align)\n\n \"\"\"Replace * and # with - and - \"\"\"\n for ind in align.index:\n if '*' in align[ind]:\n align[ind] = align[ind].replace('*', '-')\n if '#' in align[ind]:\n align[ind] = align[ind].replace('#', '-')\n \"\"\"Pad with gaps if the lengths are all the same\"\"\"\n if applyPadding:\n L = align.map(len).unique()\n if len(L) > 1:\n #print 'Sequences have different lengths (pading with gaps): %s' % L\n L = L.max()\n for ind in align.index:\n if len(align[ind]) < L:\n align[ind] = align[ind].ljust(L, '-')\n else:\n L = L.max()\n return align",
"def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs",
"def pad(data, padding_char, length):\n if is_null_or_empty(padding_char):\n padding_char = DEFAULT_PAD_CHAR\n\n padded = create(padding_char, length)\n string_buf = padded + data + padded\n return string_buf"
] | [
"0.68323696",
"0.6653805",
"0.6588574",
"0.6539116",
"0.6471568",
"0.6465333",
"0.64627033",
"0.6430866",
"0.63953453",
"0.63670754",
"0.6362064",
"0.6358119",
"0.63537",
"0.63283384",
"0.63223386",
"0.6321739",
"0.6320306",
"0.6287044",
"0.62865645",
"0.62791353",
"0.6271379",
"0.62621254",
"0.6240058",
"0.6203671",
"0.6194152",
"0.61824286",
"0.61787677",
"0.61770207",
"0.6164591",
"0.61611795"
] | 0.68918765 | 0 |
Helper function to convert a string version of Boolean attributes to integer for ONNX. Takes attribute dictionary and attr_name as parameters. | def get_boolean_attribute_value(attrs, attr_name):
return 1 if attrs.get(attr_name, 0) in ["True", "1"] else 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_to_intbool(val_str):\n return 1 if val_str == 'Yes' else 0",
"def int_from_bool_as_string(subject):\r\n return bool_from_string(subject) and 1 or 0",
"def _bool_to_int(self, bool_arg):\n if bool_arg == True:\n return 1\n else:\n return 0",
"def bool_attr(attr):\n if attr.lower() == \"true\":\n val = True\n elif attr.lower() == \"false\":\n val = False\n else:\n raise EzXMLError(\"Must be \"\\\n \"'true' or 'false'. Not %s\" % (attr))\n return val",
"def convertToInt(boolean: bool) -> int:\n ...",
"def preprocess_bools(args):\n for arg in args:\n if type(args[arg]) == bool:\n args[arg] = int(args[arg])\n return args",
"def cast_to_integer(array, attributes):\n atts = array.att_names\n\n for nm, typ, null in array.sdbtype.full_rep:\n if nm not in attributes:\n continue\n if 'int' in typ:\n continue\n if typ == 'bool':\n x = _new_attribute_label('__cast', array)\n array = array.attribute_rename(nm, x).apply(nm, 'iif(%s, 1, 0)' % x)\n continue\n else:\n raise ValueError(\"Don't know how to turn %s to int64\" % typ)\n\n return array.project(*atts)",
"def _maybe_decode_attr(da, attr):\n # TODO: Fix this so that bools get written as attributes just fine\n # bool -> int\n if (attr in da.attrs) and (type(da.attrs[attr] == bool)):\n da.attrs[attr] = int(da.attrs[attr])\n\n return da",
"def get_integer(bool_var):\n if bool_var:\n return \"1\"\n else:\n return \"0\"",
"def bool_converter(self, bool_var):\n if bool_var == True:\n result = 1\n elif bool_var == False:\n result = 0\n return result",
"def cast(value):\n try:\n value = int(value)\n except ValueError:\n if value.lower().strip() in [\"true\", \"t\", \"1\", \"yes\"]:\n value = True\n elif value.lower().strip() in [\"false\", \"f\", \"no\", \"0\"]:\n value = False\n return value",
"def parse_bool_str(attr, key, default='False'):\n return attr.get(key, default).strip().lower() in ['true', '1', 't', 'y', 'yes']",
"def boolify(x):\r\n if isinstance(x, str):\r\n x = x.lower()\r\n try:\r\n return _BOOLIFY_DICT[x]\r\n except KeyError as e:\r\n raise ValueError('Can\\'t boolify value: %r' % x) from None",
"def value_to_int(attrib, key):\n val = attrib.get(key, 0)\n if isinstance(val, str):\n if val.isspace() or val == '':\n return 0\n return val",
"def parameter_cast_to_bool(ival):\r\n if type(ival) is bool:\r\n return ival\r\n if type(ival) is int:\r\n return bool(ival)\r\n if type(ival) is str:\r\n lower = ival.lower()\r\n if lower == 'no' or lower == 'false' or lower == '0':\r\n return False\r\n if lower == 'yes' or lower == 'true' or lower == '1':\r\n return True\r\n return None",
"def test_int_to_bool_true(self):\n self.assertEqual(TransformList.int_to_bool({'varname': 1}, 'varname'), True)",
"def cast_string(self, subject):\n for key in subject.keys():\n if key[0:3] == \"is_\":\n if subject[key] == '0':\n subject[key] = False\n else:\n subject[key] = True\n return subject",
"def IDX_CHECK(attribute_name):\n if attribute_name == 'Alt':\n return 0\n if attribute_name == 'Bar':\n return 1\n if attribute_name == 'Fri':\n return 2\n if attribute_name == 'Hun':\n return 3\n if attribute_name == 'Pat':\n return 4\n if attribute_name == 'Price':\n return 5\n if attribute_name == 'Rain':\n return 6\n if attribute_name == 'Res':\n return 7\n if attribute_name == 'Type':\n return 8\n if attribute_name == 'Est':\n return 9",
"def _parse_param_as_bool(\n enodeb: EnodebAcsStateMachine,\n param_name: ParameterName\n) -> str:\n try:\n param = enodeb.get_parameter(param_name)\n pval = param.lower().strip()\n if pval in {'true', '1'}:\n return '1'\n elif pval in {'false', '0'}:\n return '0'\n else:\n logging.warning(\n '%s parameter not understood (%s)', param_name, param)\n return '0'\n except (KeyError, ConfigurationError):\n return '0'",
"def _str_to_bool(s):\r\n if s.lower() not in ['true', 'false']:\r\n raise ValueError('Argument needs to be a boolean, got {}'.format(s))\r\n return {'true': True, 'false': False}[s.lower()]",
"def BoolTypeConvert(bool_type):\n if isinstance(bool_type, bool):\n if bool_type:\n return 'y'\n else:\n return 'n'\n elif isinstance(bool_type, str):\n if bool_type == 'y' or bool_type.lower() == 'true':\n return True\n elif bool_type == 'n' or bool_type.lower() == 'false':\n return False",
"def _convert_attribute_to_tag(key, attr):\n if isinstance(attr, bool):\n return jaeger.Tag(key=key, vBool=attr, vType=jaeger.TagType.BOOL)\n if isinstance(attr, str):\n return jaeger.Tag(key=key, vStr=attr, vType=jaeger.TagType.STRING)\n if isinstance(attr, int):\n return jaeger.Tag(key=key, vLong=attr, vType=jaeger.TagType.LONG)\n if isinstance(attr, float):\n return jaeger.Tag(key=key, vDouble=attr, vType=jaeger.TagType.DOUBLE)\n logger.warning(\"Could not serialize attribute %s:%r to tag\", key, attr)\n return None",
"def bools(key):\n @_copy_docs(distutils.util.strtobool)\n def to_bool(value):\n return bool(distutils.util.strtobool(value))\n\n return Converter(key, to_bool, str)",
"def convertStringToBool(nodeText):\n stringsThatMeanTrue = list(['yes','y','true','t','on'])\n val = False\n if nodeText.lower() in stringsThatMeanTrue:\n val = True\n return val",
"def _str_to_bool(s):\n if s.lower() not in ['true', 'false']:\n raise ValueError('Argument needs to be a '\n 'boolean, got {}'.format(s))\n return {'true': True, 'false': False}[s.lower()]",
"def _str_to_bool(s):\n if s.lower() not in ['true', 'false']:\n raise ValueError('Argument needs to be a '\n 'boolean, got {}'.format(s))\n return {'true': True, 'false': False}[s.lower()]",
"def _str_to_bool(s):\n if s.lower() not in ['true', 'false']:\n raise ValueError('Argument needs to be a '\n 'boolean, got {}'.format(s))\n return {'true': True, 'false': False}[s.lower()]",
"def _str_to_bool(s):\n if s.lower() not in ['true', 'false']:\n raise ValueError('Argument needs to be a '\n 'boolean, got {}'.format(s))\n return {'true': True, 'false': False}[s.lower()]",
"def _cast_boolean(value):\n _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,\n '0': False, 'no': False, 'false': False, 'off': False, '': False}\n value = str(value)\n if value.lower() not in _BOOLEANS:\n raise ValueError('Not a boolean: %s' % value)\n\n return _BOOLEANS[value.lower()]",
"def _BoolsToInts(arg_list):\n result = []\n for arg in arg_list:\n if isinstance(arg, (list, tuple)):\n result.append(_BoolsToInts(arg))\n elif arg is True:\n result.append(1)\n elif arg is False:\n result.append(0)\n else:\n result.append(arg)\n\n return result"
] | [
"0.64697516",
"0.64594436",
"0.62451714",
"0.6019205",
"0.6004195",
"0.599485",
"0.5975256",
"0.59653413",
"0.5840328",
"0.5804822",
"0.56859833",
"0.56824034",
"0.5662187",
"0.5627271",
"0.5594165",
"0.5585451",
"0.5547492",
"0.5395261",
"0.53894955",
"0.53652155",
"0.53648996",
"0.5361133",
"0.5336285",
"0.5331353",
"0.5323965",
"0.5323965",
"0.5323965",
"0.5323965",
"0.53139323",
"0.53053457"
] | 0.6708033 | 0 |
Helper function to create a basic operator node that doesn't contain op specific attrs | def create_basic_op_node(op_name, node, kwargs):
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def base_operator(self):\n raise NotImplementedError()",
"def _remove_operator(self, operator):",
"def convert_logical_not(node, **kwargs):\n return create_basic_op_node('Not', node, kwargs)",
"def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper",
"def op(self) -> str:\n return self._node.get(\"op\")",
"def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator",
"def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator",
"def _append_operator(self, operator):",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression",
"def __init__(self):\n super(OperatorCodegen, self).__init__()",
"def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")"
] | [
"0.63253284",
"0.632039",
"0.62180704",
"0.62112194",
"0.6123615",
"0.61039203",
"0.61005336",
"0.6060044",
"0.600828",
"0.6002395",
"0.60003734",
"0.595907",
"0.5952342",
"0.5919387",
"0.59105414",
"0.5905878",
"0.589422",
"0.58619446",
"0.58419603",
"0.58360064",
"0.5822752",
"0.5795959",
"0.5762364",
"0.5757603",
"0.57442415",
"0.5720301",
"0.57133347",
"0.5701792",
"0.5695357",
"0.56902695"
] | 0.7472675 | 0 |
Helper function to convert weights and inputs. | def convert_weights_and_inputs(node, **kwargs):
name, _, _ = get_inputs(node, kwargs)
if kwargs["is_input"] is False:
weights = kwargs["weights"]
initializer = kwargs["initializer"]
np_arr = weights[name]
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
dims = np.shape(np_arr)
tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=name,
data_type=data_type,
dims=dims,
vals=np_arr.flatten().tolist(),
raw=False,
)
)
return [tensor_node]
else:
tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
return [tval_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _input(self, inputs):\n return sum([w*i for w,i in zip(self._weights, inputs)])",
"def normalize_input(inputs: [float]) -> [float]:",
"def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return input_user, input_item, input_rating, user_embeddings, item_embeddings",
"def _set_weights(self, weights):\r\n self.weights = weights.reshape(self.output_size, self.input_size+1)",
"def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1",
"def _TransformInputs(self, _):\n raise NotImplementedError()",
"def my_assign_weights(context, data):\n pass",
"def standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n if x_weight is None or (isinstance(x_weight, (list, tuple)) and\n len(x_weight) == 0): # pylint: disable=g-explicit-length-test\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, (list, tuple)):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) + ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' + weight_type + '` '\n 'should be either a list or a dict. '\n 'Provided `' + weight_type + '` type not understood: ' +\n str(x_weight))",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def normalize_weights(self, labels, weights):\n if self._ragged:\n labels, _, weights, _ = utils.ragged_to_dense(labels, None, weights)\n return self._normalize_weights_impl(labels, weights)",
"def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights",
"def _forward(self, input_data, weights):\n\n # handle None input\n if self.num_inputs == 0 and input_data is None:\n return np.zeros(self.output_shape)\n\n return np.zeros(self.output_shape)",
"def get_weights(self):",
"def reconstruct_input_ext(self, model_in):",
"def _preprocess_inputs(\n self,\n membership: types.TensorType,\n predictions: types.TensorType,\n sample_weight: Optional[types.TensorType] = None\n ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:\n # pyformat: disable\n # pyformat: enable\n # Transform membership if transform is provided and cast.\n if self.membership_transform is not None:\n membership = self.membership_transform(membership)\n membership = tf.cast(membership, tf.float32)\n # Transform predictions if transform is provided and cast.\n if self.predictions_transform is not None:\n predictions = self.predictions_transform(predictions)\n predictions = tf.cast(predictions, tf.float32)\n # Transform weights.\n shape = [tf.shape(membership)[0], 1]\n if sample_weight is None:\n sample_weight = 1.0\n sample_weight = tf.cast(sample_weight, tf.float32)\n sample_weight += tf.zeros(\n shape, dtype=tf.float32) # Broadcast to the correct shape.\n sample_weight = tf.cast(sample_weight, tf.float32)\n # Raise error if any individual weights are negative.\n assert_op = tf.debugging.assert_non_negative(\n sample_weight,\n message='`sample_weight` cannot contain any negative weights, given: {}'\n .format(sample_weight))\n with tf.control_dependencies([assert_op]): # Guarantee assert is run first.\n normed_weights = tf.math.divide_no_nan(sample_weight,\n tf.reduce_sum(sample_weight))\n return membership, predictions, normed_weights",
"def standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None and sample_weight_mode != 'samplewise':\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError(\n 'Found a sample_weight array with shape {}. In order to '\n 'use timestep-wise sample weights, you should specify '\n 'sample_weight_mode=\"temporal\" in compile(); founssd \"{}\" '\n 'instead. If you just mean to use sample-wise weights, '\n 'make sure your sample_weight array is 1D.'.format(\n sample_weight.shape, sample_weight_mode))\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tf_type(sample_weight) and\n y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + ' for an input with shape ' +\n str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n\n if tensor_util.is_tf_type(y):\n # Few classes are expected, so densifying is reasonable.\n keys = np.array(sorted(class_weight.keys()))\n values = np.array([class_weight[i] for i in keys])\n weight_vector = np.zeros(np.max(keys) + 1)\n weight_vector[:] = np.nan\n weight_vector[keys] = values\n\n y_classes = smart_cond.smart_cond(\n len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1,\n lambda: backend.argmax(y, axis=1),\n lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n class_sample_weight = array_ops.gather(weight_vector, y_classes)\n gen_array_ops.check_numerics(\n class_sample_weight,\n 'Invalid classes or class weights detected. 
NaN values indicate that '\n 'an appropriate class weight could not be determined.')\n class_sample_weight = math_ops.cast(class_sample_weight, backend.floatx())\n if sample_weight is not None:\n sample_weight = math_ops.cast(\n tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight),\n backend.floatx(),\n )\n else:\n y_classes = y\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError(\n '`class_weight` must contain all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.' % (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None",
"def __create_conv_weights(self, conv_weights):\n\n conv_xform_weights = []\n curr_n = 32\n k = 5\n for idx, conv_w in enumerate(conv_weights):\n\n curr_n = self.n_values[idx]\n W = self.__create_W_matrix(curr_n, conv_w)\n conv_xform_weights.append(W)\n\n return conv_xform_weights",
"def _get_weights(dist, weights):\n if weights in (None, \"uniform\"):\n return None\n\n if weights == \"distance\":\n # if user attempts to classify a point that was zero distance from one\n # or more training points, those training points are weighted as 1.0\n # and the other points as 0.0\n if dist.dtype is np.dtype(object):\n for point_dist_i, point_dist in enumerate(dist):\n # check if point_dist is iterable\n # (ex: RadiusNeighborClassifier.predict may set an element of\n # dist to 1e-6 to represent an 'outlier')\n if hasattr(point_dist, \"__contains__\") and 0.0 in point_dist:\n dist[point_dist_i] = point_dist == 0.0\n else:\n dist[point_dist_i] = 1.0 / point_dist\n else:\n with np.errstate(divide=\"ignore\"):\n dist = 1.0 / dist\n inf_mask = np.isinf(dist)\n inf_row = np.any(inf_mask, axis=1)\n dist[inf_row] = inf_mask[inf_row]\n return dist\n\n if callable(weights):\n return weights(dist)",
"def gen_in_weights(self):\n\n gen = Generator(device = self.device).manual_seed(self.random_seed)\n n, m = self.n_nodes_, self.n_inputs_\n in_w_shape_ = (n, m)\n print('m,n', m,n)\n\n #at the moment all input weight matrices use uniform bias.\n self.bias = rand( n, 1, generator = gen, device = self.device) * 2 - 1\n\n #weights\n if self.input_weight_type_ == \"uniform\":\n self.in_weights = rand((n,m), generator = gen, device = self.device)\n self.in_weights = self.in_weights * 2 - 1\n print('in_weights', self.in_weights.shape)\n\n elif self.input_weight_type_ == \"exponential\":\n printc(\"BUILDING SIGN_\", 'fail')\n sign1 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n sign2 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n\n self.sign_dual = (sign1, sign2)\n self.sign = np.concatenate((sign1, sign2), axis = 1)\n\n #regularization\n self.feedback_weights = rand(n, 1, **self.tensorArgs, generator = gen) * 2 - 1\n\n #regularization\n self.noise_z = normal(0, 1, size = (n, m), **self.tensorArgs, generator = gen)",
"def normalize_weights(time_arr, imp_arr, cost_arr):\n tot_time = sum(time_arr)\n tot_imp = sum(imp_arr)\n tot_cost = sum(cost_arr)\n time_norm = []\n imp_norm = []\n cost_norm = []\n for index in range(len(time_arr)):\n time_norm.append(time_arr[index] / tot_time)\n imp_norm.append(imp_arr[index] / tot_imp)\n cost_norm.append(cost_arr[index] / tot_cost)\n return time_norm, imp_norm, cost_norm",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None):\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tensor(sample_weight)\n and y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n ' for an input with shape ' + str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for ' '3+ dimensional targets.')\n\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = K.argmax(y, axis=1)\n # y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n # class_sample_weight = np.asarray(\n # [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n keys = list(map(lambda x: tf.cast(x, tf.int32), class_weight.keys()))\n values = list(map(lambda x: tf.cast(x, tf.int32), class_weight.values()))\n key_value = tf.contrib.lookup.KeyValueTensorInitializer(keys, values)\n class_weight_table = tf.contrib.lookup.HashTable(key_value, -1)\n class_sample_weight = class_weight_table.lookup(tf.cast(y_classes, tf.int32))\n class_weight_table.init.run(session=K.get_session())\n\n # print(K.get_session().run(class_sample_weight))\n # class_sample_weight = np.asarray(\n # [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n # if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n # existing_classes = set(y_classes)\n # existing_class_weight = set(class_weight.keys())\n # raise ValueError('`class_weight` must contain all classes in the data.'\n # ' The classes %s exist in the data but not in '\n # '`class_weight`.' 
% (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None",
"def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n",
"def weight_to_int_fn(weights):\n # extract absolute non-zero weights\n abs_non_zero_wts = {abs(wt) for wt in weights if wt != 0}\n\n # if all weights are zero\n if len(abs_non_zero_wts) == 0:\n # return zero (int) function\n return lambda wt: 0\n\n # extract smallest and largest absolute non-zero weights\n min_abs_non_zero_wt = min(abs_non_zero_wts)\n max_abs_non_zero_wt = max(abs_non_zero_wts)\n\n # if largest (absolute) weight is less than \"infty\" and all weight are ints\n if max_abs_non_zero_wt < infty() / 10 and all(isinstance(wt, int) for wt in weights):\n # return identity function\n return lambda wt: wt\n\n # define scaling so largest (absolute) weight is an order of magnitude smaller than \"infty\"\n scaling = infty() / 10 / max_abs_non_zero_wt\n\n # define _weight_to_int using scaling\n def _weight_to_int(weight):\n # multiply weight by scaling (round to nearest with ties going away from zero).\n return int(decimal.Decimal(weight * scaling).to_integral_value(rounding=decimal.ROUND_HALF_UP))\n\n # warn if smallest (absolute) weight is zero or less than 3 significant figures.\n scaled_min_abs_non_zero_wt = _weight_to_int(min_abs_non_zero_wt)\n if scaled_min_abs_non_zero_wt == 0:\n logger.warning('SCALED MINIMUM ABSOLUTE NON-ZERO WEIGHT IS ZERO')\n elif scaled_min_abs_non_zero_wt < 100:\n logger.warning('SCALED MINIMUM ABSOLUTE NON-ZERO WEIGHT LESS THAN 3 S.F.:{}'.format(scaled_min_abs_non_zero_wt))\n\n return _weight_to_int",
"def init_weights_(self):\n raise NotImplementedError",
"def _get_weights(layer_name, weights):\n W = weights[layer_name][0]\n b = weights[layer_name][1]\n return W, b",
"def _tie_or_clone_weights(self, output_embeddings, input_embeddings):\n if output_embeddings.weight.shape == input_embeddings.weight.shape:\n output_embeddings.weight = input_embeddings.weight\n elif output_embeddings.weight.shape == input_embeddings.weight.t(\n ).shape:\n output_embeddings.weight.set_value(input_embeddings.weight.t())\n else:\n raise ValueError(\n \"when tie input/output embeddings, the shape of output embeddings: {}\"\n \"should be equal to shape of input embeddings: {}\"\n \"or should be equal to the shape of transpose input embeddings: {}\".\n format(output_embeddings.weight.shape, input_embeddings.weight.\n shape, input_embeddings.weight.t().shape))\n if getattr(output_embeddings, \"bias\", None) is not None:\n if output_embeddings.weight.shape[\n -1] != output_embeddings.bias.shape[0]:\n raise ValueError(\n \"the weight lase shape: {} of output_embeddings is not equal to the bias shape: {}\"\n \"please check output_embeddings configuration\".format(\n output_embeddings.weight.shape[\n -1], output_embeddings.bias.shape[0]))",
"def _mutate_weights(self, weights):\n return weights + normal(loc=0, scale=self.standard_deviation, size=weights.shape[0])",
"def weighted_sum(self, inputs):\r\n weighted_sum = 0\r\n for i in range(self.num_inputs):\r\n weighted_sum += self.weights[i]*inputs[i]\r\n return weighted_sum",
"def update_weights(self):\n\t\tpass"
] | [
"0.68360084",
"0.64592713",
"0.6424411",
"0.6227467",
"0.6222293",
"0.61991525",
"0.618634",
"0.61317295",
"0.61289394",
"0.6118224",
"0.6114964",
"0.6105876",
"0.6105316",
"0.60550016",
"0.6040807",
"0.6006805",
"0.60003716",
"0.5966255",
"0.5961394",
"0.59462756",
"0.5920563",
"0.5900017",
"0.5881188",
"0.58427256",
"0.5832244",
"0.5823301",
"0.58058167",
"0.57947654",
"0.5770471",
"0.57613295"
] | 0.7170687 | 0 |
Map MXNet's convolution operator attributes to onnx's Conv operator and return the created node. | def convert_convolution(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))
pad_dims = pad_dims + pad_dims
conv_node = onnx.helper.make_node(
"Conv",
inputs=input_nodes,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
dilations=dilations,
pads=pad_dims,
group=num_group,
name=name
)
return [conv_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def _create_conv(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support dilation\n dilation = onnx_node.getattr('dilations', 1)\n if dilation != 1 and list(dilation) != [1, 1]:\n raise ValueError(\"Not implemented yet for dilation\")\n group = onnx_node.getattr('group', 1)\n\n # only support 1d or 2d\n if len(kernel) > 2:\n raise ValueError(\"Only implemented for 1d or 2d\")\n\n bias = len(inputs) == 3\n x = inputs[0]\n x_shape = inputs[0].shape\n in_channels = x_shape[1]\n w_shape = inputs[1].shape\n out_channels = w_shape[0]\n assert w_shape[1] == in_channels // group\n\n if inputs[0].device.id() == -1:\n if group != 1:\n raise NotImplementedError\n else:\n handle = singa.ConvHandle(x.data, kernel, stride, padding,\n in_channels, out_channels, bias,\n group)\n else:\n handle = singa.CudnnConvHandle(x.data, kernel, stride, padding,\n in_channels, out_channels, bias,\n group)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)",
"def make_convolution_op(onnx_node, ng_inputs, transpose=False):\n # type: (NodeWrapper, List[TensorOp], bool) -> Op\n if len(ng_inputs) == 3:\n x, weights, bias = ng_inputs\n elif len(ng_inputs) == 2:\n x, weights = ng_inputs\n bias = ng.constant(0)\n else:\n raise ValueError('Conv node (%s): unexpected number of input values: %d.',\n onnx_node.name, len(ng_inputs))\n\n # Reorder x axes from ONNX convention (N, C, H, W, D) to ngraph (C, D, H, W, N)\n # Reorder weights axes from ONNX (K, J, R, S, T) to ngraph (J, T, R, S, K)\n # Axis names follow https://ngraph.nervanasys.com/index.html/axes.html\n if len(x.axes) == 4: # 2D convolution\n x = reorder_axes(x, 'NCHW', 'CDHWN')\n weights = reorder_axes(weights, 'KJRS', 'JTRSK')\n elif len(x.axes) == 5: # 3D convolution\n x = reorder_axes(x, 'NCHWD', 'CDHWN')\n weights = reorder_axes(weights, 'KJRST', 'JTRSK')\n else:\n raise NotImplementedError('Conv node (%s): only 2D and 3D convolutions are supported.',\n onnx_node.name)\n\n groups = onnx_node.get_attribute_value('group', 1)\n if groups != 1:\n raise NotImplementedError('Conv node (%s): `group` attribute value %d not supported.',\n onnx_node.name, groups)\n\n # Prepare ngraph convolution operation\n conv_params = get_conv_params(onnx_node)\n output_axes = make_conv_output_axes(x, weights, conv_params)\n\n if transpose:\n conv = ng.deconvolution(conv_params, x, weights, axes=output_axes)\n\n else:\n conv = ng.convolution(conv_params, x, weights, axes=output_axes)\n\n conv = cast_to_pos_axes(conv) + bias\n\n # ONNX output should have axes in the order N, C, H, W, D\n conv = reorder_axes(conv, 'CDHWN', 'NCHWD')\n\n if len(ng_inputs[0].axes) == 4: # 2D convolution, slice away the D axis from output\n conv = ng.tensor_slice(conv, [slice(None), slice(None), slice(None), slice(None), 0])\n\n return conv",
"def create_attrs(params):\n return {\n 'type': 'Convolution',\n 'op': params['type_str'],\n 'bias_addable': True,\n 'bias_term': params['bias_term'],\n 'pad': np.array([[0, 0], [0, 0],\n [params['padding'][1], params['padding'][1]],\n [params['padding'][0], params['padding'][0]]], dtype=np.int64),\n 'pad_spatial_shape': np.array([[params['padding'][1], params['padding'][1]],\n [params['padding'][0], params['padding'][0]]], dtype=np.int64),\n 'dilation': np.array([1, 1,\n params['dilate'][1], params['dilate'][0]], dtype=np.int64),\n 'output_spatial_shape': None,\n 'output_shape': None,\n 'stride': np.array([1, 1, params['stride'][1],\n params['stride'][0]], dtype=np.int64),\n 'infer': caffe_conv2d_infer,\n 'group': params['group'],\n 'output': params['output'],\n 'kernel_spatial': np.array([params['kernel'][1], params['kernel'][0]], dtype=np.int64)\n }",
"def _fix_channels(self, op, attrs, inputs):\n if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:\n return attrs\n weight_name = self._renames[inputs[1]]\n if not weight_name in self._params:\n raise ValueError(\"Unable to get channels/units attr from onnx graph.\")\n else:\n wshape = self._params[weight_name].shape\n assert len(wshape) >= 2, \"Weights shape is invalid: {}\".format(wshape)\n channels = wshape[0]\n if op in [mx.sym.FullyConnected]:\n attrs['num_hidden'] = channels\n else:\n attrs['num_filter'] = channels\n return attrs",
"def get_conv_params(onnx_node): # type: (NodeWrapper) -> Dict\n pad_h, pad_w, pad_d = get_pads(onnx_node)\n str_h, str_w, str_d = get_strides(onnx_node)\n dil_h, dil_w, dil_d = get_dilations(onnx_node)\n\n return {'pad_d': pad_d, 'pad_h': pad_h, 'pad_w': pad_w,\n 'str_d': str_d, 'str_h': str_h, 'str_w': str_w,\n 'dil_d': dil_d, 'dil_h': dil_h, 'dil_w': dil_w}",
"def _conv_op(self, in_obj, channel_axes, spatial_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n output_axes = self._output_axes(in_obj, pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_spatial_shape,\n self.strides, pad_int, self.dilation)\n return ng.convolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)",
"def _conv(\n conv_type,\n nd_util,\n input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n):\n weight_shape = list(weight.shape)\n return FunctionLib.apply(\n conv_type,\n input.device,\n [input, weight] + ([bias] if bias else []),\n in_channels=weight_shape[1],\n out_channels=weight_shape[0],\n kernel_shape=weight_shape[2:],\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation),\n group=groups,\n bias=bias is not None,\n dtype=weight.dtype,\n input_shape=list(input.shape),\n )",
"def convolution(self, x, w, name=\"\"):\n if self.gpu:\n return tf.nn.conv2d(x, w, strides=self.conv_stride, padding=self.conv_padding, name=name)\n\n return tf.nn.conv2d(x, w, strides=self.conv_stride, padding=self.conv_padding,\n use_cudnn_on_gpu=False, name=name)",
"def _conv(self, indim, outdim, ksize, stride, padding):\n\n return nn.Sequential(\n nn.BatchNorm2d(indim),\n nn.Conv2d(indim, outdim, ksize, stride, padding),\n self.activ(),\n )",
"def convert_conv2d(g, op, block):\n\n dilations = op.attr(\"dilations\")\n groups = op.attr(\"groups\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n strides = op.attr(\"strides\")\n\n kernel = g.get_node(op.input(\"Filter\")[0])\n input_x = g.get_node(op.input(\"Input\")[0])\n data_layout = op.attr(\"data_format\")\n out_channels, _, k_h, k_w = infer_shape(kernel)\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n # Handle history issue of PaddlePaddle\n # while padding_algorithm == \"SAME\"\n # dilations will be set to [1, 1]\n dilations = [1, 1]\n input_x = autopad(input_x, strides, [k_h, k_w], dilations)\n paddings = [0, 0]\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Conv is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n out = _op.nn.conv2d(\n input_x,\n kernel,\n strides=strides,\n padding=paddings,\n dilation=dilations,\n groups=groups,\n channels=out_channels,\n kernel_size=[k_h, k_w],\n data_layout=data_layout,\n )\n g.add_node(op.output(\"Output\")[0], out)",
"def get_convolution_op(input_shape, output_shape, kernel_shape):\n filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)\n if conv_type == 'NORMAL':\n def conv_op(inputs, weight, name='generic_convolution'):\n with tf.name_scope(name):\n if padding_type=='VALID' and np.sum(padding) > 0:\n inputs = tf.pad(inputs, padding, name='padding')\n return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')\n\n else:\n def conv_op(inputs, weight, name='generic_convolution'):\n if padding_type=='SAME':\n padded_output = [padded_shape[0]] + output_shape[-3:]\n else:\n padded_output = padded_shape\n with tf.name_scope(name):\n if padded_output[0] is None:\n batch_size = tf.shape(inputs)[0]\n padded_output = [batch_size] + padded_output[1:]\n\n output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')\n if padding_type=='VALID' and np.sum(padding) > 0:\n output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],\n [-1] + output_shape[-3:], name='cropping')\n return output\n\n return filter_shape, conv_op",
"def conv(input, inch, outch, filter_h, filter_w, stride_h, stride_w, padding='SAME', name='conv_layer'):\n with tf.name_scope(name) as scope:\n layer = tf.layers.conv2d(input, outch, filter_h, strides=(stride_h, stride_w), padding=\"same\",\n activation=tf.nn.relu)\n return layer",
"def to_device(self, device):\n for i in range(self.num_layers):\n getattr(self, \"conv{}\".format(i+1)).to_device(device)\n self.to(device)\n return self",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def _conv_op(self, in_obj, channel_axes, spatial_axes):\n\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n\n output_axes = self._output_axes(in_obj.axes,\n pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_shape,\n self.strides, pad_int, self.dilation)\n return ng.deconvolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)",
"def _conv2d(self, x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')",
"def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n weight_init=weight, has_bias=False, pad_mode=\"valid\")",
"def create_conv2d(self, x, w, b, stride = 1, name = None):\n x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='VALID', name = name)\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)",
"def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n weight_init=weight,\n has_bias=False,\n pad_mode=\"valid\",\n )",
"def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)",
"def convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]",
"def all_conv_ops(self):\n pass",
"def conv(self, inputs, filters, kernel_size, strides, padding='SAME', name='conv_layer'):\n input_channels = inputs[-1]\n kernel = tf.Variable(tf.random.truncated_normal(shape=[kernel_size, kernel_size, input_channels, filters]),\n dtype=tf.float32, name='kernel')\n bias = tf.Variable(tf.zeros(shape=[filters]), name='bias')\n conv = tf.nn.conv2d(inputs, filter=kernel,\n strides=[1, strides, strides, 1],\n padding=padding, name='conv')\n out = tf.nn.relu(conv + bias, name='relu')\n return out",
"def conv_pattern():\n pattern = is_op(\"nn.conv2d\")(wildcard(), is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern",
"def apply_filter_operator(self, input, filter_operator):\n\n input = input.permute(0,2,1).contiguous().view(-1,self.num_nodes)\n filter_operator = filter_operator.view(self.num_nodes, -1)\n output = torch.matmul(input, filter_operator).view(self.batch_size, self.filter_size_in, self.num_nodes, self.filter_size_out).permute(0,2,3,1)\n\n matched_mask = self.mask.unsqueeze(2).repeat(1,1,self.filter_size_out,1)\n output = output * matched_mask\n\n # Debug\n logger.debug('Filter operator with matched dimensions of spectral conv layer: {}'.format(filter_operator.shape))\n logger.debug('Output after applying filter operator on input of spectral conv layer: {}'.format(output.size()))\n\n return output",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def create_conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')",
"def conv(input, output, size, stride,\n reuse=False,\n norm=instance_norm,\n activation=leaky_relu,\n dropout=1.0,\n padding='VALID',\n pad_size=None,\n is_training=True,\n name='conv'):\n with tf.variable_scope(name, reuse=reuse):\n dropout = 1.0 if dropout is None else dropout\n # Pre pad the input feature map\n x = pad(input, pad_size)\n # Apply convolution\n x = slim.conv2d(x, output, size, stride,\n activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev=0.02),\n padding=padding)\n # Apply dropout\n x = tf.nn.dropout(x, dropout)\n # Apply activation\n x = activation(x) if activation else x\n # Apply normalization\n x = norm(x, is_training) if norm else x\n return x"
] | [
"0.69920486",
"0.66528916",
"0.6432573",
"0.59314674",
"0.59063995",
"0.5811342",
"0.5794324",
"0.57784176",
"0.57155585",
"0.5674647",
"0.5611912",
"0.5560294",
"0.552098",
"0.5466808",
"0.5450267",
"0.54139715",
"0.5381454",
"0.538118",
"0.53697866",
"0.5363581",
"0.5358975",
"0.53409356",
"0.533698",
"0.5302297",
"0.52987224",
"0.52918166",
"0.5270636",
"0.52532643",
"0.5243113",
"0.5233982"
] | 0.7379639 | 0 |
Map MXNet's deconvolution operator attributes to onnx's ConvTranspose operator and return the created node. | def convert_deconvolution(node, **kwargs):
name, inputs, attrs = get_inputs(node, kwargs)
kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))
adj_dims = list(parse_helper(attrs, "adj", [0, 0]))
pad_dims = pad_dims + pad_dims
deconv_node = onnx.helper.make_node(
"ConvTranspose",
inputs=inputs,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
dilations=dilations,
output_padding=adj_dims,
pads=pad_dims,
group=num_group,
name=name
)
return [deconv_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x",
"def convert_transpose(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axes = attrs.get(\"axes\", ())\n if axes:\n axes = tuple(map(int, re.findall(r'\\d+', axes)))\n\n transpose_node = onnx.helper.make_node(\n \"Transpose\",\n input_nodes,\n [name],\n perm=axes,\n name=name\n )\n else:\n transpose_node = onnx.helper.make_node(\n \"Transpose\",\n input_nodes,\n [name],\n name=name\n )\n\n return [transpose_node]",
"def deconv(dims, inplanes, outplanes, kernel_size, stride, bias, dilation):\n padding = math.floor((kernel_size-stride+1)/2)\n if dims==2:\n return nn.ConvTranspose2d(inplanes, outplanes, kernel_size, stride,\n padding=padding, bias=bias) #, dilation=1)\n elif dims==3:\n return nn.ConvTranspose3d(inplanes, outplanes, kernel_size, stride,\n padding = padding, bias=bias) #, dilation=1)\n else:\n raise ValueError('dimension of deconv must be 2 or 3')",
"def convert_conv2d_transpose(g, op, block):\n\n dilations = op.attr(\"dilations\")\n groups = op.attr(\"groups\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n strides = op.attr(\"strides\")\n output_padding = op.attr(\"output_padding\") if op.attr(\"output_padding\") else [0, 0]\n\n kernel = g.get_node(op.input(\"Filter\")[0])\n input_x = g.get_node(op.input(\"Input\")[0])\n _, out_channels, k_h, k_w = infer_shape(kernel)\n k_size = [k_h, k_w]\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n # SAME padding of conv2d_transpose is not same with conv2d\n # We cannot use auto_pad here, only static shape is supported now\n dilations = [1, 1]\n input_shape = shape_of(input_x)\n h_w = _op.strided_slice(input_shape, [2], [4])\n try:\n h_w = infer_value(h_w, g.get_params()).numpy().tolist()\n except Exception as e:\n msg = \"The SAME padding algorithm of conv2d_transpose not support dynamic shape\"\n raise tvm.error.OpAttributeInvalid(msg) from e\n paddings = []\n for i in range(2):\n if strides[i] == 1 or h_w[i] % strides[i] == 0:\n pad = max(k_size[i] - strides[i], 0)\n else:\n pad = max(k_size[i] - (h_w[i] % strides[i]), 0)\n pad_before = pad // 2\n pad_after = pad - pad_before\n paddings.insert(-1, pad_before)\n paddings.append(pad_after)\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Conv is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n out = _op.nn.conv2d_transpose(\n input_x,\n kernel,\n strides=strides,\n padding=paddings,\n dilation=dilations,\n groups=groups,\n channels=out_channels * groups,\n kernel_size=k_size,\n output_padding=output_padding,\n )\n g.add_node(op.output(\"Output\")[0], out)",
"def create_helper_trans_node(input_name, output_name, perm=None):\n attrs = {}\n if perm is not None:\n attrs['perm'] = perm\n trans_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_name],\n outputs=[output_name],\n name=output_name,\n **attrs\n )\n return [trans_node]",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])",
"def convert_transpose(g, op, block):\n\n perm = op.attr(\"axis\")\n out = _op.transpose(g.get_node(op.input(\"X\")[0]), axes=perm)\n g.add_node(op.output(\"Out\")[0], out)",
"def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n # append transpose conv layer\n # TODO: shouldn't we set bias to NOT batch_norm instead of always being False ?\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n # optional batch norm layer\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def convert_convolution(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n\n pad_dims = pad_dims + pad_dims\n\n conv_node = onnx.helper.make_node(\n \"Conv\",\n inputs=input_nodes,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [conv_node]",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def deconv(\n in_channels,\n out_channels,\n kernel_size,\n stride=2,\n padding=1,\n batch_norm=True,\n):\n layers = []\n layers.append(\n nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size, stride, padding, bias=False\n )\n )\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, normalization=True, norm_type='instance_norm'):\n layers = []\n # append transpose conv layer\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n\n # optional normalization layer\n if normalization == True and norm_type == 'instance_norm':\n layers.append(nn.InstanceNorm2d(out_channels))\n elif normalization == True and norm_type == 'batch_norm':\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def transition_up(self, x, filters, name):\n with tf.name_scope(name):\n x = tf.layers.conv2d_transpose(x,\n filters=filters,\n kernel_size=[3, 3],\n strides=[2, 2],\n padding='SAME',\n activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name=name+'_trans_conv3x3')\n\n return x",
"def _create_transpose(cls, onnx_node, inputs, opset_version):\n shape = inputs[0].shape\n perm = onnx_node.getattr(\"perm\", list(range(len(shape) - 1, -1, -1)))\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(perm)",
"def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output",
"def CustomConv3DTranspose(x_in, nf, strides=2, kernel_size = 3):\r\n\tx_out = Conv3DTranspose(nf, kernel_size=3, padding='same',kernel_initializer='he_normal', strides=strides)(x_in)\r\n\t#print(\"AAAAA\", x_out.shape)\r\n\tx_out = BatchNormalization()(x_out)\r\n\tx_out = LeakyReLU(0.2)(x_out)\r\n\treturn x_out",
"def deconv_layer(self, dtype,\n N, C, K,\n M, P, Q,\n T=1, R=1, S=1,\n pad_d=0, pad_h=0, pad_w=0,\n str_d=1, str_h=1, str_w=1,\n dil_d=1, dil_h=1, dil_w=1):\n return layer_mkl.DeconvLayerMKL(self, dtype, N, C, K, M, P, Q, T, R, S,\n pad_d, pad_h, pad_w, str_d, str_h, str_w,\n dil_d, dil_h, dil_w)",
"def convert_dot(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n input_node_a = input_nodes[0]\n input_node_b = input_nodes[1]\n\n trans_a_node = None\n trans_b_node = None\n\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if trans_a:\n input_node_a = op_name + \"_a\"\n trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)\n if trans_b:\n input_node_b = op_name + \"_b\"\n trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_node_a, input_node_b],\n outputs=[name],\n name=name\n )\n\n if not trans_a and not trans_b:\n return [matmul_node]\n elif trans_a and not trans_b:\n return [trans_a_node, matmul_node]\n elif trans_b and not trans_a:\n return [trans_b_node, matmul_node]\n else:\n return [trans_a_node, trans_b_node, matmul_node]",
"def _make_major_transpose_nodes(inputs, scope, node_dict, prev_node, post):\n input_shape = node_dict[inputs[0]].attr[\"_output_shapes\"][0]\n input_rank = len(input_shape)\n\n perm_node = TensorflowNode(\n op_type=\"Const\",\n name=\"/\".join([scope, \"transpose\", \"perm\",\n get_unique_suffix()]),\n attr={\n \"value\": np.asarray([1, 0] + list(range(input_rank))[2:], np.int32),\n \"dtype\": data_type.tf2onnx(tf.int32),\n \"_output_shapes\": [input_rank]\n })\n\n if post:\n input_shape = [input_shape[i] for i in perm_node.attr[\"value\"]]\n prev_node.attr[\"_output_shapes\"] = [input_shape]\n\n trans_node = TensorflowNode(\n op_type=\"Transpose\",\n name=\"/\".join([scope, \"transpose\",\n get_unique_suffix()]),\n inputs=[inputs[0] if not post else prev_node.name, perm_node.name],\n attr={\n \"dtype\": data_type.tf2onnx(node_dict[inputs[0]].attr[\"T\"]),\n \"_output_shapes\":\n [[input_shape[i] for i in perm_node.attr[\"value\"]]]\n })\n return [perm_node, trans_node]",
"def make_convolution_op(onnx_node, ng_inputs, transpose=False):\n # type: (NodeWrapper, List[TensorOp], bool) -> Op\n if len(ng_inputs) == 3:\n x, weights, bias = ng_inputs\n elif len(ng_inputs) == 2:\n x, weights = ng_inputs\n bias = ng.constant(0)\n else:\n raise ValueError('Conv node (%s): unexpected number of input values: %d.',\n onnx_node.name, len(ng_inputs))\n\n # Reorder x axes from ONNX convention (N, C, H, W, D) to ngraph (C, D, H, W, N)\n # Reorder weights axes from ONNX (K, J, R, S, T) to ngraph (J, T, R, S, K)\n # Axis names follow https://ngraph.nervanasys.com/index.html/axes.html\n if len(x.axes) == 4: # 2D convolution\n x = reorder_axes(x, 'NCHW', 'CDHWN')\n weights = reorder_axes(weights, 'KJRS', 'JTRSK')\n elif len(x.axes) == 5: # 3D convolution\n x = reorder_axes(x, 'NCHWD', 'CDHWN')\n weights = reorder_axes(weights, 'KJRST', 'JTRSK')\n else:\n raise NotImplementedError('Conv node (%s): only 2D and 3D convolutions are supported.',\n onnx_node.name)\n\n groups = onnx_node.get_attribute_value('group', 1)\n if groups != 1:\n raise NotImplementedError('Conv node (%s): `group` attribute value %d not supported.',\n onnx_node.name, groups)\n\n # Prepare ngraph convolution operation\n conv_params = get_conv_params(onnx_node)\n output_axes = make_conv_output_axes(x, weights, conv_params)\n\n if transpose:\n conv = ng.deconvolution(conv_params, x, weights, axes=output_axes)\n\n else:\n conv = ng.convolution(conv_params, x, weights, axes=output_axes)\n\n conv = cast_to_pos_axes(conv) + bias\n\n # ONNX output should have axes in the order N, C, H, W, D\n conv = reorder_axes(conv, 'CDHWN', 'NCHWD')\n\n if len(ng_inputs[0].axes) == 4: # 2D convolution, slice away the D axis from output\n conv = ng.tensor_slice(conv, [slice(None), slice(None), slice(None), slice(None), 0])\n\n return conv",
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def conv_transpose_pattern():\n pattern = is_op(\"nn.conv2d_transpose\")(wildcard(), is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern",
"def transpose(incoming, conv, nonlinearity, *args, **kwargs):\n return TransposedConv2DLayer(incoming, conv.input_shape[1],\n conv.filter_size, stride=conv.stride,\n crop=conv.pad, W=conv.W,\n flip_filters=not conv.flip_filters,\n nonlinearity=nonlinearity, *args,\n **kwargs)",
"def __init__(\n self,\n *,\n input_dims: Union[List[int], Tuple[int]],\n cnn_transpose_filter_specifiers: List[List[Union[int, List]]],\n cnn_transpose_use_bias: bool = True,\n cnn_transpose_activation: Optional[str] = \"relu\",\n cnn_transpose_use_layernorm: bool = False,\n ):\n super().__init__()\n\n assert len(input_dims) == 3\n\n cnn_transpose_activation = get_activation_fn(\n cnn_transpose_activation, framework=\"tf2\"\n )\n\n layers = []\n\n # Input layer.\n layers.append(tf.keras.layers.Input(shape=input_dims))\n\n for i, (num_filters, kernel_size, strides) in enumerate(\n cnn_transpose_filter_specifiers\n ):\n is_final_layer = i == len(cnn_transpose_filter_specifiers) - 1\n layers.append(\n tf.keras.layers.Conv2DTranspose(\n filters=num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n # Last layer is never activated (regardless of config).\n activation=(\n None\n if cnn_transpose_use_layernorm or is_final_layer\n else cnn_transpose_activation\n ),\n # Last layer always uses bias (b/c has no LayerNorm, regardless of\n # config).\n use_bias=cnn_transpose_use_bias or is_final_layer,\n )\n )\n if cnn_transpose_use_layernorm and not is_final_layer:\n # Use epsilon=1e-5 here (instead of default 1e-3) to be unified with\n # torch. Need to normalize over all axes.\n layers.append(\n tf.keras.layers.LayerNormalization(axis=[-3, -2, -1], epsilon=1e-5)\n )\n layers.append(tf.keras.layers.Activation(cnn_transpose_activation))\n\n # Create the final CNNTranspose network.\n self.cnn_transpose = tf.keras.Sequential(layers)\n\n self.expected_input_dtype = tf.float32",
"def _conv_transpose(\n conv_type,\n nd_util,\n input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n output_padding=0,\n groups=1,\n dilation=1,\n):\n weight_shape = list(weight.shape)\n return FunctionLib.apply(\n conv_type,\n input.device,\n [input, weight] + ([bias] if bias else []),\n in_channels=weight_shape[0],\n out_channels=weight_shape[1],\n kernel_shape=weight_shape[2:],\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation),\n group=groups,\n output_padding=nd_util(output_padding),\n bias=bias is not None,\n dtype=weight.dtype,\n input_shape=list(input.shape),\n )",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def getTransposeMatrix(self) -> CMatrix4:\n ..."
] | [
"0.6097374",
"0.601943",
"0.58937025",
"0.58428407",
"0.57710683",
"0.5762493",
"0.56841415",
"0.55704516",
"0.55147016",
"0.55106026",
"0.5437586",
"0.5421498",
"0.53722894",
"0.53698725",
"0.53527224",
"0.5335422",
"0.5334541",
"0.53216076",
"0.5319174",
"0.5318343",
"0.53002244",
"0.52945316",
"0.52719516",
"0.52555466",
"0.521734",
"0.51985157",
"0.5188655",
"0.51789945",
"0.51693875",
"0.5149708"
] | 0.790625 | 0 |
Map MXNet's crop operator attributes to onnx's Crop operator and return the created node. | def convert_crop(node, **kwargs):
name, inputs, attrs = get_inputs(node, kwargs)
    start = np.array([0, 0, 0, 0], dtype=np.int) # indices are int type
export_nodes = []
start_node = create_helper_tensor_node(start, name + '__starts', kwargs)
export_nodes.extend(start_node)
start_node = start_node[-1].name
shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape')
export_nodes.extend(shape_node)
shape_node = shape_node[-1].name
crop_node = onnx.helper.make_node(
"Slice",
        inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'],  # data, start, end
outputs=[name],
name=name
)
logging.warning(
"Using an experimental ONNX operator: Crop. " \
"Its definition can change.")
export_nodes.extend([crop_node])
return export_nodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def basic_crop(data):\n return data['crop'];",
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def convert_copy(node, **kwargs):\n return create_basic_op_node('Identity', node, kwargs)",
"def get_crops(x_train, y_train, offset=4):\n\ttopleft = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, offset, offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\ttopright = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, 4 - offset, offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotleft = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, offset, 4 - offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotright = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, 4 - offset, 4 - offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tcenter = iaa.Sequential([\n\t\tiaa.Crop(px=(2, 2, 2, 2)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\taugs = [topleft, topright, botleft, botright, center]\n\n\taug_imgs = []\n\tfor aug in tqdm(augs):\n\t\taug_imgs.append(aug.augment_images(x_train * 255))\n\n\taug_x_train = [item for sublist in aug_imgs for item in sublist]\n\taug_y_train = y_train * 5\n\n\treturn aug_x_train, aug_y_train",
"def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)",
"def configure_crop(self, context_pad):\n # crop dimensions\n in_ = self.net.inputs[0]\n tpose = self.transformer.transpose[in_]\n inv_tpose = [tpose[t] for t in tpose]\n self.crop_dims = np.array(self.net.blobs[in_].data.shape[1:])[inv_tpose]\n #.transpose(inv_tpose)\n # context padding\n self.context_pad = context_pad\n if self.context_pad:\n in_ = self.net.inputs[0]\n transpose = self.transformer.transpose.get(in_)\n channel_order = self.transformer.channel_swap.get(in_)\n raw_scale = self.transformer.raw_scale.get(in_)\n # Padding context crops needs the mean in unprocessed input space.\n mean = self.transformer.mean.get(in_)\n if mean is not None:\n inv_transpose = [transpose[t] for t in transpose]\n crop_mean = mean.copy().transpose(inv_transpose)\n if channel_order is not None:\n channel_order_inverse = [channel_order.index(i)\n for i in range(crop_mean.shape[2])]\n crop_mean = crop_mean[:,:, channel_order_inverse]\n if raw_scale is not None:\n crop_mean /= raw_scale\n self.crop_mean = crop_mean\n else:\n self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)",
"def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)",
"def create_crops(merged_boxes, hyperspectral_pool=None, rgb_pool=None, sensor=\"hyperspectral\", expand=0, hyperspectral_savedir=\".\"): \n crops = []\n labels = []\n box_index = []\n for index, row in merged_boxes.iterrows():\n #Crop and append\n box = row[\"geometry\"] \n plot_name = row[\"plotID\"] \n \n #get sensor data\n if sensor == \"rgb\":\n sensor_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool, sensor=\"rgb\")\n elif sensor == \"hyperspectral\":\n rgb_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool, sensor=\"rgb\")\n hyperspectral_h5_path = find_sensor_path(bounds=box.bounds, lookup_pool=hyperspectral_pool, sensor=\"hyperspectral\")\n sensor_path = convert_h5(hyperspectral_h5_path, rgb_path, savedir=hyperspectral_savedir)\n \n crop = crop_image(sensor_path=sensor_path, box=box, expand=expand)\n \n crops.append(crop)\n labels.append(row[\"taxonID\"])\n box_index.append(\"{}_{}\".format(plot_name,index))\n \n return crops, labels, box_index",
"def createRotoPaintNodeMI():\n return gr()",
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def convert_shape(node, **kwargs):\n return create_basic_op_node('Shape', node, kwargs)",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _build_crop_fn(self, img_shape, crop_modes):\n h = img_shape[0]\n w = img_shape[1]\n\n w_crop = int(w * self.CROP_RATIO)\n h_crop = int(h * self.CROP_RATIO)\n\n top_pads = {\n Crop.TOP: 0,\n Crop.CENTER: int((h - h_crop) / 2),\n Crop.BOTTOM: h - h_crop\n }\n left_pads = {\n Crop.LEFT: 0,\n Crop.CENTER: int((w - self.CROP_RATIO) / 2),\n Crop.RIGHT: w - w_crop\n }\n\n def crop(image, directory):\n for crop_mode in crop_modes:\n top_pad = top_pads[crop_mode.vertical]\n left_pad = left_pads[crop_mode.horizontal]\n fname = self.name_generator.generate_aug_name(\n original=image.name,\n aug_name=\"{}_{}\".format(crop_mode.vertical, crop_mode.horizontal)\n )\n fpath = os.path.join(directory, fname)\n\n crop = image.x[top_pad:top_pad + h_crop, left_pad:left_pad + w_crop]\n crop = cv2.resize(crop, (w, h))\n cv2.imwrite(fpath, crop)\n\n return crop",
"def clone_attributes():\n nr_rows = pcr.clone().nrRows()\n nr_cols = pcr.clone().nrCols()\n cell_size = pcr.clone().cellSize()\n ymax = pcr.clone().north()\n xmin = pcr.clone().west()\n ymin = ymax - nr_rows * cell_size\n xmax = xmin + nr_cols * cell_size\n return xmin, xmax, ymin, ymax, nr_rows, nr_cols, cell_size",
"def convert_convolution(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n\n pad_dims = pad_dims + pad_dims\n\n conv_node = onnx.helper.make_node(\n \"Conv\",\n inputs=input_nodes,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [conv_node]",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def _build_binary_rop(op):\n def binary_rop(self, other):\n if (other is pstar\n or other is defaultpdict\n or other is frozenpset\n or other is pdict\n or other is plist\n or other is pset\n or other is ptuple\n ):\n # The plist.__r<op>__ methods should never be hit during conversion for valid conversions.\n raise NotImplementedError('Operation %s is not supported as a pstar conversion method.' % op.__name__)\n return plist([op(other, x) for x in self], root=self.__root__)\n\n return binary_rop",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def get_crop(self):\n if self.cropping_method == self.CROP_NONE:\n self.autocrop()\n return '{h}% {v}%'.format(h=self.from_left, v=self.from_top)",
"def _make_process_op(self):\n\n with tf.variable_scope(\"state_preprocess\"):\n self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)\n output = tf.image.rgb_to_grayscale(self.input_state)\n output = tf.image.crop_to_bounding_box(output, 34, 0, 160, 160)\n output = tf.image.resize_images(output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n output = tf.to_float(output) / 255.0\n output = tf.transpose(output, perm=[2, 1, 0])\n\n return output"
] | [
"0.5632038",
"0.55095476",
"0.5424357",
"0.5367828",
"0.5309837",
"0.5078018",
"0.5071087",
"0.49778563",
"0.49666363",
"0.491709",
"0.4914572",
"0.49061403",
"0.49024212",
"0.4902058",
"0.4891922",
"0.48729894",
"0.48681563",
"0.4843361",
"0.4805982",
"0.47791198",
"0.47575107",
"0.47406515",
"0.47394142",
"0.47338244",
"0.47289586",
"0.47264266",
"0.47208047",
"0.4715144",
"0.4710438",
"0.47089833"
] | 0.63828367 | 0 |
Map MXNet's UpSampling operator attributes to onnx's Upsample operator and return the created node. | def convert_upsample(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
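    # ONNX Resize has no 'bilinear' mode, so map it to 'linear'; 'nearest' passes through unchanged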
sample_type = attrs.get('sample_type', 'nearest')
sample_type = 'linear' if sample_type == 'bilinear' else sample_type
scale = convert_string_to_list(attrs.get('scale'))
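    # MXNet may supply a single scale (applied to both spatial dims) or separate (height, width) scales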
scaleh = scalew = float(scale[0])
if len(scale) > 1:
scaleh = float(scale[0])
scalew = float(scale[1])
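    # ONNX Resize expects one scale per axis in NCHW order; batch and channel stay at 1.0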
scale = np.array([1.0, 1.0, scaleh, scalew], dtype=np.float32)
roi = np.array([], dtype=np.float32)
export_nodes = []
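    # Materialize the roi and scale arrays as helper tensor nodes and pass them to Resize by name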
node_roi = create_helper_tensor_node(roi, name + 'roi', kwargs)
export_nodes.extend(node_roi)
node_roi = node_roi[-1].name
node_sca = create_helper_tensor_node(scale, name + 'scale', kwargs)
export_nodes.extend(node_sca)
node_sca = node_sca[-1].name
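    # asymmetric coordinates with floor rounding reproduce MXNet's nearest-neighbour upsampling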
node = onnx.helper.make_node(
'Resize',
inputs=[input_nodes[0], node_roi, node_sca],
outputs=[name],
coordinate_transformation_mode='asymmetric',
mode=sample_type,
nearest_mode='floor',
name=name
)
export_nodes.extend([node])
return export_nodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_upsample_layer(self, in_channels=None, out_channels=None):\n if self.expand_strategy == \"upsample\":\n return nn.Upsample(scale_factor=2, mode=\"nearest\")\n elif self.expand_strategy == \"transpose_convolution\":\n return nn.ConvTranspose2d(\n in_channels, out_channels, 3, stride=2, padding=1, output_padding=1\n )\n else:\n raise ValueError(\"Unkown expand strategy\")",
"def upsample(self, target, target_type=\"hz\", **kwargs):\n df_us = upsample(\n self,\n sampling_freq=self.sampling_freq,\n target=target,\n target_type=target_type,\n **kwargs,\n )\n if self.features is not None:\n us_features = upsample(\n self.features,\n sampling_freq=self.sampling_freq,\n target=target,\n target_type=target_type,\n **kwargs,\n )\n else:\n us_features = self.features\n return self.__class__(df_us, sampling_freq=target, features=us_features)",
"def build_upsample(self):\n with tf.variable_scope('upsample'):\n response = tf.expand_dims(self.response, 3)\n up_method = 'bicubic'\n methods = {'bilinear': tf.image.ResizeMethod.BILINEAR,\n 'bicubic': tf.image.ResizeMethod.BICUBIC}\n up_method = methods[up_method]\n response_spatial_size = self.response.get_shape().as_list()[1:3]\n up_size = [s * 16 for s in response_spatial_size]\n response_up = tf.image.resize_images(response,\n up_size,\n method=up_method,\n align_corners=True)\n response_up = tf.squeeze(response_up, [3])\n self.response_up = response_up\n #print (\"response_up {}\".format(self.response_up))",
"def Upsampler(avg, pooling, input_shape, data_format='channels_last'):\n Input0 = Input(shape=input_shape)\n Up = UpSampling1D(avg)(Input0, data_format=data_format)\n Avg = AveragePooling1D(pooling, padding='same', stride=avg,\n data_format=data_format)(Up)\n return keras.Model(Input0, Avg)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def upsample_model():\n\n inputs = tf.keras.Input(shape=(16, 16, 3,))\n x = tf.keras.layers.Conv2D(8, (2, 2))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65)(x)\n x = tf.nn.relu(x)\n x = tf.keras.layers.MaxPool2D()(x)\n residual = x\n\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25)(x)\n x = tf.add(x, residual)\n x = tf.nn.relu(x)\n\n x = tf.keras.layers.Conv2D(4, (1, 1))(x)\n x = tf.keras.layers.AvgPool2D()(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"upsample_model\")(x)\n return outputs",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def upsample(self, method):\n from scipy.signal import resample\n from scipy.ndimage.interpolation import zoom\n #print \"mm: 100 x 100 x 131\"\n #print \"Dims:\", self.D.shape\n fact = np.array(self.info.shape).astype(\"float32\") / np.array(self.info.read_shape).astype(\"float32\")+0.00001 # hrmpf!!\n if method == \"zoom\":\n print \"Resampling...\"\n self.D = zoom(self.D, fact).astype(\"float32\")\n elif method == \"resample\":\n print \"Resampling...\"\n a = self.info.resample_ax\n s = self.info.shape[a]\n self.D = resample(self.D, s, axis=a, window=10).astype(\"float32\")\n elif method == None:\n pass\n else:\n raise NotImplementedError(\"Unknown upsampling method: %s\" % method)\n #print \"Dims:\", self.D.shape\n print \"done.\"",
"def __init__(self, inpt, inpt_height, inpt_width,\n inpt_depth, up_factor=None, to_shape=None,\n transfer='identity',\n declare=None, name=None):\n self.inpt = inpt\n self.to_shape = to_shape\n self.inpt_height = inpt_height\n self.inpt_width = inpt_width\n self.inpt_depth = inpt_depth\n\n if up_factor is None:\n assert to_shape is not None\n self.output_height = to_shape[0]\n self.output_width = to_shape[1]\n self.output_depth = to_shape[2]\n else:\n assert to_shape is None\n self.output_height = inpt_height * up_factor[0]\n self.output_width = inpt_width * up_factor[1]\n self.output_depth = inpt_depth * up_factor[2]\n\n self.up_factor = up_factor\n self.to_shape = to_shape\n\n if transfer != 'identity':\n warnings.warn('Transfer functions can only be used in activation layers.', DeprecationWarning)\n self.transfer = 'identity'\n\n super(NearestNeighborsUpsample3d, self).__init__(declare=declare, name=name)",
"def Upsampler2D(avg, pooling, input_shape, data_format='channels_fist'):\n Input0 = Input(shape=input_shape)\n Up = UpSampling2D(size=(avg, 1), data_format=data_format)(Input0)\n Avg = AveragePooling2D(pooling, padding='same',\n data_format=data_format, strides=(avg,1))(Up)\n return keras.Model(Input0, Avg)",
"def upsample(layer, scale, mode=\"repeat\"):\n if mode in [\"repeat\", \"dilate\"]:\n return Upscale2DLayer(layer, scale, mode=mode)\n elif mode in [\"bilinear\"]:\n nb_kernels = nn.layers.get_output_shape(layer)[1]\n return upsample_bilinear(layer, nb_kernels, ratio=scale)\n raise ValueError(\"Invalid mode: \" + str(mode))",
"def __reduce__(self):\n return ImageNetDownsample, (self.cutout,)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def upsample_model_for_tf2():\n\n inputs = tf.keras.Input(shape=(16, 16, 3,))\n x = tf.keras.layers.Conv2D(8, (2, 2))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, trainable=False)(x)\n x = tf.nn.relu(x)\n x = tf.keras.layers.MaxPool2D()(x)\n residual = x\n\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, trainable=False)(x)\n x = tf.add(x, residual)\n x = tf.nn.relu(x)\n\n x = tf.keras.layers.Conv2D(4, (1, 1))(x)\n x = tf.keras.layers.AvgPool2D()(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"upsample_model\")(x)\n return outputs",
"def pack_sample(self, X, U):\n assert X.shape[0] == self.T\n assert U.shape[0] == self.T\n assert X.shape[1] == self.dX\n assert U.shape[1] == self.dU\n\n sample = Sample(self)\n for sensor, idx in self._x_data_idx.items():\n sample.set(sensor, X[:, idx])\n for actuator, idx in self._u_data_idx.items():\n sample.set(actuator, U[:, idx])\n sample.set(ACTION, U)\n return sample",
"def model_with_upsample2d():\n inputs = tf.keras.Input(shape=(8, 8, 3,))\n x = tf.keras.layers.Conv2D(8, (2, 2))(inputs)\n x = tf.keras.layers.UpSampling2D(size=(2, 3))(x)\n x = tf.keras.layers.Conv2D(4, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"model_with_upsample2d\")(x)\n return outputs",
"def upsampling(data, scale, layout=\"NCHW\"):\n\n if layout == \"NCHW\":\n return upsampling_nchw(data, scale)\n elif layout == \"NHWC\":\n return upsampling_nhwc(data, scale)\n else:\n raise ValueError(\"not support this layout {} yet\".format(layout))",
"def upsample(x, name, size):\n with tf.name_scope(name):\n outputs = tf.image.resize_bilinear(x, size)\n # Return layer's output\n return outputs",
"def upscale_2x(self, output_channel, filter_):\n return self.add_layer(upscale_2x, output_channel, filter_)",
"def sample(self, num_samples = 1):\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n\n # some sampling\n U = self.UG.sample(num_samples)\n X = 1 / scale * (-np.log(U)) ** (1 / shape)\n return scale * X + loc",
"def AddGPGatewayUtilization(asg_name, arn_scalein, arn_scaleout):\n logger.info('Creating GP Gateway Utilization CloudWatch alarm for ASG: ' + asg_name)\n alarmname= asg_name + '-cw-gpu'\n return common_alarm_func_add(asg_name, \"panGPGatewayUtilizationPct\", lib.get_cw_name_space(stackname, asg_name), arn_scalein, arn_scaleout,\n\t\t\talarmname, \"GP Gateway Utilization\", 'Percent')",
"def up_block(x, out_channels, name, training=True):\n with tf.variable_scope(name):\n bn0 = ops.BatchNorm(name='bn_0')\n bn1 = ops.BatchNorm(name='bn_1')\n x_0 = x\n x = tf.nn.relu(bn0(x))\n x = usample(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv1')\n x = tf.nn.relu(bn1(x))\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv2')\n\n x_0 = usample(x_0)\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, training, 'snconv3')\n\n return x_0 + x",
"def model_with_upsample_already_present():\n inputs = tf.keras.Input(shape=(8, 8, 3,))\n x = tf.keras.layers.Conv2D(8, (3, 3))(inputs)\n with tf.name_scope(\"upsample\"):\n unstacked = tf.unstack(x, axis=-1)\n zeros = tf.zeros_like(unstacked[0])\n current_index = 0\n\n for index in [0, 3, 4, 6, 7, 8, 9, 11]:\n while current_index < index:\n unstacked.insert(current_index, zeros)\n current_index += 1\n current_index += 1\n\n stack = tf.stack(unstacked, axis=-1)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65)(stack, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"model_with_upsample_already_present\")(x)\n return outputs",
"def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node",
"def bilinear_upsample_weights(factor, num_outputs):\n\n kernel_size = 2 * factor - factor % 2\n\n weights_kernel = np.zeros((kernel_size,\n num_outputs,\n num_outputs), dtype = np.float32)\n\n rfactor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = rfactor - 1\n else:\n center = rfactor - 0.5\n\n og = np.ogrid[:kernel_size]\n upsample_kernel = (1 - abs(og - center) / rfactor)\n\n for i in xrange(num_outputs):\n weights_kernel[:, i, i] = upsample_kernel\n\n init = tf.constant_initializer(value = weights_kernel, dtype = tf.float32)\n weights = tf.get_variable('weights', weights_kernel.shape, tf.float32, init)\n\n return weights",
"def test_upsample_get_op_product_graph(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n module_zero_channels_list = []\n\n _ = upsample_model()\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n\n input_op_names = ['input_1']\n output_op_names = ['upsample_model/Softmax']\n\n tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"conv2d_3/Conv2D\")\n input_channels_to_winnow = [3, 5, 7]\n module_mask_pair = (tf_op, input_channels_to_winnow)\n module_zero_channels_list.append(module_mask_pair)\n\n new_sess, _ = winnow.winnow_tf_model(sess, input_op_names, output_op_names,\n list_of_modules_to_winnow=module_zero_channels_list,\n reshape=True, in_place=True, verbose=True)\n\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), input_op_names, output_op_names)\n self.assertEqual(18, len(conn_graph.get_all_ops()))\n reduced_bn_1_op = conn_graph.get_op_from_module_name('reduced_batch_normalization_1/cond/FusedBatchNormV3_1')\n self.assertTrue(reduced_bn_1_op.output.consumers[0].type == 'Upsample')\n new_sess.close()",
"def register():\n\n def grid_sampler(g, input, grid, mode, padding_mode, align_corners):\n # mode\n # 'bilinear' : onnx::Constant[value={0}]\n # 'nearest' : onnx::Constant[value={1}]\n # 'bicubic' : onnx::Constant[value={2}]\n # padding_mode\n # 'zeros' : onnx::Constant[value={0}]\n # 'border' : onnx::Constant[value={1}]\n # 'reflection' : onnx::Constant[value={2}]\n mode = symbolic_helper._maybe_get_const(mode, \"i\")\n padding_mode = symbolic_helper._maybe_get_const(padding_mode, \"i\")\n mode_str = [\"bilinear\", \"nearest\", \"bicubic\"][mode]\n padding_mode_str = [\"zeros\", \"border\", \"reflection\"][padding_mode]\n align_corners = int(symbolic_helper._maybe_get_const(align_corners, \"b\"))\n\n # From opset v13 onward, the output shape can be specified with\n # (N, C, H, W) (N, H_out, W_out, 2) => (N, C, H_out, W_out)\n # input_shape = input.type().sizes()\n # gird_shape = grid.type().sizes()\n # output_shape = input_shape[:2] + gird_shape[1:3]\n # g.op(...).setType(input.type().with_sizes(output_shape))\n\n return g.op(\n \"com.microsoft::GridSample\",\n input,\n grid,\n mode_s=mode_str,\n padding_mode_s=padding_mode_str,\n align_corners_i=align_corners,\n )\n\n _reg(grid_sampler)\n\n def inverse(g, self):\n return g.op(\"com.microsoft::Inverse\", self).setType(self.type())\n\n _reg(inverse)\n\n @torch.onnx.symbolic_helper.parse_args(\"v\", \"s\")\n def gelu(g, self: torch._C.Value, approximate: str = \"none\"):\n # Use microsoft::Gelu for performance if possible. It only supports approximate == \"none\"\n if approximate == \"none\":\n return g.op(\"com.microsoft::Gelu\", self).setType(self.type())\n return torch.onnx.symbolic_opset9.gelu(g, self, approximate)\n\n _reg(gelu)\n\n def triu(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=1).setType(self.type())\n\n _reg(triu)\n\n def tril(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=0).setType(self.type())\n\n _reg(tril)",
"def __init__(self):\n super().__init__()\n self.metric = 'TP'",
"def net_u(self, x, t):\n\n u = self.model(torch.cat((x,t),1))\n return u"
] | [
"0.5554982",
"0.5521366",
"0.54900855",
"0.5459652",
"0.5323029",
"0.5225662",
"0.5160975",
"0.51450866",
"0.5143616",
"0.5027456",
"0.5023463",
"0.49990726",
"0.49227133",
"0.48704767",
"0.48497036",
"0.48493198",
"0.48339114",
"0.47982448",
"0.47692373",
"0.4767733",
"0.47560626",
"0.47228533",
"0.47196177",
"0.47156194",
"0.46972063",
"0.46951324",
"0.46870926",
"0.46594718",
"0.46353036",
"0.46294746"
] | 0.64564914 | 0 |
Map MXNet's FullyConnected operator attributes to onnx's Gemm operator and return the created node. | def convert_fully_connected(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
initializer = kwargs["initializer"]
no_bias = get_boolean_attribute_value(attrs, "no_bias")
fcnode = []
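    # Gemm operates on 2-D inputs, so flatten the data tensor first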
op_name = "flatten_" + str(kwargs["idx"])
flatten_node = onnx.helper.make_node(
'Flatten',
inputs=[input_nodes[0]],
outputs=[op_name],
name=op_name
)
input_nodes[0] = op_name
fcnode.append(flatten_node)
if no_bias:
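        # Gemm still expects a C (bias) input, so register a zero-valued placeholder bias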
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
bias_name = "bias" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
initializer.append(
onnx.helper.make_tensor(
name=bias_name,
data_type=data_type,
dims=(1,),
vals=[0],
raw=False,
)
)
input_nodes.append(bias_name)
fcnode.append(tensor_node)
node = onnx.helper.make_node(
"Gemm",
input_nodes, # input (A, B, C) - C can be in place
[name], # output
alpha=1.0,
beta=1.0,
transA=False,
transB=True,
name=name
)
fcnode.append(node)
return fcnode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def _create_gemm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n alpha = onnx_node.getattr('alpha', 1.)\n beta = onnx_node.getattr('beta', 1.)\n transA = onnx_node.getattr('transA', 0)\n transB = onnx_node.getattr('transB', 0)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(alpha=alpha,\n beta=beta,\n transA=transA,\n transB=transB)",
"def createGridWarpNodeMI():\n return gy()",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def generate_mxp_graph(model_name, activations, stats, first_node_name, last_node_name, io_info,\n input_type, ignore_strides=False, inline_depthwise=False, verbose=False):\n network = {}\n network['layers'] = []\n network['test_input'] = None\n network['test_output'] = None\n network['scale'] = 1.0\n\n model = onnx.load(model_name)\n nodes = model.graph.node\n inits = model.graph.initializer\n\n idx = get_node_index(nodes, first_node_name)\n if idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(first_node_name, mname))\n assert(idx != None)\n\n last_idx = get_node_index(nodes, last_node_name)\n if last_idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(last_node_name, mname))\n assert(last_idx != None)\n\n while True:\n node = nodes[idx]\n if verbose:\n print(node.name, node.op_type)\n src_node = get_node_source(nodes, node.input[0])\n if src_node == None:\n input_id = node.input[0]\n else:\n input_id = src_node.output[0]\n output_id = node.output[0]\n\n\n if len(network['layers']) == 0:\n previous = None\n else:\n previous = network['layers'][-1]\n for layer in network['layers']:\n if layer['output_id'] == input_id:\n previous = layer\n\n input_shapes, output_shapes = get_shapes(activations, stats, node)\n assert len(output_shapes) == 1, \"Multi-output nodes not supported\"\n output_shape = output_shapes[0]\n if node.op_type == \"Conv\":\n c, m, n = input_shapes[0]\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n assert(get_attr(node, 'pads') == None or not any(get_attr(node, 'pads')))\n\n group = get_attr(node, 'group')\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n dilations = np.asarray(get_attr(node, 'dilations')).tolist()\n if not group:\n group = 1\n if not strides:\n strides = [1, 1]\n if not dilations:\n dilations = [1, 1]\n\n use_strided = 0\n assert(strides == [1, 1] or strides == [2, 2] or strides == [4, 4])\n\n if DO_STRIDES and not ignore_strides:\n if (strides[0] > 1 or strides[1] > 1) and group == 1: # TODO handle depthwise as well\n assert(previous['output_size'] == int(np.prod(input_shapes[0])))\n use_strided = 1\n previous['output_strides'] = strides\n if verbose:\n print('adding output strides to previous node')\n\n m = m + (m % strides[0])\n n = n + (n % strides[1])\n if int(np.prod(input_shapes[0])) != int(c*m*n):\n if verbose:\n print('adjusting size for strided maps')\n previous['output_size'] = int(c*4*m//strides[0]*n//strides[1])\n previous['output_shape'] = (c*4,m//strides[0],n//strides[1])\n\n w = get_tensor(inits, node.input[1])\n kernels, channels, _, _ = w.shape\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n conv_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(c*m*n),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels * group,\n 'kernels': kernels,\n 'kernel_shape': kernel_shape,\n 'dilations': dilations,\n 'strides': strides,\n 'group': group,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'use_cvi': 0,\n 'use_depthwise': 0,\n 'use_strided': use_strided,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n conv_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(kernels)]\n conv_layer['biases'] 
= base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n\n network['layers'].append(conv_layer)\n\n elif node.op_type == \"Gemm\":\n w = get_tensor(inits, node.input[1])\n output_size, input_size = w.shape\n\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n gemm_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(np.prod(input_shapes[0])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'gemm_input_size': input_size,\n 'gemm_output_size': output_size,\n 'input_id': input_id,\n 'output_id': output_id,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n gemm_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(output_size)]\n gemm_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n network['layers'].append(gemm_layer)\n\n elif node.op_type in multipath_nodes:\n node_inputs = get_previous_nodes(nodes, node)\n shapes = input_shapes\n\n if node.op_type == \"Sum\":\n assert(all([x == shapes[0] for x in shapes[1:]]))\n elif node.op_type == \"Concat\":\n assert(all([x[1:] == shapes[0][1:] for x in shapes[1:]]))\n\n buf = node_inputs[0].name\n if node.op_type == \"Concat\":\n buf = output_id\n\n buffer_offset = 0\n for n, node_input in enumerate(node_inputs):\n noutput = node_input.output[0]\n for l, layer in enumerate(network['layers']):\n if layer['output_id'] == noutput: # if layer pointing to this node\n network['layers'][l]['output_id'] = buf # rename layer's output\n network['layers'][l]['buffer_offset'] = buffer_offset # and offset appropriately\n if layer['input_id'] == noutput:\n network['layers'][l]['input_id'] = buf #TODO\n\n buffer_offset += int(np.prod(input_shapes[n]))\n\n if node.op_type == \"Sum\":\n channels, m, n = shape3d(output_shape)\n sum_layer = {\n 'op_type': \"Sum\",\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': node_inputs[0].name,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'num_inputs': len(node.input),\n \"sublayers\": [],\n }\n network['layers'].append(sum_layer)\n\n elif node.op_type == \"Identity\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n identity_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(identity_layer)\n\n elif node.op_type == \"LRN\":\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n lrn_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'alpha': get_attr(node, 'alpha'),\n 'beta': get_attr(node, 'beta'),\n 'bias': get_attr(node, 'bias'),\n 'size': get_attr(node, 'size'),\n 'scale': 1.0,\n 
'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(lrn_layer)\n\n elif node.op_type == \"Scale\":\n scale_sublayer = {\n 'op_type': 'Scale',\n 'name': node.name,\n \"use_replay\": 1,\n 'scale': get_attr(node, 'scale'),\n }\n previous['sublayers'].append(scale_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"GlobalAveragePool\", \"GlobalMaxPool\"]:\n assert(previous['n'] == previous['m'])\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type.replace('Global', ''),\n 'name': node.name,\n 'use_replay': 0,\n 'kernel_shape': [previous['m'], previous['n']],\n 'strides': [previous['m'], previous['n']],\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n\n elif node.op_type in [\"MaxPool\", \"AveragePool\"]:\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n\n if node.op_type == \"AveragePool\": #TODO quick fix for tf average pool quirk\n if kernel_shape[0] * kernel_shape[1] == previous['m'] * previous['n']:\n kernel_shape = [previous['m'], previous['n']]\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n if strides is None:\n strides = [ 1 for _ in kernel_shape]\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'kernel_shape': kernel_shape,\n 'strides': strides,\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type == \"PRelu\":\n slope = get_tensor(inits, node.input[1])\n slope = slope.flatten().tolist()\n prelu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'slope': slope,\n }\n previous['sublayers'].append(prelu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"LeakyRelu\":\n alpha = get_attr(node, 'alpha')\n if alpha is None:\n alpha = .01\n leaky_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'alpha': alpha\n }\n previous['sublayers'].append(leaky_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Relu\":\n relu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(relu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Clip\":\n clip_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'min': float(get_tensor(inits,node.input[1])),\n 'max': float(get_tensor(inits,node.input[2])),\n }\n previous['sublayers'].append(clip_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Pad\":\n pads = pads6(get_tensor(inits,node.input[1]).tolist())\n value = int(get_tensor(inits,node.input[2]))\n if value < -1:\n value = -1\n if value > 1:\n value = 1\n pad_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'value': value,\n 'pads': pads,\n }\n previous['sublayers'].append(pad_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type in [\"Add\", \"Mul\", \"Sub\", \"Div\"]:\n\n skip = False\n if node.op_type == \"Mul\":\n 
next_nodes = get_node_inputs(nodes, node.output[0])\n if node.name == nodes[-1].name:\n if verbose:\n print('removing final scale node')\n skip = True\n\n elif previous['op_type'] in [\"LRN\"]:\n if verbose:\n print('skipping mul after lrn')\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n previous['scale'] = float(array[0])\n print('skipping mul after lrn', previous['scale'], previous['input_id'], previous['output_id'])\n\n skip = True\n\n elif next_nodes[0].op_type in [\"Softmax\"]:\n if verbose:\n print('skipping mul before softmax')\n skip = True\n\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n c = activations[node.input[1]].shape[1]\n else:\n c = input_shapes[0][0]\n\n if node.op_type == \"Add\": # TODO for scalar Add\n dims = len(np.squeeze(array).shape)\n if dims == 0:\n array = np.ones((c, 1)) * array\n\n dims = len(np.squeeze(array).shape)\n if c == 1 and dims == 0:\n dims = 1\n\n array = array.flatten().tolist()\n # force_broadcast_2 = False\n # if force_broadcast_2:\n # # if c != 1 and dims == 0:\n # if c != 1 and dims == 0 and node.op_type != \"Mul\": # TODO forcing to broadcast 2 not broadcast 3\n # dims = 1\n # array = [array[0] for _ in range(c)]\n\n if not skip:\n arithmetic_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'dims': dims,\n 'array': array,\n }\n previous['sublayers'].append(arithmetic_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Abs\", \"Max\", \"Mean\", \"Min\", \"Neg\", \"Not\"]:\n unary_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(unary_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n\n elif node.op_type == \"Reshape\":\n dims = get_tensor(inits, node.input[1])\n\n if len(dims) == 4 and dims[-1] == 2:\n idx += 6\n node = nodes[idx]\n output_id = node.output[0]\n _, output_shapes = get_shapes(activations, stats, node)\n output_shape = output_shapes[0]\n channels, m, n = shape3d(output_shape)\n reorg_layer = {\n 'op_type': \"Reorg\",\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n \"stride\": int(dims[-1]),\n }\n network['layers'].append(reorg_layer)\n else:\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Flatten\",'Cast']:\n previous['output_id'] = output_id\n elif node.op_type == \"Resize\":\n scales = get_tensor(inits, node.input[2])\n assert(scales[0] == 1 and scales[1] == 1)\n scale = float(scales[2])\n mode = get_attr(node, 'mode').decode()\n assert(mode == 'nearest' or mode == 'linear')\n shapes = input_shapes[:1]\n channels, m, n = shape3d(output_shape)\n in_size= [d for d in one_elem(input_shapes)[1:]]\n replay = 0 if in_size == [1,1] else 1\n resize_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': replay,\n 'input_size': int(np.prod(one_elem(input_shapes))),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'mode' :mode,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': 
[float(scales[2]),float(scales[3])],\n }\n network['layers'].append(resize_layer)\n elif node.op_type == \"ArgMax\":\n input_shape = one_elem(input_shapes)\n channels, m, n = shape3d(input_shape)\n argmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(argmax_layer)\n\n elif node.op_type == \"Softmax\":\n prev = get_previous_nodes(nodes, node)[0]\n if prev.op_type == \"Mul\":\n scale = get_tensor(inits, prev.input[1])\n scale = scale.flatten().tolist()\n else:\n scale = [1.0]\n if len(scale) > 1:\n raise NotImplementedError(\"Broadcast scale not implemented for softmax\")\n\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n softmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': scale,\n 'size': len(scale),\n }\n network['layers'].append(softmax_layer)\n\n # softmax_sublayer = {u'op_type': u'Softmax', 'scale': 1.0}\n # previous['sublayers'].append(softmax_sublayer)\n # previous['output_id'] = output_id\n # print('warning SOFTMAX ignored!... fine if last layer and sorting outputs')\n\n elif node.op_type == \"Transpose\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n permutation =[p-1 for p in get_attr(node, 'perm')[1:]]\n transpose_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'permutation':permutation,\n \"sublayers\": [],\n }\n network['layers'].append(transpose_layer)\n else:\n raise RuntimeError('Unknown node type:{} '.format(node.op_type))\n\n idx += 1\n if idx > last_idx:\n break\n\n unsigned_network_inputs = input_type == np.uint8\n\n if CVI_1x1:\n network = mxp_gemm_to_conv(network)\n\n network = mxp_set_replay(network, io_info)\n network = mxp_set_cvi(network)\n network = mxp_set_unsigned(network, unsigned_network_inputs)\n\n if inline_depthwise:\n network = mxp_inline_depthwise(network)\n\n network = mxp_describe_layers(network)\n network = mxp_number_buffers(network)\n buffers = mxp_size_buffers(network)\n network = mxp_number_sublayers(network)\n\n network['num_layers'] = len(network['layers'])\n network['buffers'] = buffers\n\n return network",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)",
"def createRotoPaintNodeMI():\n return gr()",
"def createRotoNodeMI():\n return gs()",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def basic_network(cm=False):\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 1, 0],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 0],\n ])\n if cm is False:\n cm = np.array([\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n else:\n cm = None\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,\n device: str = \"\"):\n Node.__init__(self, g, node_id=node_id, name=name,\n op_name=op_name, outputs=[], device=device)\n self._attributes = []\n self._inputs = []\n self._control_inputs = []",
"def test_get_hyperflex_node_by_moid(self):\n pass",
"def local_gpu_gemm(node):\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n tensor.blas.Gemm):\r\n z, a, x, y, b = host_input.owner.inputs\r\n return [gpu_gemm_no_inplace(gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b)]\r\n if isinstance(node.op, tensor.blas.Gemm):\r\n z, a, x, y, b = node.inputs\r\n x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))\r\n y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))\r\n z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))\r\n if x_on_gpu or y_on_gpu or z_on_gpu:\r\n return [host_from_gpu(gpu_gemm_no_inplace(gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b))]\r\n return False",
"def _create_custom_op_trainable_onnx_model():\n onnx_model = onnx.load(os.path.join(\"testdata\", \"custom_op_library\", \"custom_op_test.onnx\"))\n onnx_model.graph.value_info.append(\n onnx.helper.make_tensor_value_info(\"output_1\", onnx.TensorProto.FLOAT, [3, 5])\n )\n\n class CustomOpBlockWithLinear(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.linear = onnxblock.blocks.Linear(5, 10)\n\n def build(self, linear_input):\n return self.linear(linear_input)\n\n custom_op_block = CustomOpBlockWithLinear()\n with onnxblock.base(onnx_model) as model_accessor:\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"test.customop\", 1))\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"\", 14))\n model_accessor.model.ir_version = 7\n _ = custom_op_block(\"output_1\")\n\n return custom_op_block.to_model_proto()",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def generate_operator_layer(operator_class):\n\n def __init__(self, *args, **kwargs):\n \"\"\"Operator layer with a single operator\n\n Forwards arguments to lbann.OperatorLayer or sub-class of\n lbann.Operator.\n\n \"\"\"\n layer_kwargs = lbann.Layer.__init__.__kwdefaults__.copy()\n op_kwargs = {}\n for key, value in kwargs.items():\n if key in layer_kwargs:\n layer_kwargs[key] = value\n else:\n op_kwargs[key] = value\n layer_kwargs['ops'] = [ operator_class(**op_kwargs) ]\n OperatorLayer.__init__(self, *args, **layer_kwargs)\n\n def export_proto(self):\n \"\"\"Construct and return a protobuf message.\"\"\"\n\n # Use default datatype if not specified\n if self.datatype is None:\n self.datatype = 0\n\n # Convert device string to enum\n device = lbann.DeviceAllocation.DEFAULT_DEVICE\n if isinstance(self.device, str):\n if self.device.lower() == 'cpu':\n device = lbann.DeviceAllocation.CPU\n elif self.device.lower() == 'gpu':\n device = lbann.DeviceAllocation.GPU\n elif self.device is not None:\n raise TypeError('Unknown type for field device ' + str(type(device)))\n\n # Configure operators to match layer\n for o in self.ops:\n o.input_type = self.datatype\n o.output_type = self.datatype\n o.device = device\n\n # Generate Protobuf message\n return OperatorLayer.export_proto(self)\n\n # Return operator layer class\n class_name = operator_class.__name__\n class_dict = {'__init__': __init__, 'export_proto': export_proto}\n return type(class_name, (OperatorLayer,), class_dict)",
"def nodeCreator(cls):\n\n return OpenMayaMPx.asMPxPtr(cls())",
"def _extract_ops_from_onnx_graph(graph, operators, domain_opset_map):\n\n for operator in graph.node:\n # empty domain is used as an alias for 'ai.onnx'\n domain = operator.domain if operator.domain else \"ai.onnx\"\n\n if domain not in operators or domain not in domain_opset_map:\n continue\n\n operators[domain][domain_opset_map[domain]].add(operator.op_type)\n\n for attr in operator.attribute:\n if attr.type == onnx.AttributeProto.GRAPH: # process subgraph\n _extract_ops_from_onnx_graph(attr.g, operators, domain_opset_map)\n elif attr.type == onnx.AttributeProto.GRAPHS:\n # Currently no ONNX operators use GRAPHS.\n # Fail noisily if we encounter this so we can implement support\n raise RuntimeError(\"Unexpected attribute proto of GRAPHS\")",
"def node_encoder_construct(cfg, model_name='node_encoder', **kwargs):\n encoders = node_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown node encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node"
] | [
"0.6879449",
"0.5942417",
"0.58318627",
"0.5746459",
"0.5413726",
"0.53791827",
"0.5367006",
"0.5331266",
"0.527826",
"0.5257274",
"0.52468914",
"0.5242588",
"0.5240532",
"0.51549006",
"0.515457",
"0.51430964",
"0.5114177",
"0.5093029",
"0.50854313",
"0.5052347",
"0.50358003",
"0.49876368",
"0.49856737",
"0.4971864",
"0.4955984",
"0.4954637",
"0.4949926",
"0.49370658",
"0.49132505",
"0.48768368"
] | 0.5959116 | 1 |
Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator and return the created node. | def convert_batchnorm(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))
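    # Fall back to MXNet's BatchNorm defaults when the attributes are absent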
bn_node = onnx.helper.make_node(
"BatchNormalization",
input_nodes,
[name],
name=name,
epsilon=eps,
momentum=momentum,
# MXNet computes mean and variance per channel for batchnorm.
# Default for onnx is across all spatial features. Relying on default
# ONNX behavior of spatial=1 for ONNX opset 8 and below. As the spatial
# attribute is deprecated in opset 9 and above, not explicitly encoding it.
)
return [bn_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_batchnorm(cls, op, op_t):\n # first, we init batchnorm node\n epsilon = 1e-5 # the epsilon value used in singa\n bn_node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n bn_node.attribute.extend([\n helper.make_attribute('momentum', op.handle.factor),\n helper.make_attribute('epsilon', epsilon),\n ])\n # then we add nodes of scal, bias, mean, var\n nodes = []\n running_values = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, running_value in running_values.items():\n node_name = op.name + \":\" + tmp_name\n bn_node.input.append(node_name)\n\n nodes.append(bn_node)\n return nodes",
"def convert_batch_norm(g, op, block):\n\n ipt_name = op.input(\"X\")[0]\n scale_name = op.input(\"Scale\")[0]\n bias_name = op.input(\"Bias\")[0]\n mean_name = op.input(\"Mean\")[0]\n variance_name = op.input(\"Variance\")[0]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.batch_norm(\n g.get_node(ipt_name),\n g.get_node(scale_name),\n g.get_node(bias_name),\n g.get_node(mean_name),\n g.get_node(variance_name),\n epsilon=epsilon,\n )\n g.add_node(op.output(\"Y\")[0], out[0])",
"def _create_batchnorm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n factor = onnx_node.getattr('momentum', 0.9)\n if x.device.id() == -1:\n handle = singa.BatchNormHandle(factor, x.data)\n else:\n handle = singa.CudnnBatchNormHandle(factor, x.data)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return handle, forward",
"def convert_instance_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n epsilon = op.attr(\"epsilon\")\n\n scale = center = True\n out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)\n g.add_node(op.output(\"Y\")[0], out)",
"def convert_norm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n ord = int(attrs.get(\"ord\", 2))\n\n onnx_op_name = \"ReduceL1\" if ord == 1 else \"ReduceL2\"\n\n if axes:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]\n else:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]",
"def convert_frozen_batchnorm(cls, module):\n bn_module = nn.modules.batchnorm\n bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)\n res = module\n if isinstance(module, bn_module):\n res = cls(module.num_features)\n if module.affine:\n res.weight.data = module.weight.data.clone().detach()\n res.bias.data = module.bias.data.clone().detach()\n res.running_mean.data = module.running_mean.data\n res.running_var.data = module.running_var.data\n res.eps = module.eps\n else:\n for name, child in module.named_children():\n new_child = cls.convert_frozen_batchnorm(child)\n if new_child is not child:\n res.add_module(name, new_child)\n return res",
"def _special_handle_batchnorm(cls, op, X, W):\n # for singa, x, scale, bias is input\n # and mean and var is attribute\n # so we add the mean and var to W\n tensor_list = []\n append_inputs = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n append_input = tensor.to_numpy(tensor.from_raw_tensor(append_input))\n tensor_list.append(numpy_helper.from_array(append_input, node_name))\n return tensor_list",
"def convert_instancenorm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n eps = float(attrs.get(\"eps\", 0.001))\n\n node = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=input_nodes,\n outputs=[name],\n name=name,\n epsilon=eps)\n\n return [node]",
"def batch_normal(x, is_train, name, activation_fn=None):\n with tf.name_scope(name), tf.variable_scope(name):\n outputs = tf.contrib.layers.batch_norm(x,\n decay=0.999,\n scale=True,\n activation_fn=activation_fn,\n is_training=is_train)\n return outputs",
"def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def convert_group_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n num_groups = op.attr(\"groups\")\n epsilon = op.attr(\"epsilon\")\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n out = _op.nn.group_norm(\n x,\n gamma=gamma,\n beta=beta,\n num_groups=num_groups,\n axis=1,\n epsilon=epsilon,\n center=True,\n scale=True,\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def create_batch_norm_layer(prev, n, activation):\n init = tf.keras.initializers.VarianceScaling(mode='fan_avg')\n layer = tf.keras.layers.Dense(\n units=n, kernel_initializer=init, name='layer')\n epsilon = 1e-8\n\n base = layer(prev)\n gamma = tf.Variable(tf.constant(1.0, shape=[n]), trainable=True)\n beta = tf.Variable(tf.constant(0.0, shape=[n]), trainable=True)\n mean, variance = tf.nn.moments(base, axes=[0])\n Z = tf.nn.batch_normalization(base, mean=mean,\n variance=variance,\n offset=beta,\n scale=gamma,\n variance_epsilon=epsilon)\n return activation(Z)",
"def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')",
"def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)",
"def batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n):\n return FunctionLib.apply(\n 'BatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum)",
"def convert_l2normalization(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mode = attrs.get(\"mode\", \"instance\")\n\n if mode != \"channel\":\n raise AttributeError(\"L2Normalization: ONNX currently supports channel mode only\")\n\n l2norm_node = onnx.helper.make_node(\n \"LpNormalization\",\n input_nodes,\n [name],\n axis=1, # channel only\n name=name\n )\n return [l2norm_node]",
"def BatchNorm(name=None, decay=0.9, epsilon=1.0e-5):\n return ConstructionWrapper.create(BatchNormLayerImpl,\n name=name,\n decay=decay,\n epsilon=epsilon)",
"def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn",
"def _bn_relu(self, input):\n depth = input.get_shape().as_list()[-1]\n\n # mean and variance calc on batch-height-width dimension\n mean, var = tf.nn.moments(input, axes=[0, 1, 2])\n beta = tf.Variable(tf.zeros([depth]), name='beta')\n gamma = self._get_weight_variable([depth], name='gamma')\n\n bn_out = tf.nn.batch_norm_with_global_normalization(input, mean, var, beta, gamma, 0.001,\n scale_after_normalization=True)\n\n out = tf.nn.relu(bn_out)\n\n return out",
"def batch_norm_pattern():\n pattern = is_op(\"nn.batch_norm\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant()\n )\n pattern = is_tuple_get_item(pattern)\n return pattern",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = tensor\n return x",
"def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}",
"def initialize_batch_norm_eval(\n module: Union[BatchNorm1d, BatchNorm2d, BatchNorm3d]\n) -> Union[BatchNorm1d, BatchNorm2d, BatchNorm3d]:\n module.running_mean = rand_like(module.running_mean)\n module.running_var = rand_like(module.running_var)\n module.weight.data = rand_like(module.weight)\n module.bias.data = rand_like(module.bias)\n return module.train(False)",
"def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x",
"def convert_batchnorm_parameters(model: torch.nn.Module, bn: Union[torch.nn.BatchNorm1d, torch.nn.BatchNorm2d]):\n with utils.in_eval_mode(model), torch.no_grad():\n gamma = bn.weight\n beta = bn.bias\n running_mean = bn.running_mean\n inv_sigma = torch.rsqrt(bn.running_var + bn.eps)\n\n weight = gamma*inv_sigma\n bias = beta - running_mean * weight\n\n # Update the values\n bn.eps = 0\n bn.track_running_stats = False\n bn.weight.copy_(weight.clone().detach())\n bn.bias.copy_(bias.clone().detach())\n bn.running_mean = torch.zeros(bn.running_mean.shape, device=bn.running_mean.device, dtype=bn.running_mean.dtype)\n bn.running_var = torch.ones(bn.running_var.shape, device=bn.running_var.device, dtype=bn.running_var.dtype)",
"def batch_norm(x, train, init, act=None, name=None, eps=1e-5, decay=0.9):\n\n return tf.contrib.layers.batch_norm(x,\n decay=decay,\n epsilon=eps,\n scale=True,\n param_initializers=init,\n is_training=train,\n scope=name,\n activation_fn=act,\n updates_collections=None)",
"def _generate_batch_norms(self, Node_Sizes):\n batchnorms = [None for _ in range(len(Node_Sizes)-1)]\n for i in range(len(Node_Sizes)-1):\n batchnorms[i] = nn.BatchNorm1d(Node_Sizes[i])\n\n return batchnorms",
"def BatchNormalization(inputs, data_format):\n return tf.layers.BatchNormalization(axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY,\n epsilon=_BATCH_NORM_EPSILON,\n scale=True)(inputs)",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])"
] | [
"0.6464468",
"0.6376962",
"0.61264753",
"0.5883445",
"0.57059985",
"0.57012653",
"0.56393033",
"0.5572092",
"0.55718195",
"0.551708",
"0.54826975",
"0.5470176",
"0.5425131",
"0.5422429",
"0.54030514",
"0.5381643",
"0.5354029",
"0.5310859",
"0.52805203",
"0.5256326",
"0.522801",
"0.5202449",
"0.5193055",
"0.51129735",
"0.5110933",
"0.5091279",
"0.50824463",
"0.50599813",
"0.5055482",
"0.50535715"
] | 0.7146321 | 0 |
Map MXNet's tanh operator attributes to onnx's Tanh operator and return the created node. | def convert_tanh(node, **kwargs):
return create_basic_op_node('Tanh', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tanh()))",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def convert_tan(node, **kwargs):\n return create_basic_op_node('Tan', node, kwargs)",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def tanh(input, inplace=False):\n return FunctionLib.apply(\n 'Tanh', input.device, [input],\n outputs=[input if inplace else None])",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def create_tanh(x, bias):\n\n return tf.nn.tanh(tf.nn.bias_add(x, bias))",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def tanh(self):\t\t\t\t\n\t\tval = np.tanh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = 1 / np.power(np.cosh(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def tanh(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.tanh(obj.val)\n\t\tder = 1-np.tanh(obj.val)**2\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.tanh(obj)",
"def layer(self, h, t):\n mr1h = torch.matmul(h, self.mr1.weight) # h => [m, d], self.mr1 => [d, k]\n mr2t = torch.matmul(t, self.mr2.weight) # t => [m, d], self.mr2 => [d, k]\n return torch.tanh(mr1h + mr2t)",
"def test_get_hyperflex_node_by_moid(self):\n pass",
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def tanh(d: D) -> NumDict:\n\n return (2 * sigmoid(d)) - 1",
"def tanh(data):\n return _make.tanh(data)",
"def _rnn_tanh_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):\n if b_ih is None:\n igates = P.MatMul(False, True)(inputs, w_ih)\n hgates = P.MatMul(False, True)(hidden, w_hh)\n else:\n igates = P.MatMul(False, True)(inputs, w_ih) + b_ih\n hgates = P.MatMul(False, True)(hidden, w_hh) + b_hh\n return P.Tanh()(igates + hgates)",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def tanh(x):\n raise NotImplementedError",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def tanh(tensor, method=\"sigmoid\"):\n if method == \"sigmoid\":\n return _tanh_sigmoid(tensor)\n else:\n raise ValueError(f\"Invalid method {method} given for tanh function\")",
"def derived_tanh(x):\n return 1 - tanh(x)",
"def derived_tanh(x):\n return 1 - tanh(x)",
"def convert_tanhshrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = x - _op.tanh(x)\n g.add_node(op.output(\"Out\")[0], out)",
"def __tanh_old(self, x):\n return np.tanh(x)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def convert_sigmoid(node, **kwargs):\n return create_basic_op_node('Sigmoid', node, kwargs)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def tanh(self, x):\n self.x = x\n output = np.tanh(x)\n return output"
] | [
"0.66637284",
"0.66221046",
"0.6183689",
"0.6041877",
"0.6009536",
"0.60078925",
"0.6001827",
"0.59675324",
"0.59085935",
"0.5895642",
"0.5892149",
"0.58851",
"0.5857893",
"0.5766065",
"0.5756888",
"0.5718928",
"0.56849617",
"0.5675731",
"0.56675726",
"0.566487",
"0.5653042",
"0.5564181",
"0.5560124",
"0.5560124",
"0.55498105",
"0.5544433",
"0.5530131",
"0.55050004",
"0.54965985",
"0.54526687"
] | 0.7406357 | 0 |
Map MXNet's cos operator attributes to onnx's Cos operator and return the created node. | def convert_cos(node, **kwargs):
return create_basic_op_node('Cos', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.cos()))",
"def convert_acos(node, **kwargs):\n return create_basic_op_node('Acos', node, kwargs)",
"def cos(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.cos(obj.val)\n\t\tder = -np.sin(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.cos(obj)",
"def arccos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arccos()))",
"def cos(self) -> np.float64:\n\n return (self.node2.x - self.node1.x) / self.get_length()",
"def cos(self):\n\t\tval = np.cos(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = -np.sin(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def cos(data):\n return _make.cos(data)",
"def cos(x):\n raise NotImplementedError",
"def create_dot1p_to_cos_mapping(self, ports, rx_attr_flag=False, **kwargs):\n pass",
"def cos(tensor):\n return _elementary_op(tensor, np.cos, lambda x: -np.sin(x))",
"def cos(self, a):\n return math.cos(a)",
"def cos(x):\n if isinstance(x, int):\n x = Expression(x)\n return _cos(x)",
"def _get_cos_dscp(self):\n return self.__cos_dscp",
"def modify_dot1p_to_cos_mapping(self, ports, rx_attr_flag=False, **kwargs):\n pass",
"def cos(q_1: Q) -> Q:\n\n end_q_type = f\"cos({q_1.q_type})\"\n\n abs_v = abs_of_vector(q_1)\n\n if abs_v.t == 0:\n return Q([math.cos(q_1.t), 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n\n sint = math.sin(q_1.t)\n cost = math.cos(q_1.t)\n sinhR = math.sinh(abs_v.t)\n coshR = math.cosh(abs_v.t)\n\n k = -1 * sint * sinhR / abs_v.t\n\n q_cos = Q()\n q_cos.t = cost * coshR\n q_cos.x = k * q_1.x\n q_cos.y = k * q_1.y\n q_cos.z = k * q_1.z\n\n q_cos.q_type = end_q_type\n q_cos.representation = q_1.representation\n\n return q_cos",
"def cos(angle):\n return math.cos(math.radians(angle))",
"def _get_dscp_cos(self):\n return self.__dscp_cos",
"def arccos(x):\n raise NotImplementedError",
"def cosmo(self):\n return self.cls(*self.cls_args, **self.cls_kwargs)",
"def cos(x):\n return 0.0",
"def Cos(num):\n return math.cos(float(num))",
"def cos(self):\r\n getcontext().prec += 2\r\n re = cos(self._real) * cosh(self._imag)\r\n im = sin(self._real) * sinh(self._imag)\r\n ans = self.__class__(re, -im)\r\n getcontext().prec -= 2\r\n return +ans",
"def cos1(self,k1,k2,cos12):\n return (-k1 - k2*cos12)/self.k3Length(k1, k2, cos12)",
"def acos(data):\n return _make.acos(data)",
"def phon_constructor(loader, node): \n value = loader.construct_scalar(node)\n stem, affix = [normalize(s) for s in value.split('+')]\n return Phon(stem, affix)",
"def cosines_to_global(self):\n r = Rotation.from_matrix(self.R2global())\n a, b, g = r.as_euler('xyz', degrees=False)\n return np.cos(a), np.cos(b), np.cos(g)",
"def cos_sim(com_feat,ref_feat):\n # Fill this in\n a = numpy.squeeze(com_feat)\n b = numpy.squeeze(ref_feat)\n return numpy.dot(a, b) / (numpy.linalg.norm(a) * numpy.linalg.norm(b))",
"def acos(self, x):\n return self.arccos(x)",
"def arccosh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arccosh()))",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node"
] | [
"0.68186814",
"0.64716417",
"0.6203887",
"0.6197016",
"0.6076361",
"0.60689795",
"0.6031383",
"0.5917666",
"0.5856112",
"0.5803058",
"0.5774136",
"0.57586396",
"0.5689945",
"0.56162506",
"0.56064403",
"0.5498463",
"0.5487711",
"0.5412239",
"0.53912306",
"0.53586626",
"0.5355759",
"0.5351919",
"0.53297335",
"0.5240472",
"0.52354205",
"0.52218",
"0.5218679",
"0.5214423",
"0.52128655",
"0.5180892"
] | 0.77887577 | 0 |
Map MXNet's sin operator attributes to onnx's Sin operator and return the created node. | def convert_sin(node, **kwargs):
return create_basic_op_node('Sin', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sin(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sin()))",
"def sin(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.sin(obj.val)\n\t\tder = np.cos(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.sin(obj)",
"def sin(self):\n\t\tval = np.sin(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.cos(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def sinh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sinh()))",
"def sin(data):\n return _make.sin(data)",
"def sin(x):\n raise NotImplementedError",
"def sin(self) -> np.float64:\n\n return (self.node2.y - self.node1.y) / self.get_length()",
"def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)",
"def sin(x):\n if isinstance(x, int):\n x = Expression(x)\n return _sin(x)",
"def sin(self, a):\n return math.sin(a)",
"def sinwave(scene):\n # create an empty homogeneous transformation\n matrix = np.eye(4)\n # set Y as cos of time\n matrix[1][3] = np.cos(time.time()) * 2\n # set Z as sin of time\n matrix[2][3] = np.sin(time.time()) * 3\n\n # take one of the two spheres arbitrarily\n node = s.graph.nodes_geometry[0]\n # apply the transform to the node\n scene.graph.update(node, matrix=matrix)",
"def sin(tensor):\n return _elementary_op(tensor, np.sin, np.cos)",
"def xsin(x):\n return x + tf.sin(x)",
"def sin(self):\r\n getcontext().prec += 2\r\n re = sin(self._real) * cosh(self._imag)\r\n im = cos(self._real) * sinh(self._imag)\r\n ans = self.__class__(re, im)\r\n getcontext().prec -= 2\r\n return +ans",
"def get_bprop_sin(self):\n cos = P.Cos()\n\n def bprop(x, out, dout):\n dx = dout*cos(x)\n return (dx,)\n return bprop",
"def sinh(self):\t\t\n\t\tval = np.sinh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.cosh(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def f_sin(k):\n return k * pk(k, suppression)",
"def sin(angle):\n return math.sin(math.radians(angle))",
"def sinh(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.sinh(obj.val)\n\t\tder = np.cosh(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.sinh(obj)",
"def convert_sigmoid(node, **kwargs):\n return create_basic_op_node('Sigmoid', node, kwargs)",
"def Sin(num):\n return math.sin(float(num))",
"def sinh(self):\r\n getcontext().prec += 2\r\n re = sinh(self._real) * cos(self._imag)\r\n im = cosh(self._real) * sin(self._imag)\r\n ans = self.__class__(re, im)\r\n getcontext().prec -= 2\r\n return +ans",
"def sin(x):\r\n # see decorator for function body\r",
"def f_sin(k):\n return k * k * k * pk(k, suppression)",
"def __generate_sin(self, phase_shift=0):\n amp = self.SIGNAL_AMPLITUDE / 2\n fs = self.sampling_freq\n duration = self.DURATION\n f = self.pinger_freq\n b = self.BIAS\n return (amp * np.sin(2 * np.pi * np.arange(fs * duration) * f / fs +\n phase_shift) + b).astype(np.float32)",
"def sin(q_1: Q) -> Q:\n\n end_q_type = f\"sin({q_1.q_type})\"\n\n abs_v = abs_of_vector(q_1)\n\n if abs_v.t == 0:\n return Q([math.sin(q_1.t), 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n\n sint = math.sin(q_1.t)\n cost = math.cos(q_1.t)\n sinhR = math.sinh(abs_v.t)\n coshR = math.cosh(abs_v.t)\n\n k = cost * sinhR / abs_v.t\n\n q_sin = Q()\n q_sin.t = sint * coshR\n q_sin.x = k * q_1.x\n q_sin.y = k * q_1.y\n q_sin.z = k * q_1.z\n\n q_sin.q_type = end_q_type\n q_sin.representation = q_1.representation\n\n return q_sin",
"def addSin(self, scale=(2.*numpy.pi), value=1.0):\n self.fimage = None\n z = numpy.sin(2.0*numpy.pi*self.xx/float(scale)) * numpy.sin(2.0*numpy.pi*self.yy/float(scale))\n self.image += z * value\n return",
"def generateOutputs(self):\n return np.sin(np.pi*self.x)",
"def convert_asin(node, **kwargs):\n return create_basic_op_node('Asin', node, kwargs)",
"def _sin(pot, masses, kT, L=4, tau=10.0 * units.FS_TO_AU, damptime=10.0 * units.FS_TO_AU, nc=5, dim=3, mass_weight=False):\n return SIN_RESPA([pot], [], masses, kT, L=L, tau=tau, damptime=damptime, nc=nc, dim=dim, mass_weight=mass_weight)"
] | [
"0.6925629",
"0.63005567",
"0.62578577",
"0.6207433",
"0.60740703",
"0.5979021",
"0.5886588",
"0.5854158",
"0.5850431",
"0.58287054",
"0.57712215",
"0.5768085",
"0.57123333",
"0.56800777",
"0.55576193",
"0.54214805",
"0.54193765",
"0.5405628",
"0.53958225",
"0.53906035",
"0.53665364",
"0.5243222",
"0.5240192",
"0.518486",
"0.5171389",
"0.51530826",
"0.51523393",
"0.5145514",
"0.51144266",
"0.50993794"
] | 0.7659397 | 0 |
Map MXNet's tan operator attributes to onnx's tan operator and return the created node. | def convert_tan(node, **kwargs):
return create_basic_op_node('Tan', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_tanh(node, **kwargs):\n return create_basic_op_node('Tanh', node, kwargs)",
"def tan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tan()))",
"def tan(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.tan(obj.val)\n\t\tder = 1+np.tan(obj.val)**2\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.tan(obj)",
"def tanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tanh()))",
"def tan(self, a):\n return math.tan(a)",
"def tan(data):\n return _make.tan(data)",
"def tan(self):\n\t\t# Ensure that no values in self.val are of the form (pi/2 + k*pi) \n\t\tvalues = map(lambda x: ((x / np.pi) - 0.5) % 1 == 0.0, self.val)\n\t\tif any(values):\n\t\t\traise ValueError(\"Tangent not valid at pi/2, -pi/2.\")\n\t\tval = np.tan(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.power(1 / np.cos(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = np.multiply(to_multiply, self.der)\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def tan(x):\n raise NotImplementedError",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def tan(tensor):\n return _elementary_op(tensor, np.tan, lambda x: 1 / (np.cos(x) ** 2))",
"def world_to_tanp(self, ra, dec):\n x, y = ra, dec\n return x, y",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def create_tanh(x, bias):\n\n return tf.nn.tanh(tf.nn.bias_add(x, bias))",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def convert_atan(node, **kwargs):\n return create_basic_op_node('Atan', node, kwargs)",
"def tan(x):\r\n # see decorator for function body\r",
"def tanh(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.tanh(obj.val)\n\t\tder = 1-np.tanh(obj.val)**2\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.tanh(obj)",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node",
"def convert_dot(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n input_node_a = input_nodes[0]\n input_node_b = input_nodes[1]\n\n trans_a_node = None\n trans_b_node = None\n\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if trans_a:\n input_node_a = op_name + \"_a\"\n trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)\n if trans_b:\n input_node_b = op_name + \"_b\"\n trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_node_a, input_node_b],\n outputs=[name],\n name=name\n )\n\n if not trans_a and not trans_b:\n return [matmul_node]\n elif trans_a and not trans_b:\n return [trans_a_node, matmul_node]\n elif trans_b and not trans_a:\n return [trans_b_node, matmul_node]\n else:\n return [trans_a_node, trans_b_node, matmul_node]",
"def tanh(input, inplace=False):\n return FunctionLib.apply(\n 'Tanh', input.device, [input],\n outputs=[input if inplace else None])",
"def tanh(data):\n return _make.tanh(data)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def __tanh_old(self, x):\n return np.tanh(x)",
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def create_helper_trans_node(input_name, output_name, perm=None):\n attrs = {}\n if perm is not None:\n attrs['perm'] = perm\n trans_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_name],\n outputs=[output_name],\n name=output_name,\n **attrs\n )\n return [trans_node]",
"def convert_sigmoid(node, **kwargs):\n return create_basic_op_node('Sigmoid', node, kwargs)",
"def tanh(self, x):\n self.x = x\n output = np.tanh(x)\n return output"
] | [
"0.7066365",
"0.69418204",
"0.62932664",
"0.6291011",
"0.6098471",
"0.6070739",
"0.60638016",
"0.6008856",
"0.595648",
"0.5951899",
"0.581182",
"0.57857805",
"0.57706344",
"0.5729434",
"0.5726495",
"0.5701578",
"0.56677294",
"0.5623807",
"0.56201595",
"0.560823",
"0.5586857",
"0.5565489",
"0.5509559",
"0.5492724",
"0.5486368",
"0.5470053",
"0.54476357",
"0.5417114",
"0.54128027",
"0.5404889"
] | 0.760391 | 0 |
Map MXNet's acos operator attributes to onnx's acos operator and return the created node. | def convert_acos(node, **kwargs):
return create_basic_op_node('Acos', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_cos(node, **kwargs):\n return create_basic_op_node('Cos', node, kwargs)",
"def create_dot1p_to_cos_mapping(self, ports, rx_attr_flag=False, **kwargs):\n pass",
"def arccos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arccos()))",
"def modify_dot1p_to_cos_mapping(self, ports, rx_attr_flag=False, **kwargs):\n pass",
"def cos(self):\n return type(self)(self.parent(),\n self._simplify(self._express.cos()))",
"def acos(data):\n return _make.acos(data)",
"def acos(self, a):\n return math.acos(a)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def acos(self, x):\n return self.arccos(x)",
"def acos (cls, x) :\n return Angle_R (math.acos (x))",
"def arccos(x):\n raise NotImplementedError",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def _arccosine(self, s1, s2, tf_embs):\n tf_pi = tf.constant(np.pi, dtype=tf.float64)\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')\n norms1 = tf.gather(tf_norms, s1)\n norms2 = tf.gather(tf_norms, s2)\n dot = tf.matmul(mat1, tf.transpose(mat2))\n norms = tf.matmul(norms1, tf.transpose(norms2))\n # We clip values due to numerical errors\n # which put some values outside the arccosine range.\n cosine = tf.clip_by_value(dot / norms, -1, 1)\n angle = tf.acos(cosine)\n # The 0 vector has norm 0, which generates a NaN.\n # We catch these NaNs and replace them with pi,\n # which ends up returning 0 similarity.\n angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)\n return 1 - (angle / tf_pi)",
"def cos(self) -> np.float64:\n\n return (self.node2.x - self.node1.x) / self.get_length()",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op",
"def comp_add_ao(self):\n scene = self.set_as_active()\n scene.use_nodes = True\n tree = scene.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_rlayer = tree.nodes.new('CompositorNodeRLayers')\n node_rlayer.location = -300, 100\n node_rlayer.scene = scene\n node_rlayer.layer = w_var.rlname\n\n node_mixcolor = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor.location = 0, 50\n node_mixcolor.blend_type = 'MULTIPLY'\n node_mixcolor.inputs[0].default_value = 0.730\n\n node_comp = tree.nodes.new('CompositorNodeComposite')\n node_comp.location = 300, 130\n\n node_viewer = tree.nodes.new('CompositorNodeViewer')\n node_viewer.location = 300, -100\n\n # connecting the nodes\n links = tree.links\n links.new(node_rlayer.outputs[0], node_mixcolor.inputs[1])\n links.new(node_rlayer.outputs[10], node_mixcolor.inputs[2])\n links.new(node_mixcolor.outputs[0], node_comp.inputs[0])\n links.new(node_mixcolor.outputs[0], node_viewer.inputs[0])\n\n for node in tree.nodes:\n node.select = False",
"def connectOri(cls, srcObj, pntInfoNode, anchorGrp):\n\t\t# Get Z Vector from normalizedNormal and normalizedTangent\n\t\tzVecNode = cmds.shadingNode('vectorProduct', asUtility=True, n=srcObj + '_Zvec')\n\t\tcmds.setAttr('%s.operation' % zVecNode, 2)\n\n\t\tcmds.connectAttr('%s.result.normalizedNormal' % pntInfoNode, '%s.input1' % zVecNode, force=True)\n\t\tif cmds.nodeType(pntInfoNode) == 'pointOnSurfaceInfo': # In case nurbs surface\n\t\t\tcmds.connectAttr('%s.result.normalizedTangentU' % pntInfoNode, '%s.input2' % zVecNode, force=True)\n\t\telse: # In case curve\n\t\t\tcmds.connectAttr('%s.result.normalizedTangent' % pntInfoNode, '%s.input2' % zVecNode, force=True)\n\n\t\t# Compose matrix node\n\t\tmatrix = cmds.shadingNode('fourByFourMatrix', asUtility=True, n=srcObj + '_matrix')\n\t\tif cmds.nodeType(pntInfoNode) == 'pointOnSurfaceInfo':\n\t\t\t# X Vector is normalizedTangentU\n\t\t\tcmds.connectAttr('%s.normalizedTangentUX' % pntInfoNode, '%s.in00' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedTangentUY' % pntInfoNode, '%s.in01' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedTangentUZ' % pntInfoNode, '%s.in02' % matrix, force=True)\n\n\t\t\t# Y Vector is normalizedNormal\n\t\t\tcmds.connectAttr('%s.normalizedNormalX' % pntInfoNode, '%s.in10' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedNormalY' % pntInfoNode, '%s.in11' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedNormalZ' % pntInfoNode, '%s.in12' % matrix, force=True)\n\n\t\telse: # In case curve\n\t\t\t# X Vector is curve's normalizedTangent\n\t\t\tcmds.connectAttr('%s.normalizedTangentX' % pntInfoNode, '%s.in00' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedTangentY' % pntInfoNode, '%s.in01' % matrix, force=True)\n\t\t\tcmds.connectAttr('%s.normalizedTangentZ' % pntInfoNode, '%s.in02' % matrix, force=True)\n\n\t\t\t# Y Vector is normalizedNormal\n\t\t\tcmds.setAttr('%s.in10' % matrix, cmds.getAttr('%s.normalizedNormalX' % pntInfoNode))\n\t\t\tcmds.setAttr('%s.in11' % matrix, cmds.getAttr('%s.normalizedNormalY' % pntInfoNode))\n\t\t\tcmds.setAttr('%s.in12' % matrix, cmds.getAttr('%s.normalizedNormalZ' % pntInfoNode))\n\n\t\t# Z Vector is the result of cross product with normal and tangent\n\t\tcmds.connectAttr('%s.outputX' % zVecNode, '%s.in20' % matrix, force=True)\n\t\tcmds.connectAttr('%s.outputY' % zVecNode, '%s.in21' % matrix, force=True)\n\t\tcmds.connectAttr('%s.outputZ' % zVecNode, '%s.in22' % matrix, force=True)\n\n\t\tcmds.connectAttr('%s.positionX' % pntInfoNode, '%s.in30' % matrix, force=True)\n\t\tcmds.connectAttr('%s.positionY' % pntInfoNode, '%s.in31' % matrix, force=True)\n\t\tcmds.connectAttr('%s.positionZ' % pntInfoNode, '%s.in32' % matrix, force=True)\n\n\t\t# Decompose matrix\n\t\tdeMatrix = cmds.shadingNode('decomposeMatrix', asUtility=True, n=srcObj + 'deMatrix')\n\t\tcmds.connectAttr('%s.output' % matrix, '%s.inputMatrix' % deMatrix)\n\n\t\t# Connect to anchor group\n\t\tcmds.connectAttr('%s.outputTranslate' % deMatrix, '%s.translate' % anchorGrp, force=True)\n\t\tcmds.connectAttr('%s.outputRotate' % deMatrix, '%s.rotate' % anchorGrp, force=True)",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def _action_op_u1(self, plaq):\n # if self.link_type == 'U1':\n # return np.cos(plaq)\n return tf.math.cos(plaq)",
"def acos(self):\r\n getcontext().prec += 2\r\n arg = self + (self*self - 1).sqrt1()\r\n ans = self.__class__(0, -1) * arg.ln()\r\n getcontext().prec -= 2\r\n return +ans",
"def convert_atan(node, **kwargs):\n return create_basic_op_node('Atan', node, kwargs)",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def cos(self, a):\n return math.cos(a)",
"def acos(x):\n return 0.0",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node",
"def get_CC_operators():\n i = symbols('i', below_fermi=True, cls=Dummy)\n a = symbols('a', above_fermi=True, cls=Dummy)\n t_ai = AntiSymmetricTensor('t', (a,), (i,))\n ai = NO(Fd(a)*F(i))\n i, j = symbols('i,j', below_fermi=True, cls=Dummy)\n a, b = symbols('a,b', above_fermi=True, cls=Dummy)\n t_abij = AntiSymmetricTensor('t', (a, b), (i, j))\n abji = NO(Fd(a)*Fd(b)*F(j)*F(i))\n\n T1 = t_ai*ai\n T2 = Rational(1, 4)*t_abij*abji\n return (T1, T2)",
"def acos(value): # pragma: no cover\n if value < -1:\n value = -1.0\n elif value > 1:\n value = 1.0\n return np.arccos(value)",
"def newChemAtom(self, **attrlinks):\n return ChemAtom(self, **attrlinks)"
] | [
"0.66714954",
"0.5832328",
"0.5820055",
"0.54579884",
"0.5427718",
"0.54265505",
"0.5377508",
"0.5375877",
"0.5216904",
"0.5200265",
"0.5194074",
"0.51886547",
"0.51454943",
"0.50762826",
"0.50367916",
"0.50269526",
"0.50130564",
"0.50080115",
"0.498638",
"0.49777353",
"0.49717242",
"0.49679458",
"0.4958146",
"0.49454916",
"0.4935142",
"0.49246165",
"0.4923161",
"0.49140206",
"0.49071792",
"0.48961923"
] | 0.72537535 | 0 |
Map MXNet's asin operator attributes to onnx's asin operator and return the created node. | def convert_asin(node, **kwargs):
return create_basic_op_node('Asin', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)",
"def arcsin(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arcsin()))",
"def in_(self, other):\n if hasattr(other, 'cypher'):\n results = other.all()\n t = []\n for x in results:\n t.append(getattr(x, self.label))\n else:\n t = other\n return InClauseElement(self, t)",
"def __get_isin(instrument):\n return instrument['isin']",
"def in_(self, other: Any) -> ColumnOperators:\n return self.operate(in_op, other)",
"def inE(self, *labels, **kwargs):\r\n return self._simple_traversal('inE', labels, **kwargs)",
"def inE(self, *labels, **kwargs):\n return self._simple_traversal('inE', labels, **kwargs)",
"def asin(data):\n return _make.asin(data)",
"def toInfix(self):\n return _libsbml.Association_toInfix(self)",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def asin(self, a):\n return math.asin(a)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def singa_op_to_onnx_node(cls, op, op_t):\n optype = cls._get_singa_op_type(op)\n # wether the operator needs special handler\n if optype in cls._special_operators:\n translator = getattr(cls, cls._special_operators[optype])\n else:\n translator = cls._common_singa_tensor_to_onnx_node\n nodes = translator(op, op_t)\n if not isinstance(nodes, collections.Iterable):\n nodes = [nodes]\n nodes = [node for node in nodes if node is not None]\n return nodes",
"def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node",
"def asin(self, asin):\n\n self._asin = asin",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def inV(self, *labels, **kwargs):\r\n return self._simple_traversal('inV', labels, **kwargs)",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def convert_atan(node, **kwargs):\n return create_basic_op_node('Atan', node, kwargs)",
"def inV(self, *labels, **kwargs):\n return self._simple_traversal('inV', labels, **kwargs)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _create_cast(cls, onnx_node, inputs, opset_version):\n to = onnx_node.getattr(\"to\")\n # singa only supports float32 and int32\n map_dict = {\n TensorProto.FLOAT: tensor.float32, # FLOAT to float32\n TensorProto.UINT8: None, # UINT8\n TensorProto.INT8: tensor.int32, # INT8 to int32\n TensorProto.UINT16: None, # UINT16\n TensorProto.INT16: tensor.int32, # INT16 to int32\n TensorProto.INT32: tensor.int32, # INT32 to int32\n TensorProto.INT64: tensor.int32, # INT64 to int32\n TensorProto.STRING: None, # stirng\n TensorProto.BOOL: None, # bool\n }\n to = map_dict[to]\n assert to != None, \"not support cast type: {}\".format(to)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(to)",
"def asin(self, x):\n return self.arcsin(x)",
"def convert_sin(node, **kwargs):\n return create_basic_op_node('Sin', node, kwargs)",
"def mapping_to(\n self, node_name: str, input_name: str, fan_in_index: Optional[int] = None\n ) -> \"InputMapping\":\n check.str_param(node_name, \"node_name\")\n check.str_param(input_name, \"input_name\")\n check.opt_int_param(fan_in_index, \"fan_in_index\")\n\n return InputMapping(\n graph_input_name=self.name,\n mapped_node_name=node_name,\n mapped_node_input_name=input_name,\n fan_in_index=fan_in_index,\n graph_input_description=self.description,\n dagster_type=self.dagster_type,\n )",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def _create_elu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)",
"def toInfix(self, usingId=False):\n return _libsbml.FbcAssociation_toInfix(self, usingId)"
] | [
"0.5615745",
"0.5525026",
"0.5475919",
"0.5348382",
"0.52133733",
"0.512875",
"0.5086857",
"0.50501585",
"0.49973157",
"0.49967095",
"0.49640378",
"0.49598986",
"0.49447057",
"0.49313256",
"0.49242523",
"0.4889783",
"0.48892346",
"0.48825702",
"0.4869105",
"0.48690882",
"0.48423418",
"0.4827175",
"0.4776217",
"0.47752482",
"0.47452107",
"0.4714077",
"0.47048306",
"0.46999744",
"0.469538",
"0.46870238"
] | 0.68138564 | 0 |
Map MXNet's atan operator attributes to onnx's atan operator and return the created node. | def convert_atan(node, **kwargs):
return create_basic_op_node('Atan', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_acos(node, **kwargs):\n return create_basic_op_node('Acos', node, kwargs)",
"def atan (cls, x) :\n return Angle_R (math.atan (x))",
"def convert_tan(node, **kwargs):\n return create_basic_op_node('Tan', node, kwargs)",
"def convert_asin(node, **kwargs):\n return create_basic_op_node('Asin', node, kwargs)",
"def atan(self, a):\n return math.atan(a)",
"def atan(self, x):\n return self.arctan(x)",
"def arctan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arctan()))",
"def atan(data):\n return _make.atan(data)",
"def atan(self):\r\n getcontext().prec += 2\r\n im1 = self.__class__(0, 1) * self\r\n arg = (1 - im1) / (1 + im1)\r\n ans = self.__class__(0, 0.5) * arg.ln()\r\n getcontext().prec -= 2\r\n return +ans",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def convert_tanh(node, **kwargs):\n return create_basic_op_node('Tanh', node, kwargs)",
"def atan(x):\n return 0.0",
"def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle",
"def arctan(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.arctan(obj.val)\n\t\tder = 1 / (1 + (obj.val) ** 2)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val,der)\n\telse:\n\t\treturn np.arctan(obj)",
"def atan2 (cls, y, x) :\n return Angle_R (math.atan2 (y, x))",
"def _angle_from_tan(\n axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool\n):\n\n i1, i2 = {\"X\": (2, 1), \"Y\": (0, 2), \"Z\": (1, 0)}[axis]\n if horizontal:\n i2, i1 = i1, i2\n even = (axis + other_axis) in [\"XY\", \"YZ\", \"ZX\"]\n if horizontal == even:\n return torch.atan2(data[..., i1], data[..., i2])\n if tait_bryan:\n return torch.atan2(-data[..., i2], data[..., i1])\n return torch.atan2(data[..., i2], -data[..., i1])",
"def Atn(num):\n return math.atan(float(num))",
"def _atand(v):\n return math.degrees(math.atan(v))",
"def arctan(x):\n raise NotImplementedError",
"def to_axang(self) -> Tuple[np.ndarray, float]:\n return self.to_axisangle()",
"def _atan2(y, x):\n tan = tf.atan(y / (x + 1e-8)) # this returns in -pi/2 .. pi/2\n\n one_map = tf.ones_like(tan)\n\n # correct quadrant error\n correction = tf.where(tf.less(x + 1e-8, 0.0), 3.141592653589793*one_map, 0.0*one_map)\n tan_c = tan + correction # this returns in -pi/2 .. 3pi/2\n\n # bring to positive values\n correction = tf.where(tf.less(tan_c, 0.0), 2*3.141592653589793*one_map, 0.0*one_map)\n tan_zero_2pi = tan_c + correction # this returns in 0 .. 2pi\n\n # make symmetric\n correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793), -2*3.141592653589793*one_map, 0.0*one_map)\n tan_final = tan_zero_2pi + correction # this returns in -pi .. pi\n return tan_final",
"def point_to_node_azimuth(self, point, node=None, out=None):\n return point_to_point_azimuth(point, self._get_coord_at_node(node), out=out)",
"def antipode(self, element):\n return self(element.lift().antipode())",
"def tan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tan()))",
"def arctanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arctanh()))",
"def atan(val):\r\n if not isinstance(val, dec.Decimal):\r\n val = dec.Decimal(val)\r\n # atan(-x) = -atan(x)\r\n sgn = dec.Decimal(1).copy_sign(val)\r\n val = abs(val)\r\n pi_val = pi()\r\n context = getcontext()\r\n context.prec += 2\r\n if val == dec.Decimal('Infinity'):\r\n ans = (pi_val / 2).copy_sign(sgn)\r\n context.prec -= 2\r\n return +ans\r\n # atan(x) = pi/2 - atan(1/x)\r\n if val > 1:\r\n off = pi_val / 2\r\n val = 1 / val\r\n else:\r\n off = 0\r\n # atan(x) = atan(y) + atan((x - y) / (1 + x*y))\r\n if val > 0.5:\r\n at_hlf = atan_half()\r\n val = (val - dec.Decimal(0.5)) / (1 + val/2)\r\n else:\r\n at_hlf = 0\r\n num1 = 1\r\n num2 = val * val\r\n den1 = 3\r\n den2 = 1 + num2\r\n term = val / den2\r\n total = term\r\n while True:\r\n term *= 4 * num1 * num1 * num2 / den1 / (den1 - 1) / den2\r\n if term == 0 or term.logb() < total.logb() - context.prec:\r\n if total == 0 or abs(total).logb() < -context.prec:\r\n context.prec -= 2\r\n return sgn * dec.Decimal(0)\r\n total += at_hlf\r\n if off != 0:\r\n total = off - total \r\n context.prec -= 2\r\n return +(sgn * total)\r\n total += term\r\n num1 += 1\r\n den1 += 2",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def point_to_node_angle(self, point, node=None, out=None):\n return point_to_point_angle(point, self._get_coord_at_node(node), out=out)"
] | [
"0.65847087",
"0.6237189",
"0.6214043",
"0.60928404",
"0.6050979",
"0.5992298",
"0.5854942",
"0.5778332",
"0.5749585",
"0.5689177",
"0.56575197",
"0.5505356",
"0.5486537",
"0.5469867",
"0.54422086",
"0.5406414",
"0.5399801",
"0.5374126",
"0.5345535",
"0.5344025",
"0.53360784",
"0.5325639",
"0.5313096",
"0.53042626",
"0.52776736",
"0.52589",
"0.52575076",
"0.5252731",
"0.5249143",
"0.52324283"
] | 0.78846675 | 0 |
Map MXNet's sigmoid operator attributes to onnx's Sigmoid operator and return the created node. | def convert_sigmoid(node, **kwargs):
return create_basic_op_node('Sigmoid', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def _create_hardsigmoid(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.2)\n beta = onnx_node.getattr(\"beta\", 0.5)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, beta)",
"def create_sigmoid(x, bias):\n\n return tf.nn.sigmoid(tf.nn.bias_add(x, bias))",
"def sigmoid(input, inplace=False):\n return FunctionLib.apply(\n 'Sigmoid', input.device, [input],\n outputs=[input if inplace else None])",
"def convert_logsigmoid(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.log(_op.tensor.sigmoid(x))\n g.add_node(op.output(\"Out\")[0], out)",
"def sigmoid(data):\n return _make.sigmoid(data)",
"def _tanh_to_sigmoid(x):\n return x * 0.5 + 0.5",
"def convert_hardsigmoid(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n alpha = float(attrs.get(\"alpha\", 0.2))\n beta = float(attrs.get(\"beta\", 0.5))\n\n node = onnx.helper.make_node(\n 'HardSigmoid',\n input_nodes,\n [name],\n alpha=alpha,\n beta=beta,\n name=name\n )\n return [node]",
"def activation_sigmoid_custom(self):\n self.value = 1 / (1 + np.e ** (-4.9 * self.value))",
"def convert_hard_sigmoid(g, op, block):\n\n slope = op.attr(\"slope\")\n offset = op.attr(\"offset\")\n x = g.get_node(op.input(\"X\")[0])\n out = x * _expr.const(slope) + _expr.const(offset)\n out = _op.clip(out, 0, 1)\n g.add_node(op.output(\"Out\")[0], out)",
"def sigmoid(module, x):\n _import_modules()\n if module in [np, ma]:\n return sp.special.sigmoid(x)\n elif module == torch:\n return module.sigmoid(x)\n elif module == jnp:\n return jax.nn.sigmoid(x)\n elif module == tf:\n return module.nn.sigmoid(x)\n raise UnknownModuleException(f\"Module {module.__name__} not supported.\")",
"def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_silu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.multiply(x, _op.sigmoid(x))\n g.add_node(op.output(\"Out\")[0], out)",
"def hardsigmoid(input, inplace=False):\n return FunctionLib.apply(\n 'HardSigmoid', input.device, [input],\n outputs=[input if inplace else None], alpha=1. / 6., beta=0.5)",
"def sigmoid_update_hid(self,x):\n \n sigmoid_activation = T.reshape(self.bhid, [self.num_hidden,1]) +\\\n T.dot(T.transpose(self.W),x)\n \n return T.nnet.sigmoid(sigmoid_activation)",
"def sigmoid_with_binary_xentropy(z):\n\treturn sigmoid(z)",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(x, exponent):\n \n return 1/(1+np.exp(-exponent*x))-0.5",
"def sigmoid(x):\n return 1.0/(1.0+exp(-x))",
"def test_sigmoid(self):\n activation_name = 'Sigmoid'\n args = {}\n\n activation = activation_factory.create(activation_name, **args)\n self.assertEqual(activation._get_name(), activation_name)\n\n x = torch.empty(10)\n y = activation(x)\n assert_array_equal(y, torch.sigmoid(x))",
"def _tanh_sigmoid(tensor):\n return 2 * sigmoid(2 * tensor) - 1",
"def sigmoid(self, x):\n self.x = x\n output = 1 / (1 + np.exp(-x))\n return output",
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))",
"def _sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(X,W,b):\n preActivation = np.dot(X, W) + b\n return (1.0)/(1.0 + np.exp(-preActivation))",
"def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n\treturn 1.0/(1.0+math.exp(-(x-0.5)*12.0))"
] | [
"0.7370613",
"0.7041682",
"0.6546022",
"0.648128",
"0.6382078",
"0.6377375",
"0.6302575",
"0.6283234",
"0.6230437",
"0.6167952",
"0.6156127",
"0.60304093",
"0.59833264",
"0.5979358",
"0.5970586",
"0.593866",
"0.59101063",
"0.5903408",
"0.5903408",
"0.5856696",
"0.58536077",
"0.58438665",
"0.58317214",
"0.5831614",
"0.58285683",
"0.5826949",
"0.58171135",
"0.579285",
"0.579285",
"0.5792262"
] | 0.7811297 | 0 |
Map MXNet's relu operator attributes to onnx's Relu operator and return the created node. | def convert_relu(node, **kwargs):
return create_basic_op_node('Relu', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0.)",
"def relu6(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0., max_value=6.)",
"def relu(x, name):\n\n with tf.name_scope(name):\n outputs = tf.nn.relu(x)\n # Return layer's output\n return outputs",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def convert_leakyrelu(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n initializer = kwargs[\"initializer\"]\n\n act_type = attrs.get(\"act_type\", \"leaky\")\n alpha = float(attrs.get(\"slope\", 0.25))\n\n act_name = {\"elu\": \"Elu\", \"leaky\": \"LeakyRelu\", \"prelu\": \"PRelu\",\n \"selu\": \"Selu\"}\n\n reshape_val_name = 'reshape' + str(kwargs[\"idx\"])\n input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n\n reshape_value = np.array([1, -1, 1, 1], dtype='int64')\n dims = np.shape(reshape_value)\n\n shape_node = onnx.helper.make_tensor_value_info(reshape_val_name, input_type, dims)\n initializer.append(\n onnx.helper.make_tensor(\n name=reshape_val_name,\n data_type=input_type,\n dims=dims,\n vals=reshape_value,\n raw=False,\n )\n )\n\n slope_op_name = 'slope' + str(kwargs[\"idx\"])\n\n lr_node = []\n if act_type == \"prelu\" or act_type == \"selu\":\n reshape_slope_node = onnx.helper.make_node(\n 'Reshape',\n inputs=[input_nodes[1], reshape_val_name],\n outputs=[slope_op_name],\n name=slope_op_name\n )\n\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=[input_nodes[0], slope_op_name],\n outputs=[name],\n name=name)\n\n lr_node.append(shape_node)\n lr_node.append(reshape_slope_node)\n lr_node.append(node)\n else:\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha)\n lr_node.append(node)\n return lr_node",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def _create_leakyrelu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.01)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def _relu(layer):\n return tf.nn.relu(layer)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def create_relu(x, bias):\n\n return tf.nn.relu(tf.nn.bias_add(x, bias))",
"def _create_elu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def lrelu(self):\n return self.add_layer(lrelu)",
"def convert_lrn(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n alpha = float(attrs.get(\"alpha\", 0.0001))\n beta = float(attrs.get(\"beta\", 0.75))\n bias = float(attrs.get(\"knorm\", 1.0))\n size = int(attrs.get(\"nsize\"))\n\n lrn_node = onnx.helper.make_node(\n \"LRN\",\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=size\n )\n\n return [lrn_node]",
"def convert_relu6(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.clip(x, 0.0, 6.0)\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def createRotoPaintNodeMI():\n return gr()",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def to_uml_json_node(**kwargs):\n return {\n \"id\": kwargs[\"id\"],\n \"ops\": [\n {\n \"op\": kwargs[\"op\"],\n \"name\": kwargs[\"name\"],\n \"path\": kwargs[\"path\"],\n \"metatype\": kwargs[\"metatype\"],\n \"stereotype\": kwargs[\"stereotype\"],\n \"attributes\": kwargs[\"attributes\"],\n }\n ],\n }",
"def getNode(self):\n node = Node.getNode(self)\n node.tag = 'relnode'\n return(node)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def node_mapping(self):\n ...",
"def convert_elu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n alpha = op.attr(\"alpha\")\n alpha = _expr.const(-1.0 * alpha, dtype=dtype)\n out = alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(x)) + _op.nn.relu(x)\n g.add_node(op.output(\"Out\")[0], out)"
] | [
"0.6200267",
"0.60108554",
"0.57318294",
"0.57223237",
"0.55151415",
"0.5508822",
"0.55065703",
"0.5483933",
"0.5419298",
"0.5369124",
"0.53125054",
"0.52744555",
"0.52538085",
"0.5234616",
"0.5223587",
"0.52179486",
"0.51698667",
"0.51464564",
"0.51462084",
"0.51363164",
"0.5132095",
"0.51043993",
"0.5093787",
"0.5085155",
"0.50844693",
"0.50824434",
"0.5080686",
"0.5075576",
"0.5019208",
"0.501465"
] | 0.7071261 | 0 |
Map MXNet's Activation operator attributes to onnx's Tanh/Relu operator and return the created node. | def convert_activation(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
act_type = attrs["act_type"]
    # Map MXNet activation names to their ONNX operator names. Most follow
    # the titlecase pattern mxnet_name.title(), but not all (e.g. softrelu -> Softplus).
act_types = {
"tanh": "Tanh",
"relu": "Relu",
"sigmoid": "Sigmoid",
"softrelu": "Softplus",
"softsign": "Softsign"
}
act_name = act_types.get(act_type)
if act_name:
node = onnx.helper.make_node(
act_name,
input_nodes,
[name],
name=name
)
else:
raise AttributeError(
"Activation %s not implemented or recognized in the converter" % act_type
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def build_activation(activation: str) -> nn.Module:\n if hasattr(nn, activation):\n return getattr(nn, activation)()\n elif activation == \"Swish\":\n return Swish()\n else:\n raise Exception(\"{} invalid activation function.\".format(activation))",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def get_activation_function(func_name):\n return {\n 'linear': lambda x: x,\n 'relu': lambda x: x * (x > 0),\n 'elu': lambda x: x * (x >= 0) + (T.exp(x) - 1) * (x < 0),\n 'softmax': T.nnet.softmax,\n 'tanh': T.tanh,\n 'log_softmax': log_softmax,\n 'sigmoid': T.nnet.sigmoid\n }[func_name]",
"def apply_activation(self, tens):\n if(self.activation == \"ReLU\"): # pylint: disable=no-else-return\n return tf.nn.relu(tens)\n elif(self.activation == \"Leaky_ReLU\"):\n return tf.nn.leaky_relu(tens)\n elif(self.activation == \"Tanh\"):\n return tf.nn.tanh(tens)\n elif(self.activation == \"Sigmoid\"):\n return tf.nn.sigmoid(tens)\n elif(self.activation == \"Linear\"):\n return tens\n else:\n raise InvalidActivationError(self.activation)",
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def activation(self):\n return self.__activation",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def output_layer_activation(x):\n return x",
"def add_activation(self, op, input_name, name=None, attr={}):\n attr['alpha'] = 1.0\n attr['beta'] = 1.0\n if 'op' == 'Selu':\n attr['alpha'] = 1.6732632423543772848170429916717\n attr['beta'] = 1.0507009873554804934193349852946\n\n return self._build_op(op, [input_name], name=name, attr=attr)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def get_activation_function(actfn):\n if actfn is None or actfn == 'leakyrelu':\n def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)\n elif actfn == 'gelu':\n def create_actfn(): return nn.GELU()\n elif actfn == 'relu':\n def create_actfn(): return nn.ReLU()\n elif actfn == 'swish' or actfn == 'silu':\n def create_actfn(): return nn.SiLU()\n else:\n raise Exception('Unknown activation function ' + str(actfn))\n return create_actfn",
"def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def test_get_hyperflex_node_by_moid(self):\n pass",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def get_activation(self, activation_string):\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return self.gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)",
"def _new_learning_node(self, initial_stats=None, parent_node=None,\n is_active=True):\n if initial_stats is None:\n initial_stats = {}\n\n if is_active:\n return AdaActiveLearningNodeRegressor(initial_stats, parent_node,\n random_state=self.random_state)\n else:\n prediction_option = self.leaf_prediction\n if prediction_option == self._TARGET_MEAN:\n return InactiveLearningNodeMean\n else:\n return InactiveLearningNodePerceptron",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def get_activation(act):\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n return nn.LeakyReLU(0.1)\n elif act == 'identity':\n return IdentityActivation()\n elif act == 'elu':\n return ELU()\n elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:\n return nn.Activation(act)\n else:\n raise NotImplementedError\n else:\n return act",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)"
] | [
"0.6525348",
"0.6170288",
"0.6057955",
"0.583563",
"0.5773526",
"0.5732154",
"0.55703044",
"0.5537313",
"0.54962003",
"0.5491791",
"0.54823685",
"0.548114",
"0.54719126",
"0.54137206",
"0.5379066",
"0.53318",
"0.5318398",
"0.53031856",
"0.5283017",
"0.52783245",
"0.5272741",
"0.52717596",
"0.5258059",
"0.5252894",
"0.52434903",
"0.52431625",
"0.5234899",
"0.5232821",
"0.5231174",
"0.52260196"
] | 0.6768296 | 0 |
Map MXNet's pad operator attributes to onnx's Pad operator and return the created node. | def convert_pad(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
onnx_pad_width = transform_padding(mxnet_pad_width)
pad_mode = attrs.get("mode")
if pad_mode == "constant":
pad_value = float(attrs.get("constant_value")) \
if "constant_value" in attrs else 0.0
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode='constant',
value=pad_value,
pads=onnx_pad_width,
name=name
)
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pads(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n auto_pad = onnx_node.get_attribute_value('auto_pad')\n pads = onnx_node.get_attribute_value('pads', ()) # Padding along each axis\n kernel_shape = onnx_node.get_attribute_value('kernel_shape')\n\n # Attribute 'auto_pad' is deprecated, but is currently used by CNTK\n if auto_pad:\n if auto_pad == 'VALID':\n pads = [0, 0] * len(kernel_shape)\n\n else:\n # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.\n # In case of odd number add the extra padding at the end for SAME_UPPER and at the\n # beginning for SAME_LOWER.\n def pad_value(kernel_dim): # type: (int) -> float\n return (kernel_dim - 1.0) / 2.0\n\n pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n ceil(pad_value(dim)) for dim in kernel_shape]\n pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n floor(pad_value(dim)) for dim in kernel_shape]\n pads = pads_starts + pads_ends\n\n verify_symmetric_padding(onnx_node, pads)\n\n pad_h, pad_w, pad_d = 0, 0, 0\n if pads and len(pads) == 2: # ONNX input axes NCHW\n pad_h, pad_w = pads\n if pads and len(pads) == 3: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d = pads\n if pads and len(pads) == 4: # ONNX input axes NCHW\n pad_h, pad_w, _, _ = pads\n elif pads and len(pads) == 6: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d, _, _, _ = pads\n\n return pad_h, pad_w, pad_d",
"def convert_padding(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n input_padding = op.input(\"Paddings\")\n if input_padding:\n padding = g.get_node(input_padding[0])\n padding = infer_value(padding, g.get_params()).numpy().tolist()\n else:\n padding = op.attr(\"paddings\")\n padding = op.attr(\"paddings\")\n value = op.attr(\"value\")\n data_format = op.attr(\"data_format\")\n mode = op.attr(\"mode\")\n assert mode != \"circular\", \"Don't support mod='circular' for PaddlePaddle's padding\"\n if mode == \"replicate\":\n mode = \"edge\"\n\n pad_len = len(padding)\n new_paddings = [0] * (pad_len + 4)\n for i in range(0, pad_len, 2):\n index = -1 - i\n if data_format[:2] != \"NC\":\n index = -3 - i\n new_paddings[index] = padding[i + 1]\n new_paddings[index - 1] = padding[i]\n\n new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]\n\n out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)\n g.add_node(op.output(\"Out\")[0], out)",
"def pad_pattern():\n pattern = is_op(\"nn.pad\")(wildcard(), is_constant())\n return pattern",
"def pad(self, *args, **kwargs):\n return _image.image_pad(self, *args, **kwargs)",
"def build(self):\n pad_size_tmp = list(self.pad_size)\n\n # This handles the case where the padding is equal to the image size\n if pad_size_tmp[0] == self.input_size[0]:\n pad_size_tmp[0] -= 1\n pad_size_tmp[1] -= 1\n if pad_size_tmp[2] == self.input_size[1]:\n pad_size_tmp[2] -= 1\n pad_size_tmp[3] -= 1\n # Pytorch expects its padding as [left, right, top, bottom]\n self.padding_module = ReflectionPad2d([pad_size_tmp[2], pad_size_tmp[3],\n pad_size_tmp[0], pad_size_tmp[1]])",
"def pad(input, pad, mode='constant', value=0):\n ndim = input.ndimension()\n pads_begin, pads_end = [0] * ndim, [0] * ndim\n for i in range(len(pad) // 2):\n pads_begin[ndim - 1 - i] = pad[i * 2]\n pads_end[ndim - 1 - i] = pad[i * 2 + 1]\n mode = {'constant': 'CONSTANT', 'reflect': 'REFLECT',\n 'replicate': 'EDGE', 'circular': 'EDGE'}[mode]\n return FunctionLib.apply(\n 'Pad', input.device, [input], mode=mode, value=float(value),\n ndim=ndim, pads=pads_begin + pads_end)",
"def pad(self):\n return self._pad",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def pad(self):\n return self.PAD",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def pad(self) -> dict:\n raise NotImplementedError",
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def __init__(self, padding, padding_mode, **kwargs):\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)",
"def pad(self, nxp, nyp):\n assert (nxp > self.nx)\n assert (nyp > self.ny)\n assert (np.mod(nxp - self.nx, 2) == 0)\n assert (np.mod(nyp - self.ny, 2) == 0)\n\n ret = rmap(nx=nxp, dx=self.dx, ny=nyp, dy=self.dy)\n ret.map[(nyp - self.ny) / 2:(nyp + self.ny) / 2, (nxp - self.nx) / 2:(\n nxp + self.nx) / 2] = self.map\n return ret",
"def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )",
"def pad_token(self):\r\n if self._pad_token is None:\r\n logger.error(\"Using pad_token, but it is not set yet.\")\r\n return self._pad_token",
"def pad_conv_pattern():\n pattern = is_op(\"nn.pad\")(wildcard(), is_constant())\n pattern = is_op(\"nn.conv2d\")(pattern, is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern",
"def pad_model():\n\n inputs = tf.keras.Input(shape=(10, 10, 3,))\n x = tf.keras.layers.Conv2D(16, (1, 1))(inputs)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [1, 1]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), constant_values=2)\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), mode='SYMMETRIC')\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"pad_model\")(x)\n return outputs",
"def __init__(self, pad_mask):\n self.nonpad_ids = None\n self.dim_origin = None\n\n with tf.name_scope(\"pad_reduce/get_ids\"):\n pad_mask = tf.reshape(pad_mask, [-1]) # Flatten the batch\n # nonpad_ids contains coordinates of zeros rows (as pad_mask is\n # float32, checking zero equality is done with |x| < epsilon, with\n # epsilon=1e-9 as standard, here pad_mask only contains positive values\n # so tf.abs would be redundant)\n self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))\n self.dim_origin = tf.shape(pad_mask)[:1]",
"def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... ]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c",
"def pad_img(image, label):\n paddings = [[2,2],[2,2],[0,0]]\n return tf.pad(image, paddings, mode=\"CONSTANT\", constant_values=0.0), label",
"def padid(self):\r\n return self.word2idx.get(PAD, 0)",
"def set_Pad(self, value):\n super(ImageInputSet, self)._set_input('Pad', value)",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def get_pad_info(self, index):\n if index == 0:\n return self.pad_token\n elif index == 1:\n return self.pad_id\n else:\n raise ValueError(\"Wrong index for get pad token information......\")",
"def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])"
] | [
"0.57889557",
"0.5579466",
"0.54904",
"0.5398082",
"0.52853227",
"0.5275017",
"0.5262226",
"0.52524114",
"0.51949155",
"0.51621807",
"0.5159108",
"0.508906",
"0.50823164",
"0.5075376",
"0.5003475",
"0.49345222",
"0.49055016",
"0.49049303",
"0.49036154",
"0.4897707",
"0.4888823",
"0.4874883",
"0.48440993",
"0.48412853",
"0.48209506",
"0.4817222",
"0.48054925",
"0.4769608",
"0.4737322",
"0.47355524"
] | 0.7317738 | 0 |
create extra tensor node from numpy values | def create_helper_tensor_node(input_vals, output_name, kwargs):
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[input_vals.dtype]
tensor_node = onnx.helper.make_tensor_value_info(
name=output_name,
elem_type=data_type,
shape=input_vals.shape
)
kwargs["initializer"].append(
onnx.helper.make_tensor(
name=output_name,
data_type=data_type,
dims=input_vals.shape,
vals=input_vals.flatten(),
raw=False,
)
)
return [tensor_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_constant(cls, onnx_node, inputs, opset_version):\n tmp_tensor = onnx_node.getattr('value')\n np_dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tmp_tensor.data_type]\n np_tensor = np.frombuffer(tmp_tensor.raw_data, dtype=np_dtype)\n if np_tensor.dtype == \"int64\":\n np_tensor = np_tensor.astype(np.int32)\n # todo, we cannot support scalar tensor\n if np.ndim(np_tensor) == 0:\n np_tensor = np.array(np_tensor, ndmin=1)\n return None, np_tensor",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def create_helper_build_values_node(\n inputs, output_name,\n dtype, kwargs, axis=0\n ):\n values = []\n tensor_nodes = []\n for idx, inp in enumerate(inputs):\n if not isinstance(inp, (str, bytes)):\n inp, = create_helper_tensor_node(\n np.array([inp], dtype=dtype),\n output_name + \"__value\" + str(idx),\n kwargs\n )\n tensor_nodes.append(inp)\n inp = inp.name\n values.append(inp)\n concat_node, = create_helper_concat_node(values, output_name, axis=axis)\n return tensor_nodes + [concat_node,]",
"def _build_tensor(self, ndarray):\n\n ndarray = np.asarray(ndarray).astype(self.dtype)\n return tf1.placeholder_with_default(\n ndarray, shape=ndarray.shape if self.use_static_shape else None)",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def tensor(data, **context):\n raise NotImplementedError",
"def make_node(self, x, y, ilist):\r\n x_ = as_cuda_ndarray_variable(x)\r\n y_ = as_cuda_ndarray_variable(y)\r\n ilist_ = tensor.as_tensor_variable(ilist)\r\n\r\n convert_map = {8: tensor.basic._convert_to_int8,\r\n 16: tensor.basic._convert_to_int16,\r\n 32: tensor.basic._convert_to_int32,\r\n 64: tensor.basic._convert_to_int64\r\n }\r\n intwidth = theano.gof.compiledir.python_int_bitwidth()\r\n ilist_ = convert_map[intwidth](ilist_)\r\n\r\n assert x_.type.dtype == y_.type.dtype\r\n assert x_.type.ndim >= y_.type.ndim\r\n\r\n if ilist_.type.dtype[:3] not in ('int', 'uin'):\r\n raise TypeError('index must be integers')\r\n if ilist_.type.broadcastable != (False,):\r\n raise TypeError('index must be vector')\r\n if x_.type.ndim == 0:\r\n raise TypeError('cannot index into a scalar')\r\n if x_.type.broadcastable[0]:\r\n # the caller should have made a copy of x len(ilist) times\r\n raise TypeError('cannot index into a broadcastable dimension')\r\n\r\n return Apply(self, [x_, y_, ilist_], [x_.type()])",
"def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def to_tensor(self): \n raise NotImplementedError",
"def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def tensor(*args, **kwargs):\n return Tensor(*args, **kwargs)",
"def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorProto.INT32\n tensor_value = onnx.helper.make_tensor(\"value\", tensor_type, [1],\n [op.value])\n node.attribute.extend([\n helper.make_attribute('value', tensor_value),\n ])\n return node",
"def tt(ndarray):\n\n\tif not isinstance(ndarray, torch.Tensor):\n\n\t\tif not isinstance(ndarray, np.ndarray):\n\t\t\tndarray = np.array(ndarray)\n\n\t\tif torch.cuda.is_available():\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float().cuda(), requires_grad=False)\n\t\telse:\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float(), requires_grad=False)\n\n\treturn ndarray",
"def _from_numpy(array):\n return tf.constant(array)",
"def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')",
"def create_variable(arr, dtype='float32', device=None, requires_grad=True, backend='autograd'):\n args = {}\n if backend == 'autograd':\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = anp.array(arr, **args)\n elif backend == 'pytorch':\n if dtype is not None:\n args['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n if device is not None:\n args['device'] = device\n args['requires_grad'] = requires_grad\n var = tc.tensor(arr, **args)\n return var",
"def do_decode(self, value, decode_fn):\n del decode_fn\n tensor_proto = value.tensor_value\n tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n return tensor",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def tn(value):\n\n\tif not isinstance(value, np.ndarray):\n\n\t\tif isinstance(value, torch.Tensor):\n\t\t\tvalue = value.detach()\n\t\t\tvalue = value.numpy()\n\n\t\telse:\n\t\t\tvalue = np.array(value)\n\n\treturn value",
"def make_node(self, x, y, ilist):\r\n x_ = as_gpuarray_variable(x)\r\n y_ = as_gpuarray_variable(y)\r\n ilist_ = as_gpuarray_variable(ilist)\r\n\r\n assert x_.type.dtype == y_.type.dtype\r\n assert x_.type.ndim >= y_.type.ndim\r\n\r\n if ilist_.type.dtype[:3] not in ('int', 'uin'):\r\n raise TypeError('index must be integers')\r\n if ilist_.type.broadcastable != (False,):\r\n raise TypeError('index must be vector')\r\n if x_.type.ndim == 0:\r\n raise TypeError('cannot index into a scalar')\r\n if x_.type.broadcastable[0]:\r\n # the caller should have made a copy of x len(ilist) times\r\n raise TypeError('cannot index into a broadcastable dimension')\r\n\r\n return gof.Apply(self, [x_, y_, ilist_], [x_.type()])",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _build_tensor(args):\n cells,iptr,indx,data,tid,nproc,selected,log1p = args\n\n for i in range(0+tid,len(cells),nproc):\n index = selected[i]\n sidx = iptr[index]\n # find the number of gene entries for\n # this cell\n if index < len(iptr) - 1:\n # find length to next cell\n nentries = iptr[index + 1] - sidx \n else:\n # find length to the end\n nentries = len(data) - sidx\n\n for j in range(0,nentries):\n if log1p:\n cells[i][indx[sidx+j]] = np.log(1+(data[sidx+j]))\n else:\n cells[i][indx[sidx+j]] = (data[sidx+j])\n #cells[i][indx[sidx+j]] = float(data[sidx+j])",
"def _add_node(self, input_tensors, output_tensors):\n raise NotImplementedError",
"def create_nodes(self):",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node"
] | [
"0.6670378",
"0.6648633",
"0.63642025",
"0.62701637",
"0.6126209",
"0.61022186",
"0.6095189",
"0.60797447",
"0.6028599",
"0.6016147",
"0.6001126",
"0.59771246",
"0.59069496",
"0.5865564",
"0.5849665",
"0.58315593",
"0.5821287",
"0.57957333",
"0.57812244",
"0.57678527",
"0.572931",
"0.57270825",
"0.5725941",
"0.56967825",
"0.56738436",
"0.56627834",
"0.5650216",
"0.564409",
"0.5614921",
"0.5614076"
] | 0.68433195 | 0 |
create extra reshape node with static shape | def create_helper_reshape_node(input_name, output_name, shape, kwargs):
shape_tensor_node, = create_helper_tensor_node(
np.asarray(shape, dtype=np.int64), output_name + "__shape", kwargs
)
reshape_node = onnx.helper.make_node(
"Reshape",
inputs=[input_name, shape_tensor_node.name],
outputs=[output_name],
name=output_name
)
return [shape_tensor_node, reshape_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def convert_reshape(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n output_shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(output_shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"reshape_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=output_shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n\n not_supported_shape = [-2, -3, -4]\n\n for val in output_shape_list:\n if val in not_supported_shape:\n raise AttributeError(\"Reshape: Shape value not supported in ONNX\", val)\n\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, reshape_node]",
"def _create_reshape(cls, onnx_node, inputs, opset_version):\n shape = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(shape)",
"def convert_reshape(g, op, block):\n\n input_shape = op.input(\"Shape\")\n input_shape_tensor = op.input(\"ShapeTensor\")\n data = g.get_node(op.input(\"X\")[0])\n if input_shape:\n new_shape = g.get_node(input_shape[0])\n elif input_shape_tensor:\n new_shape = []\n for shape_name in input_shape_tensor:\n shape = g.get_node(shape_name)\n if len(infer_shape(shape)) == 0:\n shape = _op.reshape(shape, [-1])\n new_shape.append(shape)\n new_shape = _op.concatenate(new_shape, axis=0)\n new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())\n if infered:\n new_shape = new_shape.tolist()\n else:\n new_shape = op.attr(\"shape\")\n out = _op.reshape(data, new_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def reshape(tensor, newshape):\n raise NotImplementedError",
"def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = nest.flatten(target_shape)",
"def _special_handle_reshape(cls, op, X, W):\n node_name = op.name + \":shape\"\n return [\n numpy_helper.from_array(np.array(op.shape, dtype=np.int64),\n node_name)\n ]",
"def reshape(self, new_shape):\n return self.__class__(pos=self.pos.reshape(new_shape),\n vel=self.vel.reshape(new_shape),\n frame=self.frame)",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def add_reshape(self, input_name, shape, name=None, attr={}):\n return self._build_op('Reshape', [input_name, shape], name=name)",
"def _reshape_function(image, label):\n# image = tf.expand_dims(image, axis=0)\n image = tf.expand_dims(image, axis=-1)\n return image, label",
"def reshape(self, *shape):\n newTensor = super(MKLTensor, self).reshape(*shape)\n newTensor.set_mkl(self)\n return newTensor",
"def __init__(self, incoming, shape, name='ReshapeLayer'):\n super(ReshapeLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.shape = shape\n self.out = tf.zeros(self.get_output_shape())\n self.name = name",
"def add_input_and_output_shape(self, input_shape, output_shape):",
"def test_jax_Reshape_concrete_shape():\n a = vector(\"a\")\n x = reshape(a, a.shape)\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])\n\n x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_shape(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n out = shape_of(x, dtype=\"int32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def reshape(x, shape):\n return Reshape(shape)(x)",
"def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)",
"def convert_unsqueeze(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axes = sorted(op.attr(\"axes\"))\n for axis in axes:\n x = _op.expand_dims(x, axis=axis, num_newaxis=1)\n g.add_node(op.output(\"Out\")[0], x)",
"def reshape(self, *shape):\n return F.Reshape.apply(self, shape)",
"def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)",
"def convert_expand_dims(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n input_nodes,\n [name],\n axes=[axis],\n name=name,\n )\n return [node]",
"def reshape(self, *shape):\n return Signal(self._initial_value.reshape(*shape),\n name=\"%s.reshape(%s)\" % (self.name, shape),\n base=self.base)",
"def convert_shape(node, **kwargs):\n return create_basic_op_node('Shape', node, kwargs)",
"def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\")\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes)",
"def ashape(node):\n shp = node.shape\n assert shp is not None\n return shp",
"def local_scalar_reshape(node):\r\n if isinstance(node.op, T.Reshape):\r\n x, shp = node.inputs\r\n if x.ndim == 0 and T.get_vector_length(shp) == 0:\r\n return [x]",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def translate_shape(shape, x_shift, y_shift):"
] | [
"0.78215057",
"0.7107456",
"0.69461006",
"0.67939496",
"0.6732763",
"0.6498097",
"0.6490464",
"0.6484981",
"0.6472887",
"0.6448186",
"0.6354322",
"0.6187885",
"0.61778134",
"0.6151803",
"0.613624",
"0.61251277",
"0.6119176",
"0.61077476",
"0.6069696",
"0.60536623",
"0.60507816",
"0.60202926",
"0.5965346",
"0.59591776",
"0.5939819",
"0.5903656",
"0.5901582",
"0.58935237",
"0.58922136",
"0.5838157"
] | 0.736639 | 1 |
create extra concat node | def create_helper_concat_node(inputs, output_name, axis=0):
concat_node = onnx.helper.make_node(
"Concat",
inputs=inputs,
outputs=[output_name],
name=output_name,
axis=axis,
)
return [concat_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"dim\", 1))\n concat_node = onnx.helper.make_node(\n \"Concat\",\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [concat_node]",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def concatenate_data():",
"def _rewrite_concat(self, node: saldag.Concat):\n\n if node.requires_mpc():\n node.is_mpc = True\n if len(node.children) > 1 and node.is_boundary():\n fork_node(node)",
"def anchor():\n return 'concat'",
"def _rewrite_concat(self, node: saldag.Concat):\n\n if node.is_lower_boundary():\n\n out_stored_with = node.out_rel.stored_with\n for par in node.parents:\n if not par.is_root():\n par.out_rel.stored_with = copy.copy(out_stored_with)\n node.is_mpc = False",
"def _create_concat(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.attrs[\"axis\"]\n if factor < 0:\n factor = len(inputs[0].shape\n ) + factor # in order to support the negative axis\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def _rewrite_concat(self, node: saldag.Concat):\n\n assert (not node.is_lower_boundary())\n\n out_stored_with = node.out_rel.stored_with\n ordered_pars = node.get_sorted_parents()\n for parent in ordered_pars:\n par_stored_with = parent.out_rel.stored_with\n if par_stored_with != out_stored_with:\n out_rel = copy.deepcopy(parent.out_rel)\n out_rel.rename(out_rel.name + \"_close\")\n out_rel.stored_with = copy.copy(out_stored_with)\n # create and insert close node\n store_op = saldag.Close(out_rel, None)\n store_op.is_mpc = True\n saldag.insert_between(parent, node, store_op)",
"def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)",
"def __init__(self):\r\n super(AppendNode, self).__init__()",
"def append(planNode):\n description = \"append operation\"\n return description",
"def _rewrite_concat(self, node: saldag.Concat):\n\n # Copy over columns from existing relation\n out_rel_cols = node.out_rel.columns\n\n # Combine per-column collusion sets\n for idx, col in enumerate(out_rel_cols):\n columns_at_idx = [in_rel.columns[idx] for in_rel in node.get_in_rels()]\n col.coll_sets = utils.coll_sets_from_columns(columns_at_idx)",
"def ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)",
"def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())",
"def concat_immediate(self, other: \"Linked[T]\") -> None:\n self.forward.concat(other)",
"def concat(inp):\n if(type(inp) == tuple):\n return\n if(inp.getName() == '&'):\n if(inp.getFirst().getName() == 'tt' and inp.getSec() is not None):\n inp.setName(inp.getSec().getName())\n inp.setFirst(inp.getSec().getFirst())\n inp.setSec(inp.getSec().getSec())\n if(inp.getSec() is None):\n return\n if(inp.getSec().getName() == 'tt' and inp.getFirst() is not None):\n inp.setName(inp.getFirst().getName())\n if(inp.getName() in doubles or inp.getName() in singles):\n inp.setFirst(inp.getFirst().getFirst())\n inp.setSec(inp.getFirst().getSec())\n else:\n inp.setAtom()",
"def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)",
"def concat(a, b):\n return torch.cat((a, b), 1)",
"def testAppendAdditional(self):\n\n self.node.desc = 'first description'\n\n self.assertEqual(\n ['first description', ],\n self.node.desc\n )\n\n self.node.desc = 'second description'\n\n self.assertEqual(\n ['first description', 'second description'],\n self.node.desc\n )",
"def assemble_col(c1, c2):\n c1.extend(c2)\n return c1",
"def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)",
"def concat(self, other):\n assert isinstance(other, Tuple)\n return Tuple(self.spaces + other.spaces)",
"def q_append_leaf(node, q):\n for i in range(q): node.addkid(Node(\"*\"))",
"def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val",
"def concat_pattern():\n pattern = is_tuple(None)\n pattern = is_op(\"concatenate\")(pattern)\n\n return pattern",
"def concatenate(self, other: \"CFG\") -> \"CFG\":\n start_temp = Variable(\"#STARTCONC#\")\n temp_0 = Terminal(\"#0CONC#\")\n temp_1 = Terminal(\"#1CONC#\")\n production0 = Production(start_temp, [temp_0, temp_1])\n cfg_temp = CFG({start_temp},\n {temp_0, temp_1},\n start_temp,\n {production0})\n return cfg_temp.substitute({temp_0: self,\n temp_1: other})"
] | [
"0.7233822",
"0.70961404",
"0.6707305",
"0.6454889",
"0.64343643",
"0.6412976",
"0.6336564",
"0.63213706",
"0.60288745",
"0.60278517",
"0.59759504",
"0.58231926",
"0.57735884",
"0.57064074",
"0.5681187",
"0.565455",
"0.5654235",
"0.5651357",
"0.5630985",
"0.5547733",
"0.55437636",
"0.5523315",
"0.55146444",
"0.5507128",
"0.5506724",
"0.53741807",
"0.53531706",
"0.53277117",
"0.53121245",
"0.52949035"
] | 0.7367524 | 0 |
create extra expand node | def create_helper_expand_node(input_name, output_name, expand_shape):
expand_node = onnx.helper.make_node(
"Expand",
inputs=[input_name, expand_shape],
outputs=[output_name],
name=output_name,
)
return [expand_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutate_expand_node(\n child, node=None, pb_en_out_link=config.MUTPB_EN_OUT_LINK):\n # TODO: can maybe be improved by sparqling\n if not node:\n nodes = list(child.nodes)\n node = random.choice(nodes)\n new_triple, _, _ = _mutate_expand_node_helper(node, pb_en_out_link)\n return child + (new_triple,)",
"def expand(self, node):\n node.expand(self.selPolicy)",
"def _expand_cldr(self):\n# global cldr\n self.tree.item('cldr', open=True, \\\n values=[self._count_children('cldr'), ''])",
"def setExpanded(self):",
"def expand_children(self, parent):\n pass",
"def _expand_node(expand_n, base_cost, randomizer):\n\n for next_n, props in nb[expand_n].items():\n randomizer -= 1\n total_cost = props['weight'] + base_cost\n e_cost = (total_cost, props['weight'], randomizer)\n\n # Check for tree membership as this signifies a loop back to the tree\n if next_n not in scanned or e_cost < scanned[next_n] and not tree.has_node(next_n):\n heappush(queue, (e_cost[0], e_cost[1], e_cost[2], next_n))\n scanned[next_n] = e_cost\n p[next_n] = expand_n",
"def __expandNodes(self, node):\n for childNode in node.children():\n if childNode.expanded:\n idx = self.__bookmarksModel.nodeIndex(childNode)\n idx = self.__proxyModel.mapFromSource(idx)\n self.bookmarksTree.setExpanded(idx, True)\n self.__expandNodes(childNode)",
"def _expand_node(self, node, dependency_tree, is_verb_node=False):\n expanded_node = [(node[\"address\"], node[\"word\"])]\n\n for dependency in node[\"deps\"]:\n if dependency == \"rel\":\n continue\n\n # Ignore noun and object phrases\n if is_verb_node and dependency in (\"nsubj\", \"dobj\"):\n continue\n\n for address in node[\"deps\"][dependency]:\n expanded_node.extend(self._expand_node(dependency_tree[\"nodes\"][address], dependency_tree, is_verb_node))\n\n return expanded_node",
"def show_expanded(tree, expand_macros, **kw):\n new_tree = hq[wrap_simple(\n unhygienic[log], u[macropy.core.unparse(tree)],\n ast_literal[tree])]\n return new_tree",
"def expand(node):\n if node.isTerminal():\n return node\n\n # Get the next unexplored state\n nextState = node.exploreChildNode()\n\n # If all states are already explored, recurse\n if nextState is not None:\n return nextState\n else:\n return expand(node.UCB1())",
"def expansion(self, actions):\n for action in actions: \n self.children[action[0]] = TreeNode()",
"def convert_expand(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n if op.input(\"Shape\"):\n sizes = g.get_node(op.input(\"Shape\")[0])\n else:\n sizes = op.attr(\"shape\")\n\n if isinstance(sizes, _expr.Expr):\n sizes = try_infer_value(sizes, parameters=g.get_params())[0]\n\n if isinstance(sizes, np.ndarray):\n sizes = sizes.tolist()\n\n out = _op.broadcast_to(x, sizes)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def _mutate_expand_node_helper(node, pb_en_out_link=config.MUTPB_EN_OUT_LINK):\n var_edge = gen_random_var()\n var_node = gen_random_var()\n if random.random() < pb_en_out_link:\n new_triple = (node, var_edge, var_node)\n else:\n new_triple = (var_node, var_edge, node)\n return new_triple, var_node, var_edge",
"def _expand_approved(self):\n# global approved\n self.tree.item('approved', open=True, \\\n values=[self._count_children('approved'), ''])",
"def expand(self):\n self._express = self._express.expand()\n self._del_derived()\n return self",
"def _expand_unknown(self):\n# global unknown\n self.tree.item('unknown', open=False, \\\n values=[self._count_children('unknown'), ''])",
"def set_expand(self, expand):\n\n self.props[\"expand\"] = expand",
"def addChild(node):",
"def expand(self, legal_deletions, c):\n for legal in legal_deletions:\n move = legal[1]\n\n # create a (move:ELLS-node) item for all child nodes.\n self.children[move] = ELLS_node(delete(self.state, move),\n move,\n legal[0],\n parent=self,\n c=c)\n self.isExpanded = True",
"def q_append_leaf(node, q):\n for i in range(q): node.addkid(Node(\"*\"))",
"def expand(self, policy):\n if self.children != {}: return\n actionWeights = policy(self.state)\n for action in actionWeights:\n succ = self.state.succ(self.state.player, action)\n self.children[action] = TreeNode(succ, actionWeights[action], self)",
"def expand(self):\n self.vertices[-1, :] = self.expanded",
"def expand(self):\n # distribution = self.net.predict_distribution(self.pos)\n self.children = []\n for c in self.pos.moves():\n pos2 = self.pos.move(c)\n # 如果存在斩杀,children应为空值(即表面以结束游戏?)\n if pos2 is int:\n self.children = [Treenode(self.net, pos2, c)]\n node = Treenode(self.net, pos2, move=c)\n node.v += 1\n tree_update([self, node], node.pos.simulate(self.net))\n self.children.append(node)",
"def nexpand(self):\n return self._Dstar.nexpand",
"def expandPhEDExNode(target):\n while True:\n report, node = (yield)\n sentPhedex = False\n for subnode in node.children:\n if subnode.name == \"phedex\":\n target.send((report, subnode))\n sentPhedex = True\n if not sentPhedex:\n target.send((report, node))",
"def _generateExpandedEOCs(self, obj, **args):\n return []",
"def expand(self, *args, **kwargs):\n\t\tif hasattr(self.parent, \"queriedTable\"):\n\t\t\treturn self.parent.queriedTable.expand(*args, **kwargs)\n\t\telse:\n\t\t\treturn self.parent.rd.expand(*args, **kwargs)",
"def addnode(self, parent, tag, **kw):\n kw = {k: v for k, v in kw.items() if v is not None}\n return et.SubElement(parent, tag, **kw)",
"def add_node(self, node):"
] | [
"0.6341064",
"0.6288607",
"0.62806153",
"0.6086077",
"0.5995333",
"0.5976243",
"0.59042525",
"0.58790493",
"0.58487433",
"0.5824369",
"0.5777672",
"0.57740724",
"0.5769714",
"0.5760391",
"0.5726708",
"0.5703688",
"0.56856406",
"0.5679697",
"0.5621045",
"0.5600409",
"0.5491694",
"0.5466531",
"0.54559875",
"0.54353535",
"0.5433097",
"0.54277223",
"0.5422331",
"0.5390922",
"0.5381784",
"0.5374303"
] | 0.7317582 | 0 |
create extra shape node for specified input node | def create_helper_shape_node(input_name, output_name):
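    # Emit a single ONNX Shape node that yields the runtime shape of input_name.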
shape_node = onnx.helper.make_node(
"Shape",
inputs=[input_name],
outputs=[output_name],
name=output_name,
)
return [shape_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_shape(node, **kwargs):\n return create_basic_op_node('Shape', node, kwargs)",
"def add_input_and_output_shape(self, input_shape, output_shape):",
"def ashape(node):\n shp = node.shape\n assert shp is not None\n return shp",
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def add_shape(self, input_name, attr=None, name=None):\n if attr is None:\n attr = {}\n return self._build_op('Shape', [input_name], attr=attr, name=name)",
"def convert_shape(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n out = shape_of(x, dtype=\"int32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def create_helper_expand_node(input_name, output_name, expand_shape):\n expand_node = onnx.helper.make_node(\n \"Expand\",\n inputs=[input_name, expand_shape],\n outputs=[output_name],\n name=output_name,\n )\n return [expand_node]",
"def create_helper_reshape_node(input_name, output_name, shape, kwargs):\n shape_tensor_node, = create_helper_tensor_node(\n np.asarray(shape, dtype=np.int64), output_name + \"__shape\", kwargs\n )\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[input_name, shape_tensor_node.name],\n outputs=[output_name],\n name=output_name\n )\n\n return [shape_tensor_node, reshape_node]",
"def add_shape(self, input_name, name=None):\n return self._build_op('Shape', [input_name], name=name)",
"def _create_node(\n self,\n name,\n ):\n pass",
"def add_node(graph, node_name, label, shape='record', style='filled', fillcolor='lightgrey'):\n node = Node(name=node_name, shape=shape, style=style, fillcolor=fillcolor, label=label)\n graph.add_node(node)\n return node",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def add_node(self, node):",
"def _create_constantOfShape(cls, onnx_node, inputs, opset_version):\n value = onnx_node.getattr(\"value\", 0)\n if isinstance(value, onnx.TensorProto):\n value = numpy_helper.to_array(value)[0].item()\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(value)",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def build_graph_from_input(self, input_node):\n raise NotImplementedError",
"def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorProto.INT32\n tensor_value = onnx.helper.make_tensor(\"value\", tensor_type, [1],\n [op.value])\n node.attribute.extend([\n helper.make_attribute('value', tensor_value),\n ])\n return node",
"def addToNode(self, refnode):\n ele = inkex.etree.Element('{http://www.w3.org/2000/svg}'+self.type)\n\n ele.set('cx',str(self.center[0]))\n ele.set('cy',str(self.center[1]))\n if self.rmax:\n ele.set('ry',str(self.radius))\n ele.set('rx',str(self.rmax))\n ele.set('transform', 'rotate(%3.2f,%f,%f)'%(numpy.degrees(self.angle),self.center[0],self.center[1]))\n else:\n ele.set('r',str(self.radius))\n refnode.xpath('..')[0].append(ele)\n return ele",
"def __init__(self, shape, input_var=None):\n\n self.output = layers.InputLayer(shape, input_var=input_var)",
"def create_new_node(subgraph, prev_node, label, bb):\n return add_node(subgraph, update_node_name(prev_node.get_name(), bb-1), label=update_bb_string(label, bb-1))",
"def new_node(name):\n\n return name, []",
"def create_nodes(self):",
"def write_node_shp(self,shpname,extra_fields=[]):\n assert len(extra_fields)==0 # not yet supported!\n\n # zero-based index of node (why does write_edge_shp create 1-based ids?)\n base_dtype = [('node_id',np.int32)]\n\n node_geoms=[geometry.Point( self.nodes['x'][i] )\n for i in self.valid_node_iter() ]\n\n node_data=self.nodes[~self.nodes['deleted']].copy()\n\n # don't need to write all of the original fields out:\n node_data=utils.recarray_del_fields(node_data,['x','deleted'])\n\n wkb2shp.wkb2shp(shpname,input_wkbs=node_geoms,fields=node_data,\n overwrite=True)",
"def add_node(graph, node, parent, label):\n neg = node['neg']\n pos = node['pos']\n total = str(neg + pos)\n neg = str(neg)\n pos = str(pos)\n samples_info = total + ' samples\\n' + neg + ' of class 0, ' + pos + ' of class 1'\n if 'final_class' in node:\n legend = str(node['id']) + '. final class is ' + str(node['final_class'])\n new_node = pydot.Node(legend)\n else:\n legend = str(node['id']) + '. ' + node['split_attr'] + \\\n ' < ' + str(node['split_value']) + '\\n' + samples_info\n new_node = pydot.Node(legend)\n graph.add_node(new_node)\n if parent:\n graph.add_edge(pydot.Edge(parent, new_node, label=str(label),labelfontcolor=\"#009933\", fontsize=\"10.0\", color=\"blue\"))\n if 'left_child' in node:\n add_node(graph, node['left_child'], new_node, True)\n if 'right_child' in node:\n add_node(graph, node['right_child'], new_node, False)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def add_node (self, node):\n raise NotImplementedError",
"def define_path(self, node): \n if node.childrens!=[]:\n for child in node.childrens:\n node_child = child['node']\n node_child.times_used+=1\n self.define_path(node_child)\n \n \n #take care of not used nodes, set their gradient to 0\n for node in self.input_node:\n if node.times_used==0:\n node.gradient=np.zeros((node.output_dim, self.output_node.output_dim))",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node"
] | [
"0.6897857",
"0.6676977",
"0.65623456",
"0.6395802",
"0.6266121",
"0.6219381",
"0.6160772",
"0.6141284",
"0.6076785",
"0.60708976",
"0.60563433",
"0.6043666",
"0.6030497",
"0.59740275",
"0.59445137",
"0.5934724",
"0.5927032",
"0.5918327",
"0.59053826",
"0.58898586",
"0.5859484",
"0.58504",
"0.58375406",
"0.5787492",
"0.5777548",
"0.5769806",
"0.5761656",
"0.5738349",
"0.57169104",
"0.56873035"
] | 0.7563421 | 0 |
Map MXNet's dot operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes. | def convert_dot(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
input_node_a = input_nodes[0]
input_node_b = input_nodes[1]
trans_a_node = None
trans_b_node = None
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")
op_name = "transpose" + str(kwargs["idx"])
if trans_a:
input_node_a = op_name + "_a"
trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)
if trans_b:
input_node_b = op_name + "_b"
trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[input_node_a, input_node_b],
outputs=[name],
name=name
)
if not trans_a and not trans_b:
return [matmul_node]
elif trans_a and not trans_b:
return [trans_a_node, matmul_node]
elif trans_b and not trans_a:
return [trans_b_node, matmul_node]
else:
return [trans_a_node, trans_b_node, matmul_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transpose_dot(self, other):\n from divisi2 import operators\n return operators.transpose_dot(self, other)",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]",
"def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)",
"def _dot(self, s1, s2, tf_embs):\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n return tf.matmul(mat1, tf.transpose(mat2))",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node",
"def TensorDot(self, axes):\r\n return lambda a, b: tensordot(a, b, axes)",
"def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval",
"def dot(a, b):\n return np.vdot(a.arr,b.arr)",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node",
"def dot(input, other, allow_tf32=True, out_dtype=float32, _builder=None):\n allow_tf32 = _constexpr_to_value(allow_tf32)\n out_dtype = _constexpr_to_value(out_dtype)\n return semantic.dot(input, other, allow_tf32, out_dtype, _builder)",
"def test_dot_mm(self):\n self.check_dot_mm(dot2, dot3, \"np.dot()\")",
"def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t",
"def test_matmul_vm(self):\n self.check_dot_vm(matmul_usecase, None, \"'@'\")",
"def local_lift_transpose_through_dot(node):\r\n if not (isinstance(node.op, T.DimShuffle)\r\n and node.op.new_order == (1, 0)):\r\n return False\r\n if not (node.inputs[0].owner\r\n and isinstance(node.inputs[0].owner.op, T.Dot)):\r\n return False\r\n x, y = node.inputs[0].owner.inputs\r\n\r\n if x.ndim == y.ndim == 2:\r\n return [T.dot(y.T, x.T)]",
"def test_matmul_mm(self):\n self.check_dot_mm(matmul_usecase, None, \"'@'\")",
"def bprop_matmul(self):\n ta = self.transpose_a\n tb = self.transpose_b\n mul1 = P.MatMul(transpose_a=(ta and tb),\n transpose_b=(ta or (not tb)))\n mul2 = P.MatMul(transpose_a=((not ta) or tb),\n transpose_b=(ta and tb))\n\n def bprop(x, w, out, dout):\n if ta:\n dx = mul1(w, dout)\n else:\n dx = mul1(dout, w)\n if tb:\n dw = mul2(dout, x)\n else:\n dw = mul2(x, dout)\n return dx, dw\n return bprop",
"def add_matmul(self, input_names, name=None, attr={}):\n return self._build_op('MatMul', input_names, name=name, attr=attr)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def convert_dot(g, op, block):\n\n # x, y should be 1D or 2D tensor\n # when it's 2D tensor, the first dimension means batch dimension\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n\n out = _op.sum(_op.multiply(x, y), axis=[-1], keepdims=True)\n g.add_node(op.output(\"Out\")[0], out)",
"def _mdot_r(a, b):\r\n if type(a) == types.TupleType:\r\n if len(a) > 1:\r\n a = mdot(*a)\r\n else:\r\n a = a[0]\r\n if type(b) == types.TupleType:\r\n if len(b) > 1:\r\n b = mdot(*b)\r\n else:\r\n b = b[0]\r\n return np.dot(a, b)",
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def dot(x, y):\n if isinstance(x, tf.SparseTensor) and isinstance(y, tf.SparseTensor):\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x,y)\n return res",
"def dot(pepx1, pepx2):\n\n Ls = pepx1.shape\n assert pepx2.shape==Ls, '[dot]: sizes of pepx1 and pepx2 are not equal'\n new_pepx = np.empty(Ls, dtype=np.object)\n new_lams = np.empty(pepx1.lambdas.shape, dtype=np.object)\n\n # if np.all([ pepx1[i].ndim==3 and pepx2[i].ndim==3 for i in np.ndenumerate(pepx1) ]):\n # return peps_dot(pepx1,pepx2)\n # else:\n for idx in np.ndindex(Ls):\n len_dp1 = len(pepx1.phys_bonds[idx])\n len_dp2 = len(pepx2.phys_bonds[idx])\n ax1 = [0,2,4,6] + range(8, 8+len_dp1)\n ax2 = [1,3,5,7] + range(8+len_dp1-1,8+len_dp1+len_dp2-1)\n ax2[-len_dp2] = ax1[-1] # contract vertical bonds (mpx1 down with mpx2 up)\n new_site = np.einsum(pepx1[idx],ax1,pepx2[idx],ax2)\n new_pepx[idx] = tf.reshape(new_site,'ii,ii,ii,ii,...',group_ellipsis=False)\n\n i,j = idx\n for xx in range(new_lams.shape[2]):\n new_lams[i,j,xx] = np.outer(pepx1.lambdas[i,j,xx], pepx2.lambdas[i,j,xx]).reshape(-1)\n # print new_lams[i,j,xx].shape\n\n return PEPX_GL(new_pepx,new_lams) #,pepx1.phys_bonds)",
"def dot(a, b):\n if issparse(a) or issparse(b):\n return dot_sparse(a, b)\n try:\n return a.dot(b)\n except AttributeError:\n return a @ b",
"def dot(a, b):\r\n a, b = as_tensor_variable(a), as_tensor_variable(b)\r\n\r\n if a.ndim == 0 or b.ndim == 0:\r\n return a * b\r\n elif a.ndim > 2 or b.ndim > 2:\r\n return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])\r\n else:\r\n return _dot(a, b)",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def _tensor_batch_dot(t1: Tensor, t2: Tensor) -> Tensor:\n\n msg = (\n \"Please ensure each batch member has the same feature dimension. \"\n f\"First input has {torch.numel(t1) / t1.shape[0]} features, and \"\n f\"second input has {torch.numel(t2) / t2.shape[0]} features.\"\n )\n assert torch.numel(t1) / t1.shape[0] == torch.numel(t2) / t2.shape[0], msg\n\n return torch.mm(\n t1.view(t1.shape[0], -1),\n t2.view(t2.shape[0], -1).T,\n )",
"def test_matmul_vv(self):\n self.check_dot_vv(matmul_usecase, \"'@'\")"
] | [
"0.5712867",
"0.56176335",
"0.554931",
"0.5514312",
"0.5504516",
"0.54418373",
"0.5436186",
"0.54350364",
"0.54332083",
"0.5432765",
"0.5416694",
"0.53294843",
"0.53082806",
"0.5298505",
"0.52948254",
"0.5290505",
"0.52547127",
"0.52381",
"0.5236047",
"0.5189316",
"0.5149108",
"0.51431876",
"0.5138114",
"0.5137843",
"0.51301354",
"0.5123159",
"0.51118344",
"0.5074786",
"0.50734305",
"0.50717086"
] | 0.6954135 | 0 |
Map MXNet's _linalg_gemm2 operator attributes to onnx's MatMul and Transpose operators based on the values set for transpose_a, transpose_b attributes. Return multiple nodes created. | def convert_linalg_gemm2(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
# Getting the attributes and assigning default values.
alpha = float(attrs.get("alpha", 1.0))
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")
op_name = "transpose" + str(kwargs["idx"])
if alpha == 1.0 and trans_a == 0 and trans_b == 0:
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=input_nodes,
outputs=[name],
name=name
)
return [matmul_node]
elif trans_a == 1 and trans_b == 0:
op_name = "transpose" + str(kwargs["idx"])
node_name = op_name+"_a"
trans_a_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[0]],
outputs=[op_name+"_a"],
name=node_name
)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[node_name, input_nodes[1]],
outputs=[name],
name=name
)
return [trans_a_node, matmul_node]
elif trans_a == 0 and trans_b == 1:
node_name = op_name + "_b"
trans_b_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[1]],
outputs=[op_name+"_b"],
name=node_name
)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[input_nodes[0], node_name],
outputs=[name],
name=name
)
return [trans_b_node, matmul_node]
else:
node_name_a = op_name+"_a"
trans_a_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[0]],
outputs=[op_name+"_a"],
name=node_name_a
)
node_name_b = op_name + "_b"
trans_b_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[1]],
outputs=[op_name+"_b"],
name=node_name_b
)
matmul_node = onnx.helper.make_node(
'MatMul',
            inputs=[node_name_a, node_name_b],
outputs=[name],
name=name
)
return [trans_a_node, trans_b_node, matmul_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def _fix_gemm(self, op_name, inputs, old_attr):\n op = getattr(mx.sym, op_name, None)\n alpha = float(old_attr.get('alpha', 1.0))\n beta = float(old_attr.get('beta', 1.0))\n transA = int(old_attr.get('transA', 0))\n transB = int(old_attr.get('transB', 0))\n if transA:\n inputs[0] = mx.sym.transpose(inputs[0], axes=(1, 0))\n if not transB:\n inputs[1] = mx.sym.transpose(inputs[1], axes=(1, 0))\n new_inputs = [alpha*inputs[0], inputs[1], beta*inputs[2]]\n new_attr = {'num_hidden' : self._params[inputs[2].name].shape[0]}\n return op, new_inputs, new_attr",
"def _create_gemm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n alpha = onnx_node.getattr('alpha', 1.)\n beta = onnx_node.getattr('beta', 1.)\n transA = onnx_node.getattr('transA', 0)\n transB = onnx_node.getattr('transB', 0)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(alpha=alpha,\n beta=beta,\n transA=transA,\n transB=transB)",
"def convert_dot(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n input_node_a = input_nodes[0]\n input_node_b = input_nodes[1]\n\n trans_a_node = None\n trans_b_node = None\n\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if trans_a:\n input_node_a = op_name + \"_a\"\n trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)\n if trans_b:\n input_node_b = op_name + \"_b\"\n trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_node_a, input_node_b],\n outputs=[name],\n name=name\n )\n\n if not trans_a and not trans_b:\n return [matmul_node]\n elif trans_a and not trans_b:\n return [trans_a_node, matmul_node]\n elif trans_b and not trans_a:\n return [trans_b_node, matmul_node]\n else:\n return [trans_a_node, trans_b_node, matmul_node]",
"def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def local_gemm_to_gemv(node):\r\n if node.op == gemm_no_inplace:\r\n z, a, x, y, b = node.inputs\r\n if z.broadcastable == x.broadcastable == (True, False):\r\n r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)\r\n return [r.dimshuffle('x', 0)]\r\n if z.broadcastable == y.broadcastable == (False, True):\r\n r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)\r\n return [r.dimshuffle(0, 'x')]",
"def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node",
"def local_gpu_gemv(node):\r\n gemvs = (tensor.blas.Gemv,\r\n tensor.blas_c.CGemv,\r\n )\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op, gemvs):\r\n z, a, x, y, b = host_input.owner.inputs\r\n return [gpu_gemv_no_inplace(\r\n gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b)]\r\n if isinstance(node.op, gemvs):\r\n z, a, x, y, b = node.inputs\r\n x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))\r\n y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))\r\n z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))\r\n if x_on_gpu or y_on_gpu or z_on_gpu:\r\n return [host_from_gpu(\r\n gpu_gemv_no_inplace(\r\n gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b))]\r\n return False",
"def local_gpu_gemm(node):\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n tensor.blas.Gemm):\r\n z, a, x, y, b = host_input.owner.inputs\r\n return [gpu_gemm_no_inplace(gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b)]\r\n if isinstance(node.op, tensor.blas.Gemm):\r\n z, a, x, y, b = node.inputs\r\n x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))\r\n y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))\r\n z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))\r\n if x_on_gpu or y_on_gpu or z_on_gpu:\r\n return [host_from_gpu(gpu_gemm_no_inplace(gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y),\r\n b))]\r\n return False",
"def test_gemm_opt0():\r\n X, Y, Z, a, b = XYZab()\r\n\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])\r\n\r\n #with transposes (transposes should be pushed through dot in canonicalize)\r\n just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])\r\n just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],\r\n ishapes=[(5, 3), (3, 4), (4, 5), (), ()])\r\n\r\n #with N multiplications instead of just one\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])",
"def convert_bmm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n y = _op.transpose(y, [0, 2, 1])\n out = _op.nn.batch_matmul(x, y)\n g.add_node(op.output(\"Out\")[0], out)",
"def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[np.dot(x2,x1), np.dot(x2, y1)], [np.dot(y2,x1), np.dot(y2,y1)]])",
"def compute(self, node, input_vals):\n mat_A = input_vals[0]\n mat_B = input_vals[1]\n if node.matmul_attr_trans_A:\n mat_A = mat_A.T\n if node.matmul_attr_trans_B:\n mat_B = mat_B.T\n return np.matmul(mat_A, mat_B)",
"def matrix_mult(m1, m2):\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m2[0])):\n\t\t\te.append(row_times_column(m1,i,m2,j))\n\t\ttemp.append(e)\n\treturn temp",
"def convert_meshgrid(g, op, block):\n\n inputs = op.input(\"X\")\n x = [g.get_node(i) for i in inputs]\n outs = _op.meshgrid(x, indexing=\"ij\")\n for i, out in enumerate(outs):\n g.add_node(op.output(\"Out\")[i], out)",
"def local_dot22_to_ger_or_gemv(node):\r\n if node.op == _dot22:\r\n x, y = node.inputs\r\n xb = x.broadcastable\r\n yb = y.broadcastable\r\n one = T.as_tensor_variable(numpy.asarray(1, dtype=x.dtype))\r\n zero = T.as_tensor_variable(numpy.asarray(0, dtype=x.dtype))\r\n if xb[1] and yb[0]:\r\n # x and y are both vectors so this might qualifies for a GER\r\n xv = x.dimshuffle(0)\r\n yv = y.dimshuffle(1)\r\n\r\n zeros = T.zeros([x.shape[0], y.shape[1]], dtype=x.dtype)\r\n rval = ger(zeros, one, xv, yv)\r\n return [rval]\r\n if xb[0] and yb[1]:\r\n # x and y are both vectors so this qualifies for a sdot / ddot\r\n # TODO: Theano doesn't have a sdot, but gemv is better than _dot22\r\n xv = x.dimshuffle(1)\r\n zeros = T.zeros([1], x.dtype)\r\n rval = gemv_no_inplace(zeros, one, y.T, xv, zero)\r\n return [rval.dimshuffle('x', 0)]\r\n if xb[0] and not yb[0] and not yb[1]:\r\n # x is vector, y is matrix so try gemv\r\n xv = x.dimshuffle(1)\r\n zeros = T.zeros([y.shape[1]], x.dtype)\r\n rval = gemv_no_inplace(zeros, one, y.T, xv, zero)\r\n return [rval.dimshuffle('x', 0)]\r\n if not xb[0] and not xb[1] and yb[1]:\r\n # x is matrix, y is vector, try gemv\r\n yv = y.dimshuffle(0)\r\n zeros = T.zeros([x.shape[0]], dtype=x.dtype)\r\n rval = gemv_no_inplace(zeros, one, x, yv, zero)\r\n return [rval.dimshuffle(0, 'x')]",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 2\r\n if node.matmul_attr_trans_A :\r\n input_vals[0] = input_vals[0].T\r\n if node.matmul_attr_trans_B :\r\n input_vals[1] = input_vals[1].T\r\n return np.matmul(input_vals[0] , input_vals[1])",
"def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])",
"def convert_mv(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Vec\")[0])\n y = _op.expand_dims(y, axis=-1)\n y = _op.transpose(y)\n out = _op.nn.dense(x, y)\n out = _op.squeeze(out, axis=[-1])\n g.add_node(op.output(\"Out\")[0], out)",
"def wrap_compute_conv2d_gemm(topi_compute):\n\n def _compute_conv2d_gemm(attrs, inputs, out_type):\n padding = attrs.get_int_tuple(\"padding\")\n strides = attrs.get_int_tuple(\"strides\")\n dilation = attrs.get_int_tuple(\"dilation\")\n out_dtype = attrs.get_str(\"out_dtype\")\n channels = attrs[\"channels\"]\n kernel_size = attrs[\"kernel_size\"]\n out_dtype = inputs[0].dtype if out_dtype in (\"same\", \"\") else out_dtype\n return [\n topi_compute(\n inputs[0], inputs[1], strides, padding, dilation, out_dtype, kernel_size, channels\n )\n ]\n\n return _compute_conv2d_gemm",
"def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def makeTransformations(epsg1, epsg2):\n sr1 = osr.SpatialReference()\n sr1.ImportFromEPSG(epsg1)\n preventGdal3axisSwap(sr1)\n sr2 = osr.SpatialReference()\n sr2.ImportFromEPSG(epsg2)\n preventGdal3axisSwap(sr2)\n tr1to2 = osr.CoordinateTransformation(sr1, sr2)\n tr2to1 = osr.CoordinateTransformation(sr2, sr1)\n return (tr1to2, tr2to1)",
"def common_optimization(m):\n logger.info(\"Doing nodes fusion and replacement... \")\n m = other.polish_model(m)\n g = m.graph\n other.transpose_B_in_Gemm(g)\n fusing.fuse_BN_into_Gemm(g)\n fusing.fuse_BN_with_Reshape_into_Gemm(g)\n fusing.fuse_Gemm_into_Gemm(g)\n fusing.fuse_consecutive_reducemean(g)\n fusing.fuse_slice_nodes_into_conv(g)\n fusing.fuse_relu_min_into_clip(g)\n other.duplicate_shared_Flatten(g)\n replacing.replace_average_pool_with_GAP(g)\n\n m = other.polish_model(m)\n g = m.graph\n\n replacing.replace_Squeeze_with_Reshape(g)\n replacing.replace_Unsqueeze_with_Reshape(g)\n replacing.replace_Reshape_with_Flatten(g)\n replacing.replace_ReduceMean_with_GlobalAveragePool(g)\n replacing.replace_Sum_with_Adds(g)\n replacing.replace_constant_input_concat_with_pad(g)\n other.topological_sort(g)\n return m",
"def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)",
"def matrix_mult(m1, m2):\n pass",
"def matrix_multiply_mapper(m, element):\n\n name, i, j, value = element\n\n if name == \"A\":\n for k in range(m):\n yield ((i, k), (j, value))\n \n else:\n for k in range(m):\n yield((k, j), (i, value))"
] | [
"0.6389869",
"0.62474555",
"0.59834313",
"0.5800283",
"0.57034826",
"0.56631166",
"0.5639739",
"0.542331",
"0.5402771",
"0.53584194",
"0.53427106",
"0.53198403",
"0.5156288",
"0.51285404",
"0.50952226",
"0.5079322",
"0.50783587",
"0.5072455",
"0.5072435",
"0.5067956",
"0.5063722",
"0.50567645",
"0.5028325",
"0.5002907",
"0.49286252",
"0.4895705",
"0.48596543",
"0.48572835",
"0.48297635",
"0.47984168"
] | 0.7864781 | 0 |
Map MXNet's Pooling operator attributes to onnx's MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators based on the input node's attributes and return the created node. | def convert_pooling(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
kernel = eval(attrs["kernel"])
pool_type = attrs["pool_type"] if attrs.get("pool_type") else "max"
stride = eval(attrs["stride"]) if attrs.get("stride") else (1, 1)
global_pool = get_boolean_attribute_value(attrs, "global_pool")
p_value = attrs.get('p_value', 'None')
pooling_convention = attrs.get('pooling_convention', 'valid')
ceil_mode = False
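    # MXNet's pooling_convention='full' corresponds to ONNX ceil_mode, which is only available from onnx 1.5.0 onwards.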
    if pooling_convention == 'full':
        ceil_mode = True
        if onnx.__version__ < "1.5.0":
            pooling_warning = "Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. " \
                              "This might lead to shape or accuracy issues. " \
                              "https://github.com/onnx/onnx/issues/549"
            logging.warning(pooling_warning)
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
pad_dims = pad_dims + pad_dims
pool_types = {"max": "MaxPool", "avg": "AveragePool", "lp": "LpPool"}
global_pool_types = {"max": "GlobalMaxPool", "avg": "GlobalAveragePool",
"lp": "GlobalLpPool"}
if pool_type == 'lp' and p_value == 'None':
raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool')
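    # global_pool maps to the Global{Max,Average,Lp}Pool operators; otherwise emit a pooling node with explicit kernel/pads/strides.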
if global_pool:
if pool_type == 'lp':
node = onnx.helper.make_node(
global_pool_types[pool_type],
input_nodes, # input
[name],
p=int(p_value),
name=name
)
else:
node = onnx.helper.make_node(
global_pool_types[pool_type],
input_nodes, # input
[name],
name=name
)
else:
if pool_type == 'lp':
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
[name],
p=int(p_value),
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name
)
else:
if onnx.__version__ >= "1.5.0":
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
[name],
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name,
ceil_mode=ceil_mode
)
else:
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
[name],
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def _fix_pooling(self, op_name, inputs, new_attr):\n pool_type = 'avg' if op_name == 'AveragePool' else 'max'\n stride = new_attr.get('strides')\n kernel = new_attr.get('kernel_shape')\n padding = new_attr.get('pads')\n pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding)\n new_pad_op = mx.sym.pad(inputs[0], mode='constant', pad_width=pad_width)\n new_pooling_op = mx.sym.Pooling(new_pad_op, pool_type=pool_type,\n stride=stride, kernel=kernel)\n return new_pooling_op",
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def _create_max_avg_pool(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support count_include_pad and auto_pad\n if \"count_include_pad\" in onnx_node.attrs or \"ceil_mode\" in onnx_node.attrs:\n raise ValueError(\n \"Not implemented yet for count_include_pad or ceil_mode\")\n\n # only support 2d\n if len(kernel) != 2:\n raise ValueError(\"Not implemented yet\")\n\n is_max = onnx_node.op_type == 'MaxPool'\n x = inputs[0]\n if x.device.id() == -1:\n handle = singa.PoolingHandle(x.data, kernel, stride, padding,\n is_max)\n else:\n handle = singa.CudnnPoolingHandle(x.data, kernel, stride, padding,\n is_max)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)",
"def _create_globalaveragepool(cls, onnx_node, inputs, opset_version):\n data_format = onnx_node.getattr(\"data_format\", 'channels_first')\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(data_format)",
"def test_pool_consistency(self) -> None:\n x = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3, 3])\n )\n input_ops = {'X': cast(Operator, x)}\n\n MaxPool(\n 'max_pool1',\n [1, 2, 2, 3],\n Float32(),\n input_ops,\n kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[2, 2]\n )\n\n print(\"Consistency test for pooling operator passed!\")",
"def _pool_op(self, in_obj, pool_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(pool_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n output_axes = self._output_axes(in_obj,\n pad_int)\n poolparams = make_poolparams(self.pool_type,\n self.pool_shape,\n self.strides,\n pad_int)\n return ng.pooling(poolparams,\n in_obj,\n axes=output_axes)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def pool(self) -> NodePool:\n\n return self._pool",
"def make_pool(X, y, prelabeled=np.arange(5)):\n y = y.argmax(axis=1)\n # a set of labels is already labeled by the oracle\n y_train_labeled = np.array([None] * len(y))\n #y_train_labeled =np.empty((len(y), 2))* np.nan\n y_train_labeled[prelabeled] = y[prelabeled]\n\n # we are making a pool of the train data\n # the 'prelabeled' labels of the dataset are already labeled.\n return Dataset(X, y_train_labeled), Dataset(X, y)",
"def pool_layer( x, wpool, padding, name ):\n top = tf.layers.max_pooling2d( x, \n 2, \n [2, wpool], \n padding=padding, \n name=name )\n return top",
"def __init__(self, poolIndex, cls):\n super(P1, self).__init__(poolIndex, \"abstractnode\", [\"edges\", \"map\"], [None for i in range(0, 0)], cls)",
"def test_n_minus_f_pool_processes_attrib(looper, nodeSet,\n sdk_pool_handle,\n sdk_wallet_steward):\n make_pool_n_minus_f_nodes(looper, nodeSet)\n\n sdk_add_raw_attribute(looper, sdk_pool_handle, sdk_wallet_steward, 'foo', 'bar')",
"def pooler_layer(self):\n return self._pooler_layer",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def __init__(self, ratio=1, mode=\"average\"):\n if mode == \"max\":\n self.__pooling = EquiangularMaxPool(ratio)\n self.__unpooling = EquiangularMaxUnpool(ratio)\n else:\n self.__pooling = EquiangularAvgPool(ratio)\n self.__unpooling = EquiangularAvgUnpool(ratio)",
"def get_nodepool_labels(self) -> Union[Dict[str, str], None]:\n return self.agentpool_context.get_nodepool_labels()",
"def schedule_pool_arm_cpu(attrs, outs, target):\n layout = attrs.layout\n avg_pool = isinstance(attrs, relay.op.op_attrs.AvgPool2DAttrs)\n with target:\n if (\n avg_pool\n and target.features.has_dsp\n and layout in (\"NCW\", \"NCHW\")\n or not avg_pool\n and target.features.has_dsp\n and layout in (\"NWC\", \"NHWC\")\n ):\n return topi.arm_cpu.schedule_pool(outs, layout)\n return topi.x86.schedule_pool(outs, layout)",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def max_pool(self, x, name=\"\"):\n return tf.nn.max_pool(x, ksize=self.mp_size, strides=self.mp_stride,\n padding=self.mp_padding, name=name)",
"def _pool(\n pool_mode,\n nd_util,\n input,\n kernel_size,\n stride=1,\n padding=0,\n ceil_mode=False,\n):\n return FunctionLib.apply(\n 'Pool',\n input.device,\n [input],\n kernel_shape=nd_util(kernel_size),\n strides=nd_util(stride),\n pads=nd_util(padding),\n mode=pool_mode,\n ceil_mode=ceil_mode,\n )",
"def max_pooling(self, filter_):\n return self.add_layer(max_pooling, filter_)",
"def create_mapping(max_onnx_opset_version, extra_opsets):\n mapping = {constants.ONNX_DOMAIN: max_onnx_opset_version}\n if extra_opsets:\n for extra_opset in extra_opsets:\n mapping[extra_opset.domain] = extra_opset.version\n ops_mapping = {}\n domain_to_ops_mapping = collections.defaultdict(dict)\n for domain, opsets in tf_op.get_opsets().items():\n for target_opset, op_map in enumerate(opsets):\n m = mapping.get(domain)\n if m:\n if target_opset <= m and op_map:\n domain_to_ops_mapping[domain].update(ops_mapping)\n ops_mapping.update(op_map)\n\n tf_op._MAPPING = ops_mapping\n tf_op._DOMAIN_MAPPING = domain_to_ops_mapping\n return ops_mapping",
"def pooling(self):\n return self.__pooling",
"def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')",
"def max_pool_2x2(self, x,name=\"\"):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], \n padding=\"VALID\",\n name=name\n )",
"def convert_pool2d(g, op, block):\n\n adaptive = op.attr(\"adaptive\")\n ceil_mode = op.attr(\"ceil_mode\")\n global_pooling = op.attr(\"global_pooling\")\n ksize = op.attr(\"ksize\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n pooling_type = op.attr(\"pooling_type\")\n data_format = op.attr(\"data_format\")\n\n if global_pooling:\n adaptive = True\n ksize = [1, 1]\n\n input_x = g.get_node(op.input(\"X\")[0])\n _, _, in_h, in_w = infer_shape(input_x)\n\n op_map = {\"avg\": \"avg_pool2d\", \"max\": \"max_pool2d\"}\n\n strides = op.attr(\"strides\")\n if isinstance(strides, int):\n strides = [strides, strides]\n if isinstance(ksize, int):\n ksize = [ksize, ksize]\n if isinstance(paddings, int):\n paddings = [paddings] * 2\n\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n input_x = autopad(input_x, strides, ksize)\n paddings = [0, 0]\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Pool2d is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n # handle with special case\n # while kernel size less than input size\n # shrink kernel size to input size\n if (\n not isinstance(in_h, _op.Expr)\n and padding_algorithm == \"EXPLICIT\"\n and in_h + paddings[0] + paddings[2] < ksize[0]\n ):\n ksize[0] = in_h\n if (\n not isinstance(in_w, _op.Expr)\n and padding_algorithm == \"EXPLICIT\"\n and in_w + paddings[1] + paddings[3] < ksize[1]\n ):\n ksize[1] = in_w\n\n if not adaptive:\n if pooling_type == \"avg\":\n exclusive = op.attr(\"exclusive\")\n out = _op.nn.avg_pool2d(\n input_x,\n pool_size=ksize,\n strides=strides,\n padding=paddings,\n ceil_mode=ceil_mode,\n count_include_pad=not exclusive,\n )\n else:\n out = getattr(_op.nn, op_map[pooling_type])(\n input_x, pool_size=ksize, strides=strides, padding=paddings, ceil_mode=ceil_mode\n )\n else:\n out = getattr(_op.nn, \"adaptive_\" + op_map[pooling_type])(\n input_x, output_size=ksize, layout=data_format\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def pool(self):\n return self._properties.get('pool')",
"def create_pool(self, context, pool):\n LOG.info(\"Received request 'Create Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool\n }\n # REVISIT(jiahao) M:N pool is not yet implemented.\n self._send_event(lb_const.EVENT_CREATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])"
] | [
"0.6422184",
"0.6282772",
"0.61572933",
"0.6030548",
"0.59187293",
"0.5839444",
"0.5670332",
"0.55067396",
"0.546821",
"0.53994155",
"0.5370744",
"0.5343035",
"0.52772164",
"0.5261299",
"0.5253784",
"0.52436227",
"0.5200312",
"0.51936644",
"0.51801187",
"0.51740825",
"0.51138467",
"0.5105015",
"0.5068508",
"0.5058825",
"0.5052989",
"0.50443333",
"0.5025875",
"0.50222284",
"0.5012439",
"0.4990189"
] | 0.7350024 | 0 |
Map MXNet's exp operator attributes to onnx's Exp operator and return the created node. | def convert_exp(node, **kwargs):
return create_basic_op_node('Exp', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))",
"def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.exp())",
"def expr(self):\n return self._express",
"def expon(*args, **kws) -> core.Expon:\n X, Y, kws = util.parseargs(*args, **kws)\n assert \"exp\" in kws\n return core.Expon(X, Y, **kws)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.exp(), diag_shape=self.diag_shape)",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def exp(obj):\n\tif isinstance(obj, Variable):\n\t\t\n\t\tval = np.exp(obj.val)\n\t\tder = np.exp(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.exp(obj)",
"def expIP(self):\n np.exp(self.t, out=self.t)\n return self",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def exp(self):\n\t\tval = np.exp(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.exp(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = np.multiply(to_multiply, self.der)\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def expression(self, p):\n num_type, first, second = get_type_first_second_of_binary_operation(p.expression, p.term)\n\n opcode_type = I_for_int_R_for_float(num_type)\n opcode_action = \"ADD\" if p.ADDOP == \"+\" else \"SUB\"\n opcode = opcode_type + opcode_action\n\n temp = next(g_generate_temp_variable_name)\n temp_variables_values_dict[temp] = temp\n\n qaud_code(f\"{opcode} {temp} {first} {second}\")\n return Expression(num_type, temp)",
"def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))",
"def exp(x):\n if isinstance(x, int):\n x = Expression(x)\n return _exp(x)",
"def exp(data):\n return _make.exp(data)",
"def exp(self):\n return Factor().__build( VarSet(self.v) , np.exp(self.t) )",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def exp(tensor):\n return _elementary_op(tensor, np.exp, np.exp)",
"def exp(self, X, U):\n raise NotImplementedError",
"def exp(x):\n raise NotImplementedError",
"def __init__(self, str_exp=None, kind=None, scanner=None):\n self.kind = None\n self.name = 'undef'\n self.attr = None\n self.child = None\n self.left = None\n self.right = None\n self.code = None\n\n if str_exp is not None:\n logging.debug('========== EXP in init(NODE): SEXP = [' + str_exp + ']')\n scanner = lex.Scanner(rules)\n scanner.setString(str_exp)\n\n if kind is not None: # create an empty node\n self.kind = kind\n return\n\n if scanner is None:\n raise Exception('Fatal Error: scanner not defined')\n\n while scanner.curToken().type in FIRST:\n\n if scanner.curToken().type == LITERAL:\n self.name = scanner.curToken().name\n self.code = LITERAL\n self.kind = ATOM\n scanner.move()\n\n elif scanner.curToken().type == LPAREN:\n scanner.move() # skip the parentheses\n\n tmp = Exp(scanner=scanner) # tree of the expression between parentheses\n self.kind = tmp.kind\n self.attr = tmp.attr\n self.name = tmp.name\n self.left = tmp.left\n self.right = tmp.right\n self.child = tmp.child\n\n if scanner.curToken().type != RPAREN:\n raise ParserException(\"')' expected\")\n scanner.move()\n\n elif isUnitary(scanner.curToken().type):\n self.kind = UNARY\n self.name = scanner.curToken().name\n self.code = scanner.curToken().type\n\n # if token_type == ATTRIB # this is for existence and foreach\n\n scanner.move()\n self.child = Exp(scanner=scanner)\n\n # the scanner has been moved to a successive token\n if scanner.curToken().type == NULLTOKEN:\n break\n\n # check for infix operators\n if isBinary(scanner.curToken().type):\n operator_name = scanner.curToken().name\n operator_type = scanner.curToken().type\n scanner.move()\n\n # move the current node to the left of the tree\n lnode = Exp(kind=self.kind)\n lnode.name = self.name\n lnode.attr = self.attr\n lnode.child = self.child\n lnode.left = self.left\n lnode.right = self.right\n lnode.code = self.code\n\n # this node became the handler aka the binary operator\n self.code = operator_type\n self.name = operator_name\n self.kind = BINARY\n self.left = lnode\n # lookup the second child of the operator\n self.right = Exp(scanner=scanner)",
"def sym_exp_map(cls, q, eta):\n sqrt_q = q ** 0.5\n return sqrt_q * Quaternion.exp(eta) * sqrt_q",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def exp(module, x):\n _import_modules()\n if module in [np, ma]:\n return np.exp(x)\n elif module == torch:\n return torch.exp(x)\n elif module == jnp:\n return jnp.exp(x)\n elif module == tf:\n return tf.math.exp(x)\n raise UnknownModuleException(f\"Module {module.__name__} not supported.\")",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def exp_map(cls, q, eta):\n return q * Quaternion.exp(eta)",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors"
] | [
"0.63551813",
"0.6045158",
"0.5848142",
"0.58281314",
"0.5785661",
"0.5784021",
"0.5707887",
"0.56942314",
"0.5616022",
"0.5590413",
"0.5572627",
"0.5540329",
"0.5512824",
"0.5411621",
"0.53919",
"0.5374348",
"0.535856",
"0.5315022",
"0.5296157",
"0.5277749",
"0.52776116",
"0.52670443",
"0.5260635",
"0.5253377",
"0.5226181",
"0.520674",
"0.5206623",
"0.52063775",
"0.5200142",
"0.51646894"
] | 0.7500115 | 0 |
Map MXNet's _copy operator attributes to onnx's Identity operator and return the created node. | def convert_copy(node, **kwargs):
return create_basic_op_node('Identity', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def copy_as_new(self) -> \"Individual\":\n return Individual(self.main_node.copy(), to_pipeline=self._to_pipeline)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def clone(self):\n if self.result_id is not None:\n new_id = self.module.new_id()\n else:\n new_id = None\n return Instruction(self.module, self.op_name, new_id, self.type_id,\n self.operands[:])",
"def copy(self):\n copy = Node(self.ident)\n for k, v in self.iteritems():\n copy[k] = v\n return copy",
"def clone(self):\n return XLNodeID(self._node_id)",
"def clone(self):\n tmp = self.my_operator\n self.my_operator = None\n new = copy.copy(self)\n self.my_operator = tmp\n return new",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def get_copied_op(org_instance, graph, scope=\"\"):\n\n #The name of the copied instance\n if scope != '':\n new_name = scope + '/' + org_instance.name\n else:\n new_name = org_instance.name\n\n return graph.as_graph_element(new_name, allow_tensor=True,\n allow_operation=True)",
"def to_op(self):\n raise NotImplementedError",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def copy(self):\n kopy = self.__class__()\n # Copy the source net\n kopy.source_net = nx.DiGraph(self.source_net)\n return kopy",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def copy(self):\n node_new = Node(self.state.copy(), self.parent, self.children.copy(), self.RRT, self.path_length)\n node_new.vs = self.vs.copy()\n node_new.RRT = self.RRT\n node_new.observed = self.observed\n node_new.observation_node = self.observation_node\n node_new.observation_area = self.observation_area\n\n return node_new",
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta",
"def copy(self):\n\t\t\n\t\taCopy = self.__class__()\n\t\taCopy.mip = self.mip\n\t\taCopy.msg = self.msg\n\t\taCopy.options = self.options\n\t\treturn aCopy",
"def __copy__(self):\n result = Node()\n result.data = copy.copy(self.data)\n if self.left:\n result.left = copy.copy(self.left)\n if self.right:\n result.right = copy.copy(self.right)\n return result",
"def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)",
"def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )"
] | [
"0.6040742",
"0.59664553",
"0.59002656",
"0.58588904",
"0.5807119",
"0.5798085",
"0.572463",
"0.57207906",
"0.56919736",
"0.56849563",
"0.567741",
"0.5644937",
"0.55868983",
"0.55787975",
"0.5568176",
"0.55487376",
"0.5499865",
"0.5467768",
"0.5450066",
"0.5446349",
"0.54340404",
"0.53856814",
"0.5381963",
"0.5375562",
"0.532201",
"0.53041244",
"0.52769804",
"0.5261116",
"0.52446026",
"0.52424115"
] | 0.7632108 | 0 |
Map MXNet's identity operator attributes to onnx's ConstantFill operator and return the created node. | def convert_identity(node, **kwargs):
return create_basic_op_node('ConstantFill', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_copy(node, **kwargs):\n return create_basic_op_node('Identity', node, kwargs)",
"def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorProto.INT32\n tensor_value = onnx.helper.make_tensor(\"value\", tensor_type, [1],\n [op.value])\n node.attribute.extend([\n helper.make_attribute('value', tensor_value),\n ])\n return node",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def get_identity_op(self, space, domain, range_, dtr):\n return bempp.api.operators.boundary.sparse.identity(\n # this can make the kernel crash if not set correctly\n space[domain], space[range_], space[dtr]\n )",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def identity(cls):\n return super().identity(4, 4)",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def manually_get_block_identity_op(self, space):\n i = self.get_identity_op(space, OP_DOM, OP_DOM, OP_DUA)\n I = assembly.BlockedOperator(2, 2)\n I[0, 0] = i\n I[1, 1] = i\n return I",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n self.top_node = \"[1]\"\r\n self.dim = 0",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def identity(cls):\n return super().identity(3, 3)",
"def identity(n, dtype=np.float32, constant=False):\n return Tensor(np.identity(n, dtype), constant=constant)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def makeIdentity(self) -> None:\n ...",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Zeroslike(%s)\" % node_A.name\r\n return new_node",
"def createRotoPaintNodeMI():\n return gr()",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def test_identity_multiple(self, dev):\n dev = qml.device(dev, wires=2)\n\n @qml.qnode(dev)\n def circuit():\n qml.PauliX(0)\n return qml.expval(qml.Identity(0)), qml.expval(qml.Identity(1))\n\n assert np.allclose(circuit(), np.ones(2))",
"def convert_fill_constant(g, op, block):\n\n value = op.attr(\"value\")\n shape = block.var(op.output(\"Out\")[0]).shape\n dtype = op.attr(\"dtype\")\n dtype = _convert_dtype_value(dtype)\n value = _expr.const(value).astype(dtype)\n if \"ValueTensor\" in op.input_names and op.input(\"ValueTensor\"):\n shape = g.get_node(op.input(\"ValueTensor\")[0])\n if \"ShapeTensor\" in op.input_names and op.input(\"ShapeTensor\"):\n shape = g.get_node(op.input(\"ShapeTensor\")[0])\n\n if isinstance(shape, _expr.Expr):\n shape = try_infer_value(shape, parameters=g.get_params())[0]\n\n if isinstance(shape, np.ndarray):\n shape = shape.tolist()\n\n out = _op.full(value, shape=shape, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Zeroslike(%s)\" % node_A.name\n return new_node"
] | [
"0.5793247",
"0.55757725",
"0.53495646",
"0.5342002",
"0.5303849",
"0.5282525",
"0.52815294",
"0.5273177",
"0.5256617",
"0.5214077",
"0.5213285",
"0.5184088",
"0.51712525",
"0.51261264",
"0.511096",
"0.50941354",
"0.5083022",
"0.50758964",
"0.5066469",
"0.50372505",
"0.5021455",
"0.5007537",
"0.5003187",
"0.49992627",
"0.49828473",
"0.4969259",
"0.4965528",
"0.49502343",
"0.49433",
"0.4934118"
] | 0.76081395 | 0 |
Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator based on the input node's attributes and return the created node. | def convert_instancenorm(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
eps = float(attrs.get("eps", 0.001))
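    # MXNet's eps (default 1e-3 here) becomes the ONNX epsilon attribute below.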
node = onnx.helper.make_node(
'InstanceNormalization',
inputs=input_nodes,
outputs=[name],
name=name,
epsilon=eps)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_instance_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n epsilon = op.attr(\"epsilon\")\n\n scale = center = True\n out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)\n g.add_node(op.output(\"Y\")[0], out)",
"def convert_l2normalization(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mode = attrs.get(\"mode\", \"instance\")\n\n if mode != \"channel\":\n raise AttributeError(\"L2Normalization: ONNX currently supports channel mode only\")\n\n l2norm_node = onnx.helper.make_node(\n \"LpNormalization\",\n input_nodes,\n [name],\n axis=1, # channel only\n name=name\n )\n return [l2norm_node]",
"def convert_norm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n ord = int(attrs.get(\"ord\", 2))\n\n onnx_op_name = \"ReduceL1\" if ord == 1 else \"ReduceL2\"\n\n if axes:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]\n else:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]",
"def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_batchnorm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n momentum = float(attrs.get(\"momentum\", 0.9))\n eps = float(attrs.get(\"eps\", 0.001))\n\n bn_node = onnx.helper.make_node(\n \"BatchNormalization\",\n input_nodes,\n [name],\n name=name,\n epsilon=eps,\n momentum=momentum,\n # MXNet computes mean and variance per channel for batchnorm.\n # Default for onnx is across all spatial features. Relying on default\n # ONNX behavior of spatial=1 for ONNX opset 8 and below. As the spatial\n # attribute is deprecated in opset 9 and above, not explicitly encoding it.\n )\n return [bn_node]",
"def _create_batchnorm(cls, op, op_t):\n # first, we init batchnorm node\n epsilon = 1e-5 # the epsilon value used in singa\n bn_node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n bn_node.attribute.extend([\n helper.make_attribute('momentum', op.handle.factor),\n helper.make_attribute('epsilon', epsilon),\n ])\n # then we add nodes of scal, bias, mean, var\n nodes = []\n running_values = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, running_value in running_values.items():\n node_name = op.name + \":\" + tmp_name\n bn_node.input.append(node_name)\n\n nodes.append(bn_node)\n return nodes",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def get_norm_layer():\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n return norm_layer",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def instance_norm_model():\n inputs = tf.keras.Input(shape=(16, 16, 3,))\n x = tf.keras.layers.Conv2D(16, (3, 3))(inputs)\n x = tf.contrib.layers.instance_norm(x)\n return x",
"def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def convert_random_normal(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n mean = float(attrs.get(\"loc\", 0))\n scale = float(attrs.get(\"scale\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n\n node = onnx.helper.make_node(\n 'RandomNormal',\n input_nodes,\n [name],\n mean=mean,\n scale=scale,\n dtype=dtype,\n shape=shape,\n name=name\n )\n return [node]",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _add_node_attributes(self):\n ensemble_mapping = SankeyLayout._ensemble_map(\n df=self.supergraph.gf.df, nxg=self.nxg, columns=SankeyLayout._COLUMNS\n )\n for idx, key in enumerate(ensemble_mapping):\n nx.set_node_attributes(self.nxg, name=key, values=ensemble_mapping[key])\n\n dataset_mapping = {}\n for run in self.runs:\n dataset_mapping[run] = SankeyLayout._dataset_map(\n df=self.supergraph.gf.df,\n nxg=self.nxg,\n tag=run,\n columns=SankeyLayout._COLUMNS,\n )\n nx.set_node_attributes(\n self.nxg, name=self.supergraph.tag, values=dataset_mapping[run]\n )",
"def convert_batch_norm(g, op, block):\n\n ipt_name = op.input(\"X\")[0]\n scale_name = op.input(\"Scale\")[0]\n bias_name = op.input(\"Bias\")[0]\n mean_name = op.input(\"Mean\")[0]\n variance_name = op.input(\"Variance\")[0]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.batch_norm(\n g.get_node(ipt_name),\n g.get_node(scale_name),\n g.get_node(bias_name),\n g.get_node(mean_name),\n g.get_node(variance_name),\n epsilon=epsilon,\n )\n g.add_node(op.output(\"Y\")[0], out[0])",
"def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}",
"def instance_norm(images, epsilon=1e-5, name='instance_norm'):\n means = tf.reduce_mean(images, axis=[1, 2], keep_dims=True)\n stddevs = tf.sqrt(tf.reduce_mean(tf.square(images - means), axis=[1, 2], keep_dims=True))\n results = (images - means) / (stddevs + epsilon)\n with tf.variable_scope(None, default_name=name):\n biases = tf.get_variable('biases', shape=images.get_shape()[-1].value, dtype=images.dtype,\n initializer=tf.zeros_initializer())\n scales = tf.get_variable('scales', shape=images.get_shape()[-1].value, dtype=images.dtype,\n initializer=tf.ones_initializer())\n return results*scales + biases",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def convert_group_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n num_groups = op.attr(\"groups\")\n epsilon = op.attr(\"epsilon\")\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n out = _op.nn.group_norm(\n x,\n gamma=gamma,\n beta=beta,\n num_groups=num_groups,\n axis=1,\n epsilon=epsilon,\n center=True,\n scale=True,\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def _create_elu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped",
"def _create_batchnorm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n factor = onnx_node.getattr('momentum', 0.9)\n if x.device.id() == -1:\n handle = singa.BatchNormHandle(factor, x.data)\n else:\n handle = singa.CudnnBatchNormHandle(factor, x.data)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return handle, forward",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def get_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # declare the dict of class instance\n # where we'll store the normalization parameter\n self.param_norm = {'features': {}, 'targets': {}}\n for feat_type, feat_names in self.select_feature.items():\n self.param_norm['features'][feat_type] = {}\n for name in feat_names:\n self.param_norm['features'][feat_type][name] = NormParam(\n )\n self.param_norm['targets'][self.select_target] = MinMaxParam()\n\n # read the normalization\n self._read_norm()\n\n # make array for fast access\n self.feature_mean, self.feature_std = [], []\n for feat_type, feat_names in self.select_feature.items():\n for name in feat_names:\n self.feature_mean.append(\n self.param_norm['features'][feat_type][name].mean)\n self.feature_std.append(\n self.param_norm['features'][feat_type][name].std)\n\n self.target_min = self.param_norm['targets'][self.select_target].min\n self.target_max = self.param_norm['targets'][self.select_target].max",
"def polyNormalizeUV(*args, centerOnTile: bool=True, normalizeDirection: Union[int, bool]=0,\n normalizeType: Union[int, bool]=0, preserveAspectRatio: bool=True, caching:\n bool=True, constructionHistory: bool=True, createNewMap: bool=True,\n insertBeforeDeformers: bool=True, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, uvSetName: AnyStr=\"\", worldSpace: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node"
] | [
"0.68201405",
"0.6210069",
"0.6158336",
"0.5857216",
"0.5686284",
"0.56264687",
"0.55710334",
"0.55510676",
"0.54553306",
"0.5442757",
"0.54066503",
"0.53916603",
"0.53645897",
"0.536249",
"0.5269938",
"0.5257382",
"0.52224195",
"0.51939166",
"0.5191482",
"0.51271725",
"0.5073918",
"0.5026502",
"0.5024269",
"0.49813375",
"0.4980995",
"0.49624044",
"0.49612302",
"0.4939744",
"0.48886734",
"0.48872906"
] | 0.74606353 | 0 |
Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators based on the input node's attributes and return the created node. | def convert_leakyrelu(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
initializer = kwargs["initializer"]
act_type = attrs.get("act_type", "leaky")
alpha = float(attrs.get("slope", 0.25))
act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu",
"selu": "Selu"}
reshape_val_name = 'reshape' + str(kwargs["idx"])
input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
reshape_value = np.array([1, -1, 1, 1], dtype='int64')
dims = np.shape(reshape_value)
shape_node = onnx.helper.make_tensor_value_info(reshape_val_name, input_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=reshape_val_name,
data_type=input_type,
dims=dims,
vals=reshape_value,
raw=False,
)
)
slope_op_name = 'slope' + str(kwargs["idx"])
lr_node = []
if act_type == "prelu" or act_type == "selu":
reshape_slope_node = onnx.helper.make_node(
'Reshape',
inputs=[input_nodes[1], reshape_val_name],
outputs=[slope_op_name],
name=slope_op_name
)
node = onnx.helper.make_node(
act_name[act_type],
inputs=[input_nodes[0], slope_op_name],
outputs=[name],
name=name)
lr_node.append(shape_node)
lr_node.append(reshape_slope_node)
lr_node.append(node)
else:
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha)
lr_node.append(node)
return lr_node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_leakyrelu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.01)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def convert_relu(node, **kwargs):\n return create_basic_op_node('Relu', node, kwargs)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def convert_leaky_relu(g, op, block):\n\n alpha = op.attr(\"alpha\")\n x = g.get_node(op.input(\"X\")[0])\n out = _op.nn.leaky_relu(x, alpha=alpha)\n g.add_node(op.output(\"Out\")[0], out)",
"def _convert_to_leaky_relu_action(graph: Graph, matches: dict):\n mul_op = matches['mul_op']\n mul_value_data = matches['const_data']\n mul_data = matches['mul_data']\n input_data = matches['data']\n max_op = matches['max_op']\n max_data = max_op.out_node()\n\n # Check that all nodes satisfies conversion requirements\n if len(max_op.in_nodes()) > 2:\n log.debug('Maximum layer ({}) can not participate in conversion to leaky ReLU due to it has more than two '\n 'inputs ({})'.format(max_op.id, len(max_op.in_nodes())))\n return\n\n if mul_value_data.has_valid('value') and mul_value_data.value.size != 1:\n log.debug('Mul layer ({}) can not participate in conversion to leaky ReLU due to value {}'\n ''.format(mul_op.id, mul_value_data.soft_get('value')))\n return\n\n value = mul_value_data.value.item(0)\n\n if len(mul_data.out_nodes()) > 1:\n log.debug('Mul layer({}) can not participate in conversion to leaky ReLU due to it has more than one consumer'\n ''.format(mul_op.id))\n return\n\n # Disconnect data nodes from ops\n graph.remove_edge(max_op.id, max_data.id)\n graph.remove_edge(input_data.id, mul_op.id)\n graph.remove_edge(input_data.id, max_op.id)\n\n # Create new ReLU operation\n relu_op = LeakyReLU(graph, dict(name=\"LeakyReLU_\", negative_slope=value))\n relu_op.create_node_with_data(inputs=[input_data], data_nodes=max_data)\n\n log.debug('Successful conversion from {} {} to ReLU with negative slope (leaky ReLU)'\n ''.format(max_op.id, mul_op.id))",
"def convert_elu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n alpha = op.attr(\"alpha\")\n alpha = _expr.const(-1.0 * alpha, dtype=dtype)\n out = alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(x)) + _op.nn.relu(x)\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_elu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def test_leaky_relu(self):\n activation_name = 'LeakyReLU'\n args = {}\n\n activation = activation_factory.create(activation_name, **args)\n self.assertEqual(activation._get_name(), activation_name)",
"def convert_lrn(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n alpha = float(attrs.get(\"alpha\", 0.0001))\n beta = float(attrs.get(\"beta\", 0.75))\n bias = float(attrs.get(\"knorm\", 1.0))\n size = int(attrs.get(\"nsize\"))\n\n lrn_node = onnx.helper.make_node(\n \"LRN\",\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=size\n )\n\n return [lrn_node]",
"def leaky_relu(input, negative_slope=0.01, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=float(negative_slope))",
"def test_leaky_relu_activation(self):\n self.assertEqual(\n [-0.0050, 0.5000], list(af.LeakyRelu(0.01).output(np.array([-0.5, 0.5]))))\n self.assertEqual([0.01, 1], list(af.LeakyRelu(\n 0.01).derivative(np.array([-0.5, 0.5]))))",
"def lrelu(self):\n return self.add_layer(lrelu)",
"def create_relu(x, bias):\n\n return tf.nn.relu(tf.nn.bias_add(x, bias))",
"def convert_relu6(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.clip(x, 0.0, 6.0)\n g.add_node(op.output(\"Out\")[0], out)",
"def leaky_relu(x, slope=0.2):\n return LeakyReLU(slope)(x)",
"def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}",
"def relu(x, name):\n\n with tf.name_scope(name):\n outputs = tf.nn.relu(x)\n # Return layer's output\n return outputs",
"def leaky_relu(features, alpha=0.2, name=None):\n with ops.name_scope(name, \"LeakyRelu\", [features, alpha]) as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n if features.dtype.is_integer:\n features = math_ops.cast(features, dtypes.float32)\n if isinstance(alpha, np.ndarray):\n alpha = alpha.item()\n return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)",
"def test_relu(self):\n activation_name = 'ReLU'\n args = {}\n\n activation = activation_factory.create(activation_name, **args)\n self.assertEqual(activation._get_name(), activation_name)\n\n x = torch.ones(10) * -1\n y = activation(x)\n self.assertEqual(len(torch.nonzero(y, as_tuple=False)), 0)",
"def leaky_relu(x, alpha, name=None):\n import tensorflow as tf\n with tf.name_scope(name, 'leaky_relu_{}'.format(alpha)):\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def _bn_relu(x, bn_name=None, relu_name=None):\n norm = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x)\n return Activation(\"relu\", name=relu_name)(norm)",
"def init_leaky_relu(m, a=None):\n if not isinstance(m, torch.nn.Conv2d):\n return\n if a is None:\n a = nn.modules.activation.LeakyReLU().negative_slope\n nn.init.kaiming_uniform_(m.weight, a=a)",
"def _relu(layer):\n return tf.nn.relu(layer)",
"def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: 
conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)",
"def convert_thresholded_relu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = op.attr(\"threshold\")\n threshold = _expr.const(threshold, dtype)\n zero = _expr.const(0, dtype=dtype)\n out = tvm.relay.where(x > threshold, x, zero)\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_gemm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n alpha = onnx_node.getattr('alpha', 1.)\n beta = onnx_node.getattr('beta', 1.)\n transA = onnx_node.getattr('transA', 0)\n transB = onnx_node.getattr('transB', 0)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(alpha=alpha,\n beta=beta,\n transA=transA,\n transB=transB)",
"def test_relu_activation(self):\n self.assertEqual([0, 0.5], list(\n af.Relu().output(np.array([-0.5, 0.5]))))\n self.assertEqual([0, 1], list(\n af.Relu().derivative(np.array([-0.5, 0.5]))))",
"def _bn_relu(input):\n\tnorm = BatchNormalization(axis=3)(input)\n\treturn Activation(\"relu\")(norm)"
] | [
"0.6645275",
"0.64907336",
"0.64269954",
"0.6418857",
"0.6116449",
"0.59967774",
"0.5878855",
"0.5858891",
"0.5726611",
"0.567721",
"0.5667963",
"0.56440437",
"0.55542094",
"0.5381494",
"0.5375752",
"0.5368928",
"0.535867",
"0.5342397",
"0.5317905",
"0.5286128",
"0.5262697",
"0.5247314",
"0.5247211",
"0.52410185",
"0.5218378",
"0.51830333",
"0.51575506",
"0.5155447",
"0.51345444",
"0.5119535"
] | 0.67764103 | 0 |
Map MXNet's RNN operator attributes to onnx's RNN operator and return the created node. | def convert_RNN(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
nodes = []
# ============================== Attributes ==============================
mode = attrs['mode'].upper()
rnn_kwargs = {}
if mode != 'LSTM':
raise NotImplementedError(
"Only LSTM mode RNN conversion to ONNX is currently supported."
)
hidden_size = rnn_kwargs['hidden_size'] = int(attrs.get("state_size"))
if eval(attrs.get('bidirectional', 'False')):
rnn_kwargs['direction'] = 'bidirectional'
num_directions = 2
else:
rnn_kwargs['direction'] = 'forward'
num_directions = 1
clip_min = eval(attrs.get('lstm_state_clip_min', 'None'))
clip_max = eval(attrs.get('lstm_state_clip_max', 'None'))
if clip_min is not None or clip_max is not None:
# ONNX LSTMs have the `clip` attribute, however it seems to give
# slightly different results, when compared to the MXNet equivalent
raise NotImplementedError(
"Conversion of RNNs with lstm_state_clip_min/max "
"to ONNX is currently not supported."
)
if eval(attrs.get('lstm_state_clip_nan', 'False')):
raise NotImplementedError(
"ONNX RNN operator doesn't support lstm_state_clip_nan"
)
if eval(attrs.get('use_sequence_length', 'False')):
# This can maybe be implemented using the `sequence_len` optional input
raise NotImplementedError(
"Conversion of RNNs with variable input sequence length "
"to ONNX is currently not supported."
)
if eval(attrs.get('num_layers', '1')) != 1:
raise NotImplementedError(
"Conversion of RNNs with num_layers > 1 "
"to ONNX is currently not supported."
)
if eval(attrs.get('p', '0')) != 0:
# WARNING! The `p` attribute in mxnet is "dropout probability" while
# the `p` optional input of ONNX LSTMs is the peephole weights tensor.
raise NotImplementedError(
"Conversion of RNNs with dropout "
"to ONNX is currently not supported."
)
if eval(attrs.get('projection_size', 'None')) is not None:
raise NotImplementedError(
"Conversion of RNNs with custom projection_size "
"to ONNX is currently not supported."
)
if not eval(attrs.get('state_outputs', 'True')):
raise NotImplementedError(
"Conversion of RNNs with state_outputs=False "
"to ONNX is currently not supported."
)
# ============================== Parameters ==============================
# (See _rnn_param_concat for part 1 of this comment section)
# Unfortunately, mxnets version of _rnn_param_concat concatenates *ALL*
# the parameters, instead of grouping them like ONNX. The workaround,
# used here, is that the _rnn_param_concat node conversion code will
# produce multiple nodes with names ending in rnn_param_concatN__P
# (Where P is the parameter group name W, R or B)
# We then use regular expressions to get the "extra outputs" of the
# _rnn_param_concat node.
x, param_concat, *initial_states = input_nodes
param_pattern = re.compile(r'(.*rnn_param_concat[0-9]+__)[WRB]$')
if not param_pattern.match(param_concat):
# ToDo: Maybe do something more sane after Issue #17621 gets resolved
raise NotImplementedError(
"The order of RNN parameters is different between mxnet and ONNX. "
"Currently, an automatic conversion is only possible, if the RNN "
"parameters were concatenated using the internal "
"_rnn_param_concat operator."
)
w, r, b = (
param_pattern.sub(r'\1' + param, param_concat)
for param in 'WRB'
)
# The second conversion step handles
# * parameter shapes, since mxnet uses flattened parameters, while
# ONNX requires specific tensor shapes
# * gate order, since both frameworks require the weights and biases
# of the 4 basic gates (forget, input, cell and output) to be
# concatenated, but in different order
# ([ifco] for mxnet and [iofc] for ONNX)
def fix_rnn_parameter(p, p_shape_in, p_shape_out, p_order=(0, 3, 1, 2)):
p_ = p
# 1) Reshape flat parameters to their original shape, such that
# the gates are concatenated along axis=1
p_reshaped_in = create_helper_reshape_node(
p, p_ + "__reshaped_in", p_shape_in, kwargs
)
nodes.extend(p_reshaped_in)
p = p_reshaped_in[-1].name
# 2) Use a Gather node to pick gates along axis=1, permuting them
p_reordered = create_helper_gather_node(
p, p_ + "__reordered", p_order, kwargs, axis=1
)
nodes.extend(p_reordered)
p = p_reordered[-1].name
# 3) Reshape the parameters to their final shape, squeezing the gate
# and hidden dimensions together
p_reshaped_out = create_helper_reshape_node(
p, p_ + "__reshaped_out", p_shape_out, kwargs
)
nodes.extend(p_reshaped_out)
return p_reshaped_out[-1].name
w = fix_rnn_parameter(
w,
p_shape_in=(num_directions, 4, hidden_size, -1),
p_shape_out=(num_directions, 4 * hidden_size, -1),
)
r = fix_rnn_parameter(
r,
p_shape_in=(num_directions, 4, hidden_size, hidden_size),
p_shape_out=(num_directions, 4 * hidden_size, hidden_size),
)
b = fix_rnn_parameter(
b,
p_shape_in=(2 * num_directions, 4, hidden_size),
p_shape_out=(num_directions, 8 * hidden_size),
)
# ============================= Inputs/States ============================
input_shape = create_helper_shape_node(x, x + "__shape")
nodes.extend(input_shape)
input_shape = input_shape[-1].name
batch_size = create_helper_gather_node(
input_shape,
x + "__batch_size",
indices=[1],
axis=0,
kwargs=kwargs,
)
nodes.extend(batch_size)
batch_size = batch_size[-1].name
state_shape = create_helper_build_values_node(
[num_directions, batch_size, hidden_size],
name + "__state_shape",
dtype=np.int64,
kwargs=kwargs,
)
nodes.extend(state_shape)
state_shape = state_shape[-1].name
expanded_states = []
for state in initial_states:
expanded_state = create_helper_expand_node(
state, state + "__expanded", state_shape
)
nodes.extend(expanded_state)
expanded_states.append(expanded_state[-1].name)
initial_states = expanded_states
# =========================== RNN node/outputs ===========================
y_out = [onnx.helper.make_node(
mode, # RNN or LSTM or GRU
inputs=[x, w, r, b, '', *initial_states],
outputs=[name + '__Y'],
name=name + '__Y',
**rnn_kwargs
)]
nodes.extend(y_out)
y = y_out[-1].name
# We are almost done. The only thing left to do is to convert the output
# of the RNN node from the [S, D, B, H] layout, which ONNX returns
# to the [S, B, D*H] layout, which mxnet uses
# 1) Transpose [S, D, B, H] -> [S, B, D, H]
y_perm = (0, 2, 1, 3)
y_transposed = create_helper_trans_node(
y, y + "__transposed", y_perm
)
nodes.extend(y_transposed)
y = y_transposed[-1].name
# 2) Reshape [S, B, D, H] -> [S, B, D*H]
y_shape = (0, 0, -1)
y_reshaped = create_helper_reshape_node(y, name, y_shape, kwargs)
nodes.extend(y_reshaped)
return nodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def createRotoPaintNodeMI():\n return gr()",
"def convert_lrn(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n alpha = float(attrs.get(\"alpha\", 0.0001))\n beta = float(attrs.get(\"beta\", 0.75))\n bias = float(attrs.get(\"knorm\", 1.0))\n size = int(attrs.get(\"nsize\"))\n\n lrn_node = onnx.helper.make_node(\n \"LRN\",\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=size\n )\n\n return [lrn_node]",
"def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def convert_rnn_param_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n axis = int(attrs.get(\"dim\"))\n\n # mxnet RNN node and ONNX RNN/LSTM/GRU nodes\n # use different ways to store their parameters\n\n # The conversion between these formats is broken into 2 steps\n # The first step (performed here in _rnn_param_concat) regroups the\n # flattened parameters according to the table below.\n # The second step corrects the shapes and orders of gates and is\n # performed and described in more detail in the RNN node\n\n # mxnet [ONNX] -> ONNX (group)\n # i2h_weights [W (+ WB)] -> W (input weights)\n # h2h_weights [R (+ RB)] -> R (recurrence weights)\n # i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)]\n # h2h_biases [Rb (+ RBb)] -> (biases)\n\n split = len(input_nodes) // 2\n weights, biases = input_nodes[:split], input_nodes[split:]\n i2h_weights = weights[::2]\n h2h_weights = weights[1::2]\n i2h_biases = biases[::2]\n h2h_biases = biases[1::2]\n reordered_biases = [\n bias\n for pair in zip(i2h_biases, h2h_biases)\n for bias in pair\n ]\n\n # The order of mxnet parameters in the inputs is:\n # [\n # '{}{}_{}_{}'.format(d, l, g, t)\n # for t in ['weight', 'bias']\n # for l in range(num_layers)\n # for d in ['l', 'r'][:num_directions]\n # for g in ['i2h', 'h2h']\n # ]\n\n w = onnx.helper.make_node(\n \"Concat\",\n inputs=i2h_weights,\n outputs=[name + \"__W\"],\n axis=axis,\n name=name + \"__W\"\n )\n r = onnx.helper.make_node(\n \"Concat\",\n inputs=h2h_weights,\n outputs=[name + \"__R\"],\n axis=axis,\n name=name + \"__R\"\n )\n b = onnx.helper.make_node(\n \"Concat\",\n inputs=reordered_biases,\n outputs=[name + \"__B\"],\n axis=axis,\n name=name + \"__B\"\n )\n return [w, r, b]",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def _create_custom_op_trainable_onnx_model():\n onnx_model = onnx.load(os.path.join(\"testdata\", \"custom_op_library\", \"custom_op_test.onnx\"))\n onnx_model.graph.value_info.append(\n onnx.helper.make_tensor_value_info(\"output_1\", onnx.TensorProto.FLOAT, [3, 5])\n )\n\n class CustomOpBlockWithLinear(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.linear = onnxblock.blocks.Linear(5, 10)\n\n def build(self, linear_input):\n return self.linear(linear_input)\n\n custom_op_block = CustomOpBlockWithLinear()\n with onnxblock.base(onnx_model) as model_accessor:\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"test.customop\", 1))\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"\", 14))\n model_accessor.model.ir_version = 7\n _ = custom_op_block(\"output_1\")\n\n return custom_op_block.to_model_proto()",
"def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def build_rpn_model(anchor_stride, anchors_per_location, depth, verbose = 0):\n print('\\n>>> RPN Layer ')\n \n input_feature_map = KL.Input(shape=[None, None, depth], name=\"input_rpn_feature_map\")\n \n if verbose:\n print(' Input_feature_map shape :', input_feature_map.shape)\n print(' anchors_per_location :', anchors_per_location)\n print(' depth :', depth)\n \n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n\n if verbose:\n print(' Input_feature_map shape :', input_feature_map.shape)\n print(' anchors_per_location :', anchors_per_location)\n print(' anchor_stride :', anchor_stride)\n \n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def map_output_and_node(cls, onnx_model: onnx.ModelProto):\n output2node = dict()\n for node in onnx_model.graph.node:\n for output_name in node.output:\n output2node[output_name] = node\n return output2node",
"def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper",
"def build_rpn_model(anchor_stride, anchors_per_location, depth):\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())"
] | [
"0.573704",
"0.5696308",
"0.5613529",
"0.56037074",
"0.55654407",
"0.5430018",
"0.54215723",
"0.5411738",
"0.540513",
"0.5397167",
"0.5390512",
"0.5343584",
"0.53244823",
"0.53124344",
"0.5312318",
"0.5296085",
"0.5295705",
"0.52775544",
"0.5250277",
"0.52421296",
"0.52008814",
"0.5182023",
"0.51716125",
"0.515662",
"0.51547945",
"0.5131349",
"0.51083606",
"0.51065147",
"0.50776124",
"0.50739676"
] | 0.6088374 | 0 |
Map MXNet's _rnn_param_concat operator attributes to onnx's Concat operator and return the created node. | def convert_rnn_param_concat(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("dim"))
# mxnet RNN node and ONNX RNN/LSTM/GRU nodes
# use different ways to store their parameters
# The conversion between these formats is broken into 2 steps
# The first step (performed here in _rnn_param_concat) regroups the
# flattened parameters according to the table below.
# The second step corrects the shapes and orders of gates and is
# performed and described in more detail in the RNN node
# mxnet [ONNX] -> ONNX (group)
# i2h_weights [W (+ WB)] -> W (input weights)
# h2h_weights [R (+ RB)] -> R (recurrence weights)
# i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)]
# h2h_biases [Rb (+ RBb)] -> (biases)
split = len(input_nodes) // 2
weights, biases = input_nodes[:split], input_nodes[split:]
i2h_weights = weights[::2]
h2h_weights = weights[1::2]
i2h_biases = biases[::2]
h2h_biases = biases[1::2]
reordered_biases = [
bias
for pair in zip(i2h_biases, h2h_biases)
for bias in pair
]
# The order of mxnet parameters in the inputs is:
# [
# '{}{}_{}_{}'.format(d, l, g, t)
# for t in ['weight', 'bias']
# for l in range(num_layers)
# for d in ['l', 'r'][:num_directions]
# for g in ['i2h', 'h2h']
# ]
w = onnx.helper.make_node(
"Concat",
inputs=i2h_weights,
outputs=[name + "__W"],
axis=axis,
name=name + "__W"
)
r = onnx.helper.make_node(
"Concat",
inputs=h2h_weights,
outputs=[name + "__R"],
axis=axis,
name=name + "__R"
)
b = onnx.helper.make_node(
"Concat",
inputs=reordered_biases,
outputs=[name + "__B"],
axis=axis,
name=name + "__B"
)
return [w, r, b] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"dim\", 1))\n concat_node = onnx.helper.make_node(\n \"Concat\",\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [concat_node]",
"def create_helper_concat_node(inputs, output_name, axis=0):\n concat_node = onnx.helper.make_node(\n \"Concat\",\n inputs=inputs,\n outputs=[output_name],\n name=output_name,\n axis=axis,\n )\n return [concat_node]",
"def _create_concat(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.attrs[\"axis\"]\n if factor < 0:\n factor = len(inputs[0].shape\n ) + factor # in order to support the negative axis\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def concatenate(vars, axis=-1):\n from deepy.core.neural_var import NeuralVariable\n if isinstance(vars[0], NeuralVariable):\n concat_var = Concatenate(axis=axis).compute(*vars)\n if axis == -1 or axis == vars[0].tensor.ndim - 1:\n concat_var.output_dim = sum([x.output_dim for x in vars], 0)\n else:\n concat_var = TT.concatenate(vars, axis)\n return concat_var",
"def ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def concat_model():\n x = tf.keras.Input(shape=[10, 10, 3, ])\n x1 = tf.keras.layers.Conv2D(5, (2, 2))(x)\n x2 = tf.keras.layers.Conv2D(6, (2, 2))(x)\n x3 = tf.keras.layers.Conv2D(7, (2, 2))(x)\n z = tf.keras.layers.concatenate([x2, x1, x3], axis=-1)\n z1 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z2 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z = tf.add(z1, z2)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"concat_model\")(z)\n return output",
"def join_vars(self, xs):\n return tf.concat(1, xs)",
"def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)",
"def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)",
"def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())",
"def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def concat_pattern():\n pattern = is_tuple(None)\n pattern = is_op(\"concatenate\")(pattern)\n\n return pattern",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def create_split_concat_net_const(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n import numpy as np\n\n concat_axis = 0\n concat_output_shape = input_shape.copy()\n concat_output_shape[concat_axis] *= 2\n\n const_number = np.prod(input_shape)\n constant = np.random.randint(-127, 127, const_number).astype(np.float)\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT, concat_output_shape)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=input_shape,\n vals=constant,\n ),\n )\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['const1'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n node_dyn_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'output_concat'],\n outputs=['output_dyn_concat'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_split_def, node_concat_def, node_dyn_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net",
"def _rewrite_concat(self, node: saldag.Concat):\n\n if node.requires_mpc():\n node.is_mpc = True\n if len(node.children) > 1 and node.is_boundary():\n fork_node(node)",
"def concat(a, b):\n return torch.cat((a, b), 1)",
"def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)",
"def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)",
"def concat(vars, axis=-1):\n return concatenate(vars, axis)",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def _create_gather(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n node.input.append(op.name + \":indices\")\n return node",
"def conv_cond_concat(x, y):\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(axis=3, values=[x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])",
"def anchor():\n return 'concat'",
"def conv_cond_concat(x, y):\n\n # Unfinished -- but not needed??\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(4, [x , y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2] , y_shapes[3]])])"
] | [
"0.73427945",
"0.7049503",
"0.67380154",
"0.6468759",
"0.6443857",
"0.60110635",
"0.56656677",
"0.5663238",
"0.5535425",
"0.5531253",
"0.54857755",
"0.5458668",
"0.537242",
"0.531605",
"0.5313958",
"0.5275782",
"0.5205251",
"0.5205095",
"0.51890177",
"0.5185862",
"0.5184087",
"0.5184087",
"0.51741",
"0.5148766",
"0.51464295",
"0.51028526",
"0.50875086",
"0.50513035",
"0.5037368",
"0.49793765"
] | 0.7519251 | 0 |
Map MXNet's transpose operator attributes to onnx's Transpose operator and return the created node. | def convert_transpose(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
axes = attrs.get("axes", ())
if axes:
axes = tuple(map(int, re.findall(r'\d+', axes)))
transpose_node = onnx.helper.make_node(
"Transpose",
input_nodes,
[name],
perm=axes,
name=name
)
else:
transpose_node = onnx.helper.make_node(
"Transpose",
input_nodes,
[name],
name=name
)
return [transpose_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node",
"def _create_transpose(cls, onnx_node, inputs, opset_version):\n shape = inputs[0].shape\n perm = onnx_node.getattr(\"perm\", list(range(len(shape) - 1, -1, -1)))\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(perm)",
"def create_helper_trans_node(input_name, output_name, perm=None):\n attrs = {}\n if perm is not None:\n attrs['perm'] = perm\n trans_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_name],\n outputs=[output_name],\n name=output_name,\n **attrs\n )\n return [trans_node]",
"def convert_transpose(g, op, block):\n\n perm = op.attr(\"axis\")\n out = _op.transpose(g.get_node(op.input(\"X\")[0]), axes=perm)\n g.add_node(op.output(\"Out\")[0], out)",
"def T(self):\n return Op('transpose', self)",
"def transpose_op(node_A, perm=None):\n return TransposeOp()(node_A, perm)",
"def _make_major_transpose_nodes(inputs, scope, node_dict, prev_node, post):\n input_shape = node_dict[inputs[0]].attr[\"_output_shapes\"][0]\n input_rank = len(input_shape)\n\n perm_node = TensorflowNode(\n op_type=\"Const\",\n name=\"/\".join([scope, \"transpose\", \"perm\",\n get_unique_suffix()]),\n attr={\n \"value\": np.asarray([1, 0] + list(range(input_rank))[2:], np.int32),\n \"dtype\": data_type.tf2onnx(tf.int32),\n \"_output_shapes\": [input_rank]\n })\n\n if post:\n input_shape = [input_shape[i] for i in perm_node.attr[\"value\"]]\n prev_node.attr[\"_output_shapes\"] = [input_shape]\n\n trans_node = TensorflowNode(\n op_type=\"Transpose\",\n name=\"/\".join([scope, \"transpose\",\n get_unique_suffix()]),\n inputs=[inputs[0] if not post else prev_node.name, perm_node.name],\n attr={\n \"dtype\": data_type.tf2onnx(node_dict[inputs[0]].attr[\"T\"]),\n \"_output_shapes\":\n [[input_shape[i] for i in perm_node.attr[\"value\"]]]\n })\n return [perm_node, trans_node]",
"def transpose(self) -> None:\n ...",
"def T(self):\n if self._transpose_ops is None:\n self._transpose_ops = self._transpose()\n if not isinstance(self._transpose_ops, NNOp):\n raise ValueError(\"The _transposed method must return NNOp.\"\n \"but the returned object has type=%s\" %\n str(type(self._transpose_ops)))\n return self._transpose_ops",
"def transpose(self):\n return self._transpose",
"def Transpose(self):\n return _hypre.HypreParMatrix_Transpose(self)",
"def transpose(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n shape = (self._shape[1], self._shape[0])\n\n rdd = rdd.map(\n lambda m: (m[1], m[0], m[2])\n )\n\n return Matrix(rdd, shape,\n dtype=self._dtype, coord_format=self._coord_format, nelem=self._nelem)",
"def getTransposeMatrix(self) -> CMatrix4:\n ...",
"def transpose(self):\n pass",
"def convert_dot(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n input_node_a = input_nodes[0]\n input_node_b = input_nodes[1]\n\n trans_a_node = None\n trans_b_node = None\n\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if trans_a:\n input_node_a = op_name + \"_a\"\n trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)\n if trans_b:\n input_node_b = op_name + \"_b\"\n trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_node_a, input_node_b],\n outputs=[name],\n name=name\n )\n\n if not trans_a and not trans_b:\n return [matmul_node]\n elif trans_a and not trans_b:\n return [trans_a_node, matmul_node]\n elif trans_b and not trans_a:\n return [trans_b_node, matmul_node]\n else:\n return [trans_a_node, trans_b_node, matmul_node]",
"def transpose(tensor):\n raise NotImplementedError",
"def transpose(self):\n return self.from_rep(self.rep.transpose())",
"def transpose(self):\n return self._new(self.rep.transpose(), (self.cols, self.rows), self.domain)",
"def transpose(self, transposition):\r\n top_node = self.top_node\r\n self.top_node = self.transpose_helper(top_node, transposition)\r\n int(self.top_node.split('|')[0][1::].strip('()').split(', ')[0])\r\n self.run_clean_up()",
"def add_transpose(self, input_name, axes, name=None):\n return self._build_op('Transpose', [input_name, axes], name=name)",
"def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]",
"def transpose(self):\n trans = Matrix(self.ncols,self.nrows)\n for i in range(self.nrows):\n for j in range(self.ncols):\n trans.matrix[j][i] = self.matrix[i][j]\n return trans",
"def transpose():",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def transpose(self):\n data = np.transpose(self._data)\n return self.create(self.cols, self.rows, data)",
"def transpose(m):\n\n pass",
"def transpose(self):\n return Matrix([[self.data[r][c] for r in range(len(self.data))]\n for c in range(len(self.data[1]))])",
"def transpose(self, *args, **kwargs):\n return _image.image_transpose(self, *args, **kwargs)",
"def transpose(self):\n transposed_data = []\n for i in range(1, self.columns + 1):\n transposed_data.extend(self.column(i))\n\n return Matrix(rows = self.columns, columns = self.rows, data = transposed_data)",
"def transpose(self):\n data = [list(col) for col in zip(*self.data)]\n return self.__class__(self.n, self.m, data)"
] | [
"0.78956044",
"0.7164064",
"0.69711393",
"0.69439864",
"0.6919916",
"0.6686743",
"0.6489473",
"0.63730687",
"0.6343207",
"0.61527866",
"0.61493826",
"0.6142635",
"0.6139026",
"0.6132935",
"0.60538316",
"0.60287285",
"0.5990234",
"0.59900707",
"0.59613836",
"0.59398377",
"0.5923623",
"0.5914782",
"0.5907859",
"0.58956295",
"0.5890522",
"0.5850022",
"0.58499527",
"0.58423674",
"0.5825993",
"0.5825927"
] | 0.74619085 | 1 |
Map MXNet's LRN operator attributes to onnx's LRN operator and return the created node. | def convert_lrn(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
alpha = float(attrs.get("alpha", 0.0001))
beta = float(attrs.get("beta", 0.75))
bias = float(attrs.get("knorm", 1.0))
size = int(attrs.get("nsize"))
lrn_node = onnx.helper.make_node(
"LRN",
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha,
beta=beta,
bias=bias,
size=size
)
return [lrn_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calcLnLFromNode(self, nd):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnLFromNode(self, nd)",
"def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def convert_relu(node, **kwargs):\n return create_basic_op_node('Relu', node, kwargs)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def convert_RNN(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n nodes = []\n\n # ============================== Attributes ==============================\n mode = attrs['mode'].upper()\n rnn_kwargs = {}\n if mode != 'LSTM':\n raise NotImplementedError(\n \"Only LSTM mode RNN conversion to ONNX is currently supported.\"\n )\n\n hidden_size = rnn_kwargs['hidden_size'] = int(attrs.get(\"state_size\"))\n if eval(attrs.get('bidirectional', 'False')):\n rnn_kwargs['direction'] = 'bidirectional'\n num_directions = 2\n else:\n rnn_kwargs['direction'] = 'forward'\n num_directions = 1\n\n clip_min = eval(attrs.get('lstm_state_clip_min', 'None'))\n clip_max = eval(attrs.get('lstm_state_clip_max', 'None'))\n if clip_min is not None or clip_max is not None:\n # ONNX LSTMs have the `clip` attribute, however it seems to give\n # slightly different results, when compared to the MXNet equivalent\n raise NotImplementedError(\n \"Conversion of RNNs with lstm_state_clip_min/max \"\n \"to ONNX is currently not supported.\"\n )\n\n if eval(attrs.get('lstm_state_clip_nan', 'False')):\n raise NotImplementedError(\n \"ONNX RNN operator doesn't support lstm_state_clip_nan\"\n )\n\n if eval(attrs.get('use_sequence_length', 'False')):\n # This can maybe be implemented using the `sequence_len` optional input\n raise NotImplementedError(\n \"Conversion of RNNs with variable input sequence length \"\n \"to ONNX is currently not supported.\"\n )\n\n if eval(attrs.get('num_layers', '1')) != 1:\n raise NotImplementedError(\n \"Conversion of RNNs with num_layers > 1 \"\n \"to ONNX is currently not supported.\"\n )\n\n if eval(attrs.get('p', '0')) != 0:\n # WARNING! The `p` attribute in mxnet is \"dropout probability\" while\n # the `p` optional input of ONNX LSTMs is the peephole weights tensor.\n raise NotImplementedError(\n \"Conversion of RNNs with dropout \"\n \"to ONNX is currently not supported.\"\n )\n\n if eval(attrs.get('projection_size', 'None')) is not None:\n raise NotImplementedError(\n \"Conversion of RNNs with custom projection_size \"\n \"to ONNX is currently not supported.\"\n )\n\n if not eval(attrs.get('state_outputs', 'True')):\n raise NotImplementedError(\n \"Conversion of RNNs with state_outputs=False \"\n \"to ONNX is currently not supported.\"\n )\n\n # ============================== Parameters ==============================\n\n # (See _rnn_param_concat for part 1 of this comment section)\n\n # Unfortunately, mxnets version of _rnn_param_concat concatenates *ALL*\n # the parameters, instead of grouping them like ONNX. The workaround,\n # used here, is that the _rnn_param_concat node conversion code will\n # produce multiple nodes with names ending in rnn_param_concatN__P\n # (Where P is the parameter group name W, R or B)\n # We then use regular expressions to get the \"extra outputs\" of the\n # _rnn_param_concat node.\n\n x, param_concat, *initial_states = input_nodes\n param_pattern = re.compile(r'(.*rnn_param_concat[0-9]+__)[WRB]$')\n if not param_pattern.match(param_concat):\n # ToDo: Maybe do something more sane after Issue #17621 gets resolved\n raise NotImplementedError(\n \"The order of RNN parameters is different between mxnet and ONNX. 
\"\n \"Currently, an automatic conversion is only possible, if the RNN \"\n \"parameters were concatenated using the internal \"\n \"_rnn_param_concat operator.\"\n )\n w, r, b = (\n param_pattern.sub(r'\\1' + param, param_concat)\n for param in 'WRB'\n )\n\n # The second conversion step handles\n # * parameter shapes, since mxnet uses flattened parameters, while\n # ONNX requires specific tensor shapes\n # * gate order, since both frameworks require the weights and biases\n # of the 4 basic gates (forget, input, cell and output) to be\n # concatenated, but in different order\n # ([ifco] for mxnet and [iofc] for ONNX)\n\n def fix_rnn_parameter(p, p_shape_in, p_shape_out, p_order=(0, 3, 1, 2)):\n p_ = p\n\n # 1) Reshape flat parameters to their original shape, such that\n # the gates are concatenated along axis=1\n p_reshaped_in = create_helper_reshape_node(\n p, p_ + \"__reshaped_in\", p_shape_in, kwargs\n )\n nodes.extend(p_reshaped_in)\n p = p_reshaped_in[-1].name\n\n # 2) Use a Gather node to pick gates along axis=1, permuting them\n p_reordered = create_helper_gather_node(\n p, p_ + \"__reordered\", p_order, kwargs, axis=1\n )\n nodes.extend(p_reordered)\n p = p_reordered[-1].name\n\n # 3) Reshape the parameters to their final shape, squeezing the gate\n # and hidden dimensions together\n p_reshaped_out = create_helper_reshape_node(\n p, p_ + \"__reshaped_out\", p_shape_out, kwargs\n )\n nodes.extend(p_reshaped_out)\n return p_reshaped_out[-1].name\n\n w = fix_rnn_parameter(\n w,\n p_shape_in=(num_directions, 4, hidden_size, -1),\n p_shape_out=(num_directions, 4 * hidden_size, -1),\n )\n\n r = fix_rnn_parameter(\n r,\n p_shape_in=(num_directions, 4, hidden_size, hidden_size),\n p_shape_out=(num_directions, 4 * hidden_size, hidden_size),\n )\n\n b = fix_rnn_parameter(\n b,\n p_shape_in=(2 * num_directions, 4, hidden_size),\n p_shape_out=(num_directions, 8 * hidden_size),\n )\n\n # ============================= Inputs/States ============================\n input_shape = create_helper_shape_node(x, x + \"__shape\")\n nodes.extend(input_shape)\n input_shape = input_shape[-1].name\n\n batch_size = create_helper_gather_node(\n input_shape,\n x + \"__batch_size\",\n indices=[1],\n axis=0,\n kwargs=kwargs,\n )\n nodes.extend(batch_size)\n batch_size = batch_size[-1].name\n\n state_shape = create_helper_build_values_node(\n [num_directions, batch_size, hidden_size],\n name + \"__state_shape\",\n dtype=np.int64,\n kwargs=kwargs,\n )\n nodes.extend(state_shape)\n state_shape = state_shape[-1].name\n\n expanded_states = []\n for state in initial_states:\n expanded_state = create_helper_expand_node(\n state, state + \"__expanded\", state_shape\n )\n nodes.extend(expanded_state)\n expanded_states.append(expanded_state[-1].name)\n initial_states = expanded_states\n\n # =========================== RNN node/outputs ===========================\n y_out = [onnx.helper.make_node(\n mode, # RNN or LSTM or GRU\n inputs=[x, w, r, b, '', *initial_states],\n outputs=[name + '__Y'],\n name=name + '__Y',\n **rnn_kwargs\n )]\n nodes.extend(y_out)\n y = y_out[-1].name\n\n # We are almost done. 
The only thing left to do is to convert the output\n # of the RNN node from the [S, D, B, H] layout, which ONNX returns\n # to the [S, B, D*H] layout, which mxnet uses\n\n # 1) Transpose [S, D, B, H] -> [S, B, D, H]\n y_perm = (0, 2, 1, 3)\n y_transposed = create_helper_trans_node(\n y, y + \"__transposed\", y_perm\n )\n nodes.extend(y_transposed)\n y = y_transposed[-1].name\n\n # 2) Reshape [S, B, D, H] -> [S, B, D*H]\n y_shape = (0, 0, -1)\n y_reshaped = create_helper_reshape_node(y, name, y_shape, kwargs)\n nodes.extend(y_reshaped)\n\n return nodes",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def _create_custom_op_trainable_onnx_model():\n onnx_model = onnx.load(os.path.join(\"testdata\", \"custom_op_library\", \"custom_op_test.onnx\"))\n onnx_model.graph.value_info.append(\n onnx.helper.make_tensor_value_info(\"output_1\", onnx.TensorProto.FLOAT, [3, 5])\n )\n\n class CustomOpBlockWithLinear(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.linear = onnxblock.blocks.Linear(5, 10)\n\n def build(self, linear_input):\n return self.linear(linear_input)\n\n custom_op_block = CustomOpBlockWithLinear()\n with onnxblock.base(onnx_model) as model_accessor:\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"test.customop\", 1))\n model_accessor.model.opset_import.append(onnx.helper.make_opsetid(\"\", 14))\n model_accessor.model.ir_version = 7\n _ = custom_op_block(\"output_1\")\n\n return custom_op_block.to_model_proto()",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def _new_learning_node(self, initial_stats=None, parent_node=None,\n is_active=True):\n if initial_stats is None:\n initial_stats = {}\n\n if is_active:\n return AdaActiveLearningNodeRegressor(initial_stats, parent_node,\n random_state=self.random_state)\n else:\n prediction_option = self.leaf_prediction\n if prediction_option == self._TARGET_MEAN:\n return InactiveLearningNodeMean\n else:\n return InactiveLearningNodePerceptron",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def _create_nrml():\n return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def lnhattr(shape):\n\n arnold_nodes = ('rcurve', 'cwdth', 'srate', 'ai_curve_shaderr', 'ai_curve_shaderg', 'ai_curve_shaderb')\n for ar in arnold_nodes:\n cmds.setAttr(shape + \".\" + ar, l=True, k=False, cb=False)",
"def node_mapping(self):\n ...",
"def convert_broadcast_lesser(node, **kwargs):\n return create_basic_op_node('Less', node, kwargs)",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def build_rpn_model(anchor_stride, anchors_per_location, depth, verbose = 0):\n print('\\n>>> RPN Layer ')\n \n input_feature_map = KL.Input(shape=[None, None, depth], name=\"input_rpn_feature_map\")\n \n if verbose:\n print(' Input_feature_map shape :', input_feature_map.shape)\n print(' anchors_per_location :', anchors_per_location)\n print(' depth :', depth)\n \n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n\n if verbose:\n print(' Input_feature_map shape :', input_feature_map.shape)\n print(' anchors_per_location :', anchors_per_location)\n print(' anchor_stride :', anchor_stride)\n \n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")",
"def convert_l2normalization(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mode = attrs.get(\"mode\", \"instance\")\n\n if mode != \"channel\":\n raise AttributeError(\"L2Normalization: ONNX currently supports channel mode only\")\n\n l2norm_node = onnx.helper.make_node(\n \"LpNormalization\",\n input_nodes,\n [name],\n axis=1, # channel only\n name=name\n )\n return [l2norm_node]",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def to_orb(self):\n node_id = int(self.idd)\n node_type = GLOB.gmplsTypes.NODETYPE_UNKNOWN\n if type(self.typee) == str:\n node_type = GLOB.gmplsTypes.NODETYPE_NETWORK\n\n node_orb = GLOB.gmplsTypes.nodeIdent(node_id, node_type)\n return node_orb",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper",
"def convert_norm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n ord = int(attrs.get(\"ord\", 2))\n\n onnx_op_name = \"ReduceL1\" if ord == 1 else \"ReduceL2\"\n\n if axes:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]\n else:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def lrelu(self):\n return self.add_layer(lrelu)",
"def n(label):\n global id\n node = pydot.Node(name=id, obj_dict=None, label=label)\n id += 1\n graph.add_node(node)\n return node",
"def convert_leakyrelu(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n initializer = kwargs[\"initializer\"]\n\n act_type = attrs.get(\"act_type\", \"leaky\")\n alpha = float(attrs.get(\"slope\", 0.25))\n\n act_name = {\"elu\": \"Elu\", \"leaky\": \"LeakyRelu\", \"prelu\": \"PRelu\",\n \"selu\": \"Selu\"}\n\n reshape_val_name = 'reshape' + str(kwargs[\"idx\"])\n input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n\n reshape_value = np.array([1, -1, 1, 1], dtype='int64')\n dims = np.shape(reshape_value)\n\n shape_node = onnx.helper.make_tensor_value_info(reshape_val_name, input_type, dims)\n initializer.append(\n onnx.helper.make_tensor(\n name=reshape_val_name,\n data_type=input_type,\n dims=dims,\n vals=reshape_value,\n raw=False,\n )\n )\n\n slope_op_name = 'slope' + str(kwargs[\"idx\"])\n\n lr_node = []\n if act_type == \"prelu\" or act_type == \"selu\":\n reshape_slope_node = onnx.helper.make_node(\n 'Reshape',\n inputs=[input_nodes[1], reshape_val_name],\n outputs=[slope_op_name],\n name=slope_op_name\n )\n\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=[input_nodes[0], slope_op_name],\n outputs=[name],\n name=name)\n\n lr_node.append(shape_node)\n lr_node.append(reshape_slope_node)\n lr_node.append(node)\n else:\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha)\n lr_node.append(node)\n return lr_node",
"def getNode(self):\n node = Token.getNode(self)\n node.tag = 'reltoken'\n return(node)"
] | [
"0.5615053",
"0.5551711",
"0.5397873",
"0.5347614",
"0.53341866",
"0.5257164",
"0.5226096",
"0.5202186",
"0.5176221",
"0.5120736",
"0.5096025",
"0.5051921",
"0.50324756",
"0.5027518",
"0.50227654",
"0.49056938",
"0.48989987",
"0.48988622",
"0.4888863",
"0.4885868",
"0.48846328",
"0.48830795",
"0.48806936",
"0.4854499",
"0.48518068",
"0.47997808",
"0.47924727",
"0.4767416",
"0.47432333",
"0.4735006"
] | 0.6714993 | 0 |
Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator and return the created node. | def convert_l2normalization(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mode = attrs.get("mode", "instance")
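    # MXNet's L2Normalization defaults to "instance" mode; ONNX LpNormalization normalizes
    # along a single axis, so only the "channel" mode (p=2 over axis 1) maps directly.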
if mode != "channel":
raise AttributeError("L2Normalization: ONNX currently supports channel mode only")
l2norm_node = onnx.helper.make_node(
"LpNormalization",
input_nodes,
[name],
axis=1, # channel only
name=name
)
return [l2norm_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_norm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n ord = int(attrs.get(\"ord\", 2))\n\n onnx_op_name = \"ReduceL1\" if ord == 1 else \"ReduceL2\"\n\n if axes:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]\n else:\n reduce_node = onnx.helper.make_node(\n onnx_op_name,\n input_nodes,\n [name],\n keepdims=keepdims,\n name=name\n )\n return [reduce_node]",
"def __init__(self, mode, dim, epsilon=1e-12, name='l2Normalize'):\n super(L2Normalization, self).__init__(mode, name)\n self.dim = dim\n self.epsilon = epsilon",
"def norm2(self):\n return getattr(self, self.norm2_name)",
"def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)",
"def convertL1ToL2(self):\n return _libsbml.Model_convertL1ToL2(self)",
"def convert_instancenorm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n eps = float(attrs.get(\"eps\", 0.001))\n\n node = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=input_nodes,\n outputs=[name],\n name=name,\n epsilon=eps)\n\n return [node]",
"def convert_instance_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n epsilon = op.attr(\"epsilon\")\n\n scale = center = True\n out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)\n g.add_node(op.output(\"Y\")[0], out)",
"def l2_reg_create_layer(prev, n, activation, lambtha):\n reg = tf.contrib.layers.l2_regularizer(lambtha)\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n t = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=reg,\n )\n return t(prev)",
"def l2_reg_create_layer(prev, n, activation, lambtha):\n regulizer = tf.contrib.layers.l2_regularizer(lambtha)\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n tensor = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=regulizer)\n return tensor(prev)",
"def get_norm_layer():\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n return norm_layer",
"def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)",
"def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def normalize_l2(x):\n return x / (npla.norm(x))",
"def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)",
"def l2_normalization(\n inputs,\n name,\n scaling=False,\n scale_initializer=init_ops.ones_initializer(),\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n data_format='NHWC',\n trainable=True,\n scope=None):\n\n with variable_scope.variable_scope(\n scope, 'L2Normalization_'+name, [inputs], reuse=reuse) as sc:\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n dtype = inputs.dtype.base_dtype\n if data_format == 'NHWC':\n # norm_dim = tf.range(1, inputs_rank-1)\n norm_dim = tf.range(inputs_rank-1, inputs_rank)\n params_shape = inputs_shape[-1:]\n elif data_format == 'NCHW':\n # norm_dim = tf.range(2, inputs_rank)\n norm_dim = tf.range(1, 2)\n params_shape = (inputs_shape[1])\n\n # Normalize along spatial dimensions.\n outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)\n # Additional scaling.\n if scaling:\n scale_collections = utils.get_variable_collections(\n variables_collections, 'scale')\n scale = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=scale_initializer,\n collections=scale_collections,\n trainable=trainable)\n if data_format == 'NHWC':\n outputs = tf.multiply(outputs, scale)\n elif data_format == 'NCHW':\n scale = tf.expand_dims(scale, axis=-1)\n scale = tf.expand_dims(scale, axis=-1)\n outputs = tf.multiply(outputs, scale)\n # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))\n\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)",
"def getXmlnsL2():\n return _libsbml.LayoutExtension_getXmlnsL2()",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def convert_lrn(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n alpha = float(attrs.get(\"alpha\", 0.0001))\n beta = float(attrs.get(\"beta\", 0.75))\n bias = float(attrs.get(\"knorm\", 1.0))\n size = int(attrs.get(\"nsize\"))\n\n lrn_node = onnx.helper.make_node(\n \"LRN\",\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=size\n )\n\n return [lrn_node]",
"def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}",
"def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5",
"def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs",
"def convertL3ToL2(self, strict=False):\n return _libsbml.Model_convertL3ToL2(self, strict)",
"def itkStatisticsLabelMapFilterLM2IUS2_cast(*args):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUS2_cast(*args)",
"def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)",
"def normalized(self):\n L = self.length\n if L > pygonal.EPSILON:\n v = tuple.__new__(Vec2, (self[0] / L, self[1] / L))\n v.__dict__['length'] = v.__dict__['length2'] = 1.0\n return v\n else:\n return null",
"def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)",
"def _get_l2_label(self):\n return self.__l2_label",
"def lap_normalize(img, scale_n=4):\n # img = tf.expand_dims(img, 0)\n # print(\"Inside lap_normalize Function, img shape {}\".format(tf.shape(img)))\n\n tlevels = lap_split_n(img, scale_n)\n tlevels = list(map(normalize_std, tlevels))\n\n out = lap_merge(tlevels)\n\n return out[0, :, :, :]",
"def convertL2ToL1(self, strict=False):\n return _libsbml.Model_convertL2ToL1(self, strict)"
] | [
"0.59415656",
"0.5882721",
"0.58236057",
"0.5708504",
"0.5625045",
"0.54728884",
"0.54538465",
"0.53243077",
"0.5301751",
"0.52801716",
"0.5251732",
"0.51723593",
"0.5098865",
"0.50875914",
"0.5062995",
"0.5036322",
"0.5006597",
"0.4991197",
"0.49699366",
"0.49565876",
"0.4919295",
"0.48926318",
"0.48805144",
"0.48568755",
"0.48326305",
"0.48298103",
"0.48275045",
"0.4827153",
"0.48022494",
"0.47967023"
] | 0.71821564 | 0 |
Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node. | def convert_dropout(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
probability = float(attrs.get("p", 0.5))
probability = np.array(probability, dtype=np.float32)
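    # From ONNX opset 12, Dropout takes ratio and training_mode as optional tensor inputs
    # rather than attributes, hence the extra helper tensors created below.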
training_mode = False
    training_mode = np.array(training_mode, dtype=np.bool_)
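    # create_helper_tensor_node is assumed to register these scalar tensors in the graph
    # initializer (via kwargs); only the Dropout node itself needs to be returned here.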
    node_ratio = create_helper_tensor_node(probability, name + '_ratio', kwargs)
    node_mode = create_helper_tensor_node(training_mode, name + '_mode', kwargs)
dropout_node = onnx.helper.make_node(
"Dropout",
[input_nodes[0], name + '_ratio', name + '_mode'],
[name],
name=name
)
return [dropout_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def _create_dropout(cls, onnx_node, inputs, opset_version):\n ratio = onnx_node.getattr(\"ratio\", 0)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(ratio)",
"def create_dropout_layer(self):\n return tf.keras.layers.Dropout(rate=self.dropout)",
"def convert_dropout(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dropout_prob = op.attr(\"dropout_prob\")\n dropout_implementation = op.attr(\"dropout_implementation\")\n if dropout_implementation == \"downgrade_in_infer\":\n out = _op.nn.dropout(x, dropout_prob) * _expr.const(1 - dropout_prob, dtype=\"float32\")\n else:\n out = _op.nn.dropout(x, dropout_prob)\n g.add_node(op.output(\"Out\")[0], out)",
"def dropout_create_layer(prev, n, activation, keep_prob):\n\n dropout = tf.keras.layers.Dropout(keep_prob)\n initializer = tf.keras.initializers.VarianceScaling(scale=2.0,\n mode=(\"fan_avg\"))\n\n tensor = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=initializer,\n kernel_regularizer=dropout)\n\n output = tensor(prev)\n\n return output",
"def dropout(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 20)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('patience', 10000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(None, 28 * 28), input_var=input_var, name='Input')\n # Dropout Layer\n l_dro1 = Dropout(incoming=l_in, corruption_level=0.2, name='Dropout 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_dro1, n_units=500, W=glorot_uniform,\n activation=relu, name='Hidden layer 1')\n # Dropout Layer\n l_dro2 = Dropout(incoming=l_hid1, corruption_level=0.1, name='Dropout 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_dro2, n_units=500, W=glorot_uniform,\n activation=relu, name='Hidden layer 2')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_hid2, n_class=10, name='Logistic regression')\n\n # Create network and add layers\n net = Network('dropout')\n net.add(l_in)\n net.add(l_dro1)\n net.add(l_hid1)\n net.add(l_dro2)\n net.add(l_hid2)\n net.add(l_out)\n\n return net, hp",
"def dropout_create_layer(prev, n, activation, keep_prob):\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n reg = tf.layers.Dropout(keep_prob)\n mod = tf.layers.Dense(n, activation, kernel_initializer=init,\n kernel_regularizer=reg, name='layer')\n return mod(prev)",
"def dropout_create_layer(prev, n, activation, keep_prob):\n init_w = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n dropout = tf.layers.Dropout(rate=keep_prob)\n layers = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init_w,\n kernel_regularizer=dropout)\n A = layers(prev)\n\n return A",
"def dropout_create_layer(prev, n, activation, keep_prob):\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n regularizer = tf.layers.Dropout(keep_prob)\n layer = tf.layers.Dense(n, activation, name='layer',\n kernel_initializer=init,\n kernel_regularizer=regularizer)\n out = layer(prev)\n return out",
"def add_drop_out_layer(self, input_layer):\n return tf.nn.dropout(input_layer, self.keep_prob)",
"def dropout(x, keep_prob, name):\n with tf.name_scope(name):\n outputs = tf.nn.dropout(x, keep_prob)\n # Return layer's output\n return outputs",
"def dropout(cg):\n inputs = VariableFilter(roles=[INPUT])(cg.variables)\n dropout_cg = apply_dropout(cg, inputs, 0.5)\n\n return dropout_cg",
"def dropout(keep_prob, net, is_training):\n return Dropout(keep_prob)(net) if is_training else net",
"def _Dropout(self, name, drop_prob):\n return super()._Dropout(name, keep_prob=1.0 - drop_prob)",
"def _dropout(self,components,dropout=None):\r\n \r\n if dropout is not None:\r\n components.append(nn.Dropout(dropout))",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def __init__(self, incoming, prob, noise_shape=None, selu_dropout: bool = False, training: bool = True,\n name='DropoutLayer'):\n super(DropoutLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n if noise_shape is None:\n noise_shape = np.append(np.ones(len(self.incoming_shape) - 1, dtype=np.int32),\n [self.incoming_shape[-1]])\n else:\n self.noise_shape = noise_shape\n \n self.prob = prob\n self.noise_shape = noise_shape\n self.out = None\n self.name = name\n self.selu_dropout = selu_dropout\n self.training = training",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name",
"def ConvertDropout(converter, serializer, inp, layer_name, parent_layer_name, dropout_ratio):\n\n # Generate a tensor filled with uniform random values..\n # This means tensor value is constant and no random number generation in runtime.\n # This behavior is different from Chainer's one.\n # Chainer uses numpy.random without seed, thus it will generate random tensor per run.\n\n # dropout in Chainer:\n # scale = 1.0 / (1.0 - dropout_ratio)\n # flag = rnd >= dropout_ratio\n # mask = scale * flag\n # y = x * mask\n\n #\n # dropout in TensorFlow(Lite) r1.13:\n # keep_prob = 1 - ratio\n #\n # [keep_prb, 1.0 + keep_prob)\n # random_tensor = keep_prob\n # random_tensor += random_uniform()\n #\n # 0. if [keep_prob, 1.0) and 1.0 if [1.0, 1.0 + keep_prob)\n # binary_tensor = floor(random_tensor)\n # ret = divide(x, keep_prob) * binary_tensor\n #\n # We go with TensorFlow way.\n\n # input\n if inp.name in converter.input_names:\n # Placeholder input\n input_id = serializer.SerializeTensor(\n inp.name, inp.dtype, inp.shape, None)\n converter.inputs[inp.name] = input_id\n elif parent_layer_name == 'data':\n # Constant\n input_id = serializer.SerializeTensor(\n layer_name + '_input0', inp.data.dtype,\n inp.shape, inp.data)\n else:\n input_id = serializer.FindConnection(\n parent_layer_name)\n # There should have valid connection\n if input_id is None:\n print('{} not found in connections'.format(\n parent_layer_name))\n raise\n\n\n keep_prob = 1 - dropout_ratio\n\n #\n # random_tensor = keep_prob\n #\n # Create 1D tensor which contains tensor shape information.\n shape_array = np.array(inp.shape, dtype=np.int32)\n print('shape_array', shape_array)\n shape_id = serializer.SerializeTensor(layer_name + '_shape', 'int32', [len(inp.shape)], shape_array)\n\n # Create 0D tensor with constant scalar value.\n constant_value = np.array([keep_prob], dtype=np.float32)\n constant_id = serializer.SerializeTensor(layer_name + '_keep_prob_fill', 'float32', [], constant_value)\n\n # A tenor filled with `keep_prob` value.\n keep_prob_id = serializer.SerializeTensor(layer_name + '_keep_prob', 'float32', inp.shape, None)\n\n serialize_ops.SerializeOpFill(serializer, shape_id, constant_id, keep_prob_id)\n\n\n #\n # random_tensor += random_uniform()\n #\n\n # [0.0, 1.0)\n rand_array = np.random.rand(*inp.shape).astype(np.float32)\n\n rand_constant_id = serializer.SerializeTensor(layer_name + '_randm_uniform', 'float32', inp.shape, rand_array)\n\n rand_id = serializer.SerializeTensor(layer_name + '_random', 'float32', inp.shape, None)\n\n serialize_ops.SerializeOpAdd(serializer, keep_prob_id, rand_constant_id, rand_id)\n\n #\n # binary_tensor = floor(random_tensor)\n #\n binary_id = serializer.SerializeTensor(layer_name + '_binary', 'float32', inp.shape, None)\n\n serialize_ops.SerializeOpFloor(serializer, rand_id, binary_id)\n\n #\n # divide(x, keep_prob)\n # TODO(LTE): We can precompute `floor(random_tensor)` since dropout_ratio is a constant value\n # in inference phase.\n #\n\n divide_id = serializer.SerializeTensor(layer_name + '_divide', 'float32', inp.shape, None)\n serialize_ops.SerializeOpDiv(serializer, input_id, keep_prob_id, divide_id)\n\n #\n # divide(x, keep_prob) * binary_tensor\n #\n dropout_id = serializer.SerializeTensor(layer_name + '_dropout', 'float32', inp.shape, None)\n serialize_ops.SerializeOpMul(serializer, divide_id, binary_id, dropout_id)",
"def dropout(self, input_layer, keep_prob=0.5):\n if self.is_training:\n dtype = input_layer.dtype\n with tf.variable_scope(self._count_layer('dropout')):\n keep_prob_tensor = tf.constant(keep_prob, dtype=dtype)\n return tf.nn.dropout(input_layer, keep_prob_tensor)\n else:\n return input_layer",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def get_output_node(self) -> WillumpGraphNode:\n return self.output_node",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,\n device: str = \"\"):\n Node.__init__(self, g, node_id=node_id, name=name,\n op_name=op_name, outputs=[], device=device)\n self._attributes = []\n self._inputs = []\n self._control_inputs = []"
] | [
"0.7353928",
"0.65540075",
"0.6395461",
"0.6285999",
"0.5709964",
"0.55830383",
"0.55629826",
"0.55001",
"0.5478698",
"0.5474093",
"0.546457",
"0.53771776",
"0.53521633",
"0.5260273",
"0.5242218",
"0.51851434",
"0.5174336",
"0.5164886",
"0.5147427",
"0.51048636",
"0.5103446",
"0.5079995",
"0.50699735",
"0.5014554",
"0.5007518",
"0.5004288",
"0.49955434",
"0.4986749",
"0.49845368",
"0.4972351"
] | 0.7049092 | 1 |
Map MXNet's Flatten operator attributes to onnx's Flatten operator and return the created node. | def convert_flatten(node, **kwargs):
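    # Flatten needs no extra attributes, so create_basic_op_node simply emits an ONNX
    # "Flatten" node wired to the same inputs and output name.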
return create_basic_op_node('Flatten', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _create_flatten(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def convert_fully_connected(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n initializer = kwargs[\"initializer\"]\n\n no_bias = get_boolean_attribute_value(attrs, \"no_bias\")\n\n fcnode = []\n\n op_name = \"flatten_\" + str(kwargs[\"idx\"])\n flatten_node = onnx.helper.make_node(\n 'Flatten',\n inputs=[input_nodes[0]],\n outputs=[op_name],\n name=op_name\n )\n\n input_nodes[0] = op_name\n fcnode.append(flatten_node)\n\n if no_bias:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n bias_name = \"bias\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))\n initializer.append(\n onnx.helper.make_tensor(\n name=bias_name,\n data_type=data_type,\n dims=(1,),\n vals=[0],\n raw=False,\n )\n )\n input_nodes.append(bias_name)\n fcnode.append(tensor_node)\n\n node = onnx.helper.make_node(\n \"Gemm\",\n input_nodes, # input (A, B, C) - C can be in place\n [name], # output\n alpha=1.0,\n beta=1.0,\n transA=False,\n transB=True,\n name=name\n )\n\n fcnode.append(node)\n\n return fcnode",
"def flatten(node: ir.Node) -> ir.Node:\n\n def visitor(node: ir.Node, args=None) -> ir.Node:\n if isinstance(node, ir.BinaryOp):\n\n # Flatten singleton BinaryOp\n if len(node.operand) == 1:\n return flatten(node.operand[0])\n\n # Flatten BinaryOp with reduction operators\n new_operator: List[str] = []\n new_operand: List[ir.Expr] = []\n for child_operator, child_operand in zip((None, *node.operator),\n node.operand):\n if child_operator is not None:\n new_operator.append(child_operator)\n # The first operator can always be flattened if two operations has the\n # same type.\n if child_operator in (None, '||', '&&', *'|&+*') and \\\n type(child_operand) is type(node):\n new_operator.extend(child_operand.operator)\n new_operand.extend(child_operand.operand)\n else:\n new_operand.append(child_operand)\n # At least 1 operand is flattened.\n if len(new_operand) > len(node.operand):\n return flatten(type(node)(operator=new_operator, operand=new_operand))\n\n # Flatten compound Operand\n if isinstance(node, ir.Operand):\n for attr in node.ATTRS:\n val = getattr(node, attr)\n if val is not None:\n if isinstance(val, ir.Node):\n return flatten(val)\n break\n else:\n raise util.InternalError('undefined Operand')\n\n # Flatten identity unary operators\n if isinstance(node, ir.Unary):\n minus_count = node.operator.count('-')\n if minus_count % 2 == 0:\n plus_count = node.operator.count('+')\n if plus_count + minus_count == len(node.operator):\n return flatten(node.operand)\n not_count = node.operator.count('!')\n if not_count % 2 == 0 and not_count == len(node.operator):\n return flatten(node.operand)\n\n # Flatten reduction functions\n if isinstance(node, ir.Call):\n operator = getattr(node, 'name')\n if operator in ir.REDUCTION_FUNCS:\n operands: List[ir.Expr] = []\n for operand in getattr(node, 'arg'):\n if (isinstance(operand, ir.Call) and\n getattr(operand, 'name') == operator):\n operands.extend(getattr(operand, 'arg'))\n else:\n operands.append(operand)\n if len(operands) > len(getattr(node, 'arg')):\n return flatten(ir.Call(name=operator, arg=operands))\n\n return node\n\n if not isinstance(node, ir.Node):\n return node\n\n return node.visit(visitor)",
"def local_flatten_lift(node):\r\n if (isinstance(node.op, T.Flatten) and\r\n node.inputs[0].owner and\r\n isinstance(node.inputs[0].owner.op, T.Elemwise) and\r\n len(node.inputs[0].owner.inputs) == 1):\r\n f = node.op(node.inputs[0].owner.inputs[0])\r\n e = node.inputs[0].owner.op(f)\r\n return [e]",
"def flat(self):\n return Op('flat', self)",
"def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def flattened(self) -> 'Node':\n\n # Find attributes that need to be passed to __init__\n init_sig = inspect.signature(self.__class__.__init__)\n args = []\n kwargs = {}\n for name, param in init_sig.parameters.items():\n if name != 'self':\n if param.kind == param.POSITIONAL_ONLY:\n args.append(self.__getattribute__(name))\n elif param.kind in (param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY):\n kwargs[name] = self.__getattribute__(name)\n\n new = self.__class__(*args, **kwargs)\n\n for child in self.children:\n new.append_child(child.flattened())\n\n return new",
"def convert(self):\n return _libsbml.CompFlatteningConverter_convert(self)",
"def convert_flatten(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n input_shape = list(infer_shape(x))\n\n start = op.attr(\"start_axis\")\n end = op.attr(\"stop_axis\")\n ndim = len(input_shape)\n if end < 0:\n end += ndim\n new_shape = [0] * start\n\n new_shape.append(-1)\n squeeze_axes = []\n for i in range(start + 1, end + 1):\n new_shape.append(1)\n squeeze_axes.append(i)\n for _ in range(end + 1, ndim):\n new_shape.append(0)\n out = _op.reshape(x, new_shape)\n if squeeze_axes:\n out = _op.squeeze(out, axis=squeeze_axes)\n\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def flatten(input, name):\n with tf.name_scope(name):\n l = tf.layers.flatten(input)\n return l",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def create_helper_expand_node(input_name, output_name, expand_shape):\n expand_node = onnx.helper.make_node(\n \"Expand\",\n inputs=[input_name, expand_shape],\n outputs=[output_name],\n name=output_name,\n )\n return [expand_node]",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def convert_dot(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n input_node_a = input_nodes[0]\n input_node_b = input_nodes[1]\n\n trans_a_node = None\n trans_b_node = None\n\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if trans_a:\n input_node_a = op_name + \"_a\"\n trans_a_node, = create_helper_trans_node(input_nodes[0], input_node_a)\n if trans_b:\n input_node_b = op_name + \"_b\"\n trans_b_node, = create_helper_trans_node(input_nodes[1], input_node_b)\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_node_a, input_node_b],\n outputs=[name],\n name=name\n )\n\n if not trans_a and not trans_b:\n return [matmul_node]\n elif trans_a and not trans_b:\n return [trans_a_node, matmul_node]\n elif trans_b and not trans_a:\n return [trans_b_node, matmul_node]\n else:\n return [trans_a_node, trans_b_node, matmul_node]",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def flatten():",
"def tf_flatten(x):\n return tf.contrib.layers.flatten(x)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def module(self) -> XlaFlattenParamsWrapper:\n assert isinstance(self._fsdp_wrapped_module, XlaFlattenParamsWrapper)\n return self._fsdp_wrapped_module",
"def createFlattenGroupObjectiveField(self):\n flattenGroup = self._fitter.getFlattenGroup()\n if not flattenGroup:\n return None\n flattenGroupName = flattenGroup.getName()\n flattenMeshGroup = None\n for dimension in range(self._fitter.getHighestDimensionMesh().getDimension(), 0, -1):\n mesh = self._fitter.getMesh(dimension)\n flattenMeshGroup = flattenGroup.getMeshGroup(mesh)\n if flattenMeshGroup.isValid() and (flattenMeshGroup.getSize() > 0):\n break\n else:\n if self.getDiagnosticLevel() > 0:\n print(\"Flatten group \" + flattenGroupName + \" is empty\")\n return None\n weight = self.getGroupDataWeight(flattenGroupName)[0]\n if weight <= 0.0:\n if self.getDiagnosticLevel() > 0:\n print(\"Flatten group \" + flattenGroupName + \" has zero weight\")\n return None\n\n fieldmodule = self._fitter.getFieldmodule()\n modelCoordinates = self._fitter.getModelCoordinatesField()\n flattenComponent = fieldmodule.createFieldComponent(modelCoordinates, modelCoordinates.getNumberOfComponents())\n flattenWeight = fieldmodule.createFieldConstant([weight])\n flattenComponentWeighted = flattenWeight * flattenComponent\n flattenIntegrand = flattenComponentWeighted * flattenComponentWeighted\n numberOfGaussPoints = 3 # assuming some data applied around edges\n flattenGroupObjective = fieldmodule.createFieldMeshIntegral(\n flattenIntegrand, self._fitter.getModelReferenceCoordinatesField(), flattenMeshGroup)\n flattenGroupObjective.setNumbersOfPoints(numberOfGaussPoints)\n return flattenGroupObjective",
"def convert_feed(g, op, block):\n\n if block is not None:\n ipt_name = op.output(\"Out\")[0]\n ipt_shape = block.var(ipt_name).shape\n ipt_dtype = block.var(ipt_name).dtype\n ipt_dtype = str(ipt_dtype).strip().split(\".\")[1]\n else:\n ipt_shape = op.shape\n ipt_dtype = str(op.dtype).strip().split(\".\")[1]\n ipt_name = op.name\n if g.shape_dict is not None:\n ipt_shape = g.shape_dict[ipt_name]\n\n if isinstance(ipt_shape, tuple):\n ipt_shape = list(ipt_shape)\n for i, s in enumerate(ipt_shape):\n if s < 0:\n ipt_shape[i] = _ty.Any()\n out = new_var(ipt_name, shape=ipt_shape, dtype=ipt_dtype)\n g.add_node(ipt_name, out)",
"def flatten(self):\n # get flattened circuit and corresponding expr_map\n cq_flat, expr_map = cirq.flatten(self)\n self.assign(cq_flat)\n if self.expr_map is not None:\n self._expr_map = quple.resolve_expression_map_conflicts(self.expr_map, expr_map)\n else:\n self._expr_map = expr_map",
"def flatten_aliquot(node):\n flat = {}\n def flatten_node(node):\n \"\"\"Replicate the node.\"\"\"\n if 'node_id' not in flat:\n flat['node_id'] = node.node_id\n flat['label'] = node.label\n if 'project_id' in node.properties:\n program, project = node.properties['project_id'].split('-')\n flat['gen3_resource_path'] = '/programs/{}/{}'.format(program, project)\n # creates flat document\n for k in node.properties.keys():\n # skip datetime until guppy supports\n if 'datetime' in k:\n continue\n flat[k] = node.properties[k]\n else:\n # creates flat document\n for k in node.properties.keys():\n # skip system properties if not root node\n if k in ['project_id', 'id', 'node_id']:\n continue\n # skip datetime until guppy supports\n if 'datetime' in k:\n continue\n flat['{}_{}'.format(node.label, k)] = node.properties[k]\n\n traverse_up(node, flatten_node)\n for bcc_sample in node.sample.bcc_sample:\n flatten_node(bcc_sample)\n for diagnosis in node.sample.diagnosis:\n flatten_node(diagnosis)\n for treatment in diagnosis.treatment:\n flatten_node(treatment)\n for bcc_chemotherapy in treatment.bcc_chemotherapy:\n flatten_node(bcc_chemotherapy)\n\n\n # traverse down, this node already rendered\n for e in node.edges_in:\n traverse_down(e.src, flatten_node)\n return flat",
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def __init__(self):\n super().__init__()\n self.flatten = Flatten()",
"def __init__(self):\n super().__init__()\n self.flatten = Flatten()",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])"
] | [
"0.70053905",
"0.6535054",
"0.6155151",
"0.61142373",
"0.6006348",
"0.5795786",
"0.5709457",
"0.56205976",
"0.5437379",
"0.54225713",
"0.5404203",
"0.5383438",
"0.5313076",
"0.5282197",
"0.5249121",
"0.5248736",
"0.522337",
"0.5183563",
"0.51568246",
"0.51036185",
"0.51030135",
"0.50807935",
"0.5069833",
"0.5055739",
"0.5045557",
"0.50398105",
"0.5009211",
"0.50077647",
"0.50077647",
"0.4999101"
] | 0.68925685 | 1 |
Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node. | def convert_clip(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))
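    # min/max are emitted as attributes, which matches ONNX Clip up to opset 10;
    # opset 11 and later expect them as optional inputs instead.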
clip_node = onnx.helper.make_node(
"Clip",
input_nodes,
[name],
name=name,
min=a_min,
max=a_max
)
return [clip_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def _create_clip(cls, onnx_node, inputs, opset_version):\n # sometime onnx may ignore these two inputs, min or max or both\n if len(inputs) >= 2 and onnx_node.inputs[1] != '':\n min_v = tensor.to_numpy(inputs.pop(1)).tolist()[0]\n else:\n min_v = None\n if len(inputs) >= 2 and onnx_node.inputs[2] != '':\n max_v = tensor.to_numpy(inputs.pop(1)).tolist()[0]\n else:\n max_v = None\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(min_v, max_v)",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def convert_clip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n # if the min/max value is a tensor\n min_max_is_tensor = False\n if op.input(\"Min\"):\n min_value = g.get_node(op.input(\"Min\")[0])\n min_value, infered = try_infer_value(min_value, g.get_params())\n if infered:\n min_value = min_value.tolist()[0]\n if isinstance(min_value, _expr.Expr):\n min_max_is_tensor = True\n else:\n min_value = op.attr(\"min\")\n\n if op.input(\"Max\"):\n max_value = g.get_node(op.input(\"Max\")[0])\n max_value, infered = try_infer_value(max_value, g.get_params())\n if infered:\n max_value = max_value.tolist()[0]\n if isinstance(max_value, _expr.Expr):\n min_max_is_tensor = True\n else:\n max_value = op.attr(\"max\")\n\n if min_max_is_tensor:\n if not isinstance(min_value, _expr.Expr):\n min_value = _op.const(min_value, dtype)\n if not isinstance(max_value, _expr.Expr):\n max_value = _op.const(max_value, dtype)\n out = _op.maximum(x, min_value)\n out = _op.minimum(out, max_value)\n else:\n out = _op.clip(x, min_value, max_value)\n g.add_node(op.output(\"Out\")[0], out)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def clip(self, name=None, attrs=None):\n idx = 1\n while name is None or name in self.masks:\n name = \"clip%d\" % idx\n idx += 1\n self.masks[name] = SVG()\n with self.group(style={\"clip-path\": \"url(#%s)\" % name}, attrs=attrs):\n yield self.masks[name]",
"def convert_copy(node, **kwargs):\n return create_basic_op_node('Identity', node, kwargs)",
"def clip(self, name=None, attrs=None):\n if name is None:\n while name is None or name in self.masks:\n name = \"__auto__clip%d\" % idx\n idx += 1\n if name not in self.masks:\n self.masks[name] = SVG()\n with self.group(style={\"clip-path\": \"url(#%s)\" % name}, attrs=attrs):\n yield self.masks[name]",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def _special_handle_clip(cls, op, X, W):\n tensor_list = []\n # clip add min and max\n append_inputs = {\"min\": op.min, \"max\": op.max}\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n helper.make_tensor(node_name, TensorProto.FLOAT, [],\n [append_input]))\n return tensor_list",
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def to_op(self):\n raise NotImplementedError",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def clone(self):\n tmp = self.my_operator\n self.my_operator = None\n new = copy.copy(self)\n self.my_operator = tmp\n return new",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def convert_crop(node, **kwargs):\n\n name, inputs, attrs = get_inputs(node, kwargs)\n\n start = np.array([0, 0, 0, 0], dtype=np.int) # index是int类型\n\n export_nodes = []\n\n start_node = create_helper_tensor_node(start, name + '__starts', kwargs)\n export_nodes.extend(start_node)\n start_node = start_node[-1].name\n shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape')\n export_nodes.extend(shape_node)\n shape_node = shape_node[-1].name\n\n crop_node = onnx.helper.make_node(\n \"Slice\",\n inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'], # data、start、end\n outputs=[name],\n name=name\n )\n\n logging.warning(\n \"Using an experimental ONNX operator: Crop. \" \\\n \"Its definition can change.\")\n export_nodes.extend([crop_node])\n\n return export_nodes",
"def __init__(self,clip_list):\n self.requested_clips=clip_list",
"def Clip(*args, **kwargs):\n return _gdi_.GraphicsContext_Clip(*args, **kwargs)",
"def _weight_clipping_op(self):\n with tf.name_scope('weight_clipping'):\n clipped_weights = [tf.assign(var, tf.clip_by_value(var, -self.clip, self.clip))\n for var in self.c_params]\n return clipped_weights",
"def get_clip_module(clip_model_name: str) -> CLIP:\n return CLIP(*PARAMETERS[clip_model_name].values())",
"def copyAttributes(fromNcVar, toNcVar):\r\n for attrName in fromNcVar.ncattrs():\r\n attrVal = getattr(fromNcVar, attrName)\r\n setattr(toNcVar, attrName, attrVal)",
"def clip(self):\r\n\t\treturn self._clip"
] | [
"0.64989394",
"0.5577588",
"0.5251773",
"0.5233911",
"0.50637215",
"0.50445",
"0.5005481",
"0.4997189",
"0.48964328",
"0.4827775",
"0.48206207",
"0.48055732",
"0.47918075",
"0.47716203",
"0.47590274",
"0.47138596",
"0.46755806",
"0.4661917",
"0.4658839",
"0.4649198",
"0.46434242",
"0.46338883",
"0.46332774",
"0.45616248",
"0.45614612",
"0.45416647",
"0.45348588",
"0.45335895",
"0.45249963",
"0.44778535"
] | 0.6301358 | 1 |
Map MXNet's _mul_scalar operator attributes to onnx's Mul operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_mul_scalar(node, **kwargs):
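    # scalar_op_helper either folds the scalar into an existing initializer tensor or
    # emits a scalar constant plus the requested "Mul" node.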
return scalar_op_helper(node, 'Mul', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def tree_add_scalar_mul(tree_x, scalar, tree_y):\n return tree_multimap(lambda x, y: x + scalar * y, tree_x, tree_y)",
"def scalar_mult(diagram, scalar):\n for node in diagram.nodes:\n if node.is_leaf():\n node.value *= scalar\n else:\n for oindex in node.offsets:\n node.offsets[oindex] *= scalar",
"def __mul__(self, scalar):\n m, n = self.shape\n scalar = mpfr(scalar)\n data = dict()\n for i in range(m):\n for j in range(n):\n data[i, j] = self[i, j] * scalar\n return MPMatrix((m, n), data)",
"def mul_(self, scalar):\n for idx in range(len(self)):\n self.parameters[idx] *= scalar",
"def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]",
"def scalar_mult(diagram, scalar):\n raise NotImplementedError",
"def __mul__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] *= _scalar\n\t\treturn ans",
"def local_add_mul_fusion(node):\r\n if (not isinstance(node.op, Elemwise) or\r\n not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul))):\r\n return False\r\n\r\n s_op = node.op.scalar_op.__class__\r\n for inp in node.inputs:\r\n if (inp.owner and\r\n isinstance(inp.owner.op, Elemwise) and\r\n isinstance(inp.owner.op.scalar_op, s_op)):\r\n l = list(node.inputs)\r\n l.remove(inp)\r\n return [node.op(*(l + inp.owner.inputs))]",
"def __mul__(self, scalar) -> 'ModelParameters':\n return ModelParameters([self[idx] * scalar for idx in range(len(self))])",
"def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)",
"def scalar_mult(diagram, scalar):\n for leaf in diagram.leaves:\n leaf.value *= scalar",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def local_sum_mul_by_scalar(node):\r\n # TODO: if the the thing inside the Sum is a division,\r\n # we should get at the numerator....\r\n if isinstance(node.op, T.Sum):\r\n thing_summed, = node.inputs\r\n if thing_summed.owner and thing_summed.owner.op == T.mul:\r\n terms = thing_summed.owner.inputs\r\n scalars = [t.dimshuffle() for t in terms if\r\n numpy.all(t.type.broadcastable)]\r\n non_scalars = [t for t in terms if not numpy.all(t.broadcastable)]\r\n if scalars:\r\n if len(scalars) > 1:\r\n if len(non_scalars) > 1:\r\n return [T.mul(T.mul(*scalars),\r\n node.op(T.mul(*non_scalars)))]\r\n elif len(non_scalars) == 1:\r\n return [T.mul(T.mul(*scalars),\r\n node.op(non_scalars[0]))]\r\n else:\r\n return [T.mul(*scalars)]\r\n else:\r\n if len(non_scalars) > 1:\r\n return [T.mul(scalars[0],\r\n node.op(T.mul(*non_scalars)))]\r\n elif len(non_scalars) == 1:\r\n return [T.mul(scalars[0], node.op(non_scalars[0]))]\r\n else:\r\n return [scalars[0]]\r\n if thing_summed.owner and thing_summed.owner.op == T.neg:\r\n return [T.neg(node.op(thing_summed.owner.inputs[0]))]",
"def __mul__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Mul.apply(self, other)",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def scalar_op_helper(node, op_name, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n from onnx import numpy_helper\n input_type = kwargs[\"in_type\"]\n scalar_value = np.array([attrs.get(\"scalar\", 1)],\n dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])\n\n initializer = kwargs[\"initializer\"]\n flag = True\n # If the input value is in initializer, just multiply with scalar input\n # and create a new initializer\n for i in initializer:\n if i.name == input_nodes[0]:\n if op_name == 'Mul':\n new_initializer = numpy_helper.to_array(i) * scalar_value[0]\n elif op_name == 'Sub':\n if name.startswith(\"_rminusscalar\"):\n new_initializer = scalar_value[0] - numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) - scalar_value[0]\n elif op_name == 'Add':\n new_initializer = numpy_helper.to_array(i) + scalar_value[0]\n elif op_name == 'Div':\n if name.startswith(\"_rdivscalar\"):\n new_initializer = scalar_value[0] / numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) / scalar_value[0]\n elif op_name == 'Pow':\n new_initializer = numpy_helper.to_array(i) ** scalar_value[0]\n flag = False\n break\n\n # else create a new tensor of the scalar value, add it in initializer\n if flag is True:\n dims = np.shape(scalar_value)\n\n scalar_op_name = \"scalar_op\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=scalar_op_name,\n data_type=input_type,\n dims=dims,\n vals=scalar_value,\n raw=False,\n )\n )\n\n mul_node = onnx.helper.make_node(\n op_name,\n [input_nodes[0], scalar_op_name],\n [name],\n name=name\n )\n\n return [tensor_node, mul_node]\n else:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]\n dims = np.shape(new_initializer)\n\n new_a_node = input_nodes[0] + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=new_a_node,\n data_type=data_type,\n dims=dims,\n vals=new_initializer,\n raw=False,\n )\n )\n return [tensor_node]",
"def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"",
"def scalar_mult(diagram, scalar):\n for oindex in diagram.offsets:\n diagram.offsets[oindex] *= scalar",
"def scalar_mult(diagram, scalar):\n for oindex in diagram.offsets:\n diagram.offsets[oindex] *= scalar",
"def multiply( self, scalar ):\n self._coords[:3] *= scalar\n return self",
"def __mul__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Multiply, value)\n return out",
"def __mul__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"*\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"* scalar)\"\n x *= value\n return x",
"def __mul__(self, tensor):\n return self.mul(tensor)",
"def __rmul__(self, scalar) -> 'ModelParameters':\n return self.__mul__(scalar)",
"def local_mul_to_sqr(node):\r\n if node.op == T.mul:\r\n if len(node.inputs) == 2:\r\n if node.inputs[0] is node.inputs[1]:\r\n return [T.sqr(node.inputs[0])]",
"def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)",
"def compute_mul(tree):\r\n neg, inputs = tree\r\n if inputs is None:\r\n raise AssertionError(\r\n 'Function `compute_mul` found a missing leaf, did you forget to '\r\n 'call `simplify_mul` on the tree first?')\r\n elif isinstance(inputs, list):\r\n # Recurse through inputs.\r\n rval = tensor.mul(*map(compute_mul, inputs))\r\n else:\r\n rval = inputs\r\n if neg:\r\n rval = -rval\r\n return rval",
"def local_mul_zero(node):\r\n if node.op == T.mul:\r\n otype = node.outputs[0].type\r\n\r\n for i in node.inputs:\r\n try:\r\n value = get_scalar_constant_value(i)\r\n except NotScalarConstantError:\r\n continue\r\n #print 'MUL by value', value, node.inputs\r\n if value == 0:\r\n #print '... returning zeros'\r\n return _fill_chain(theano._asarray(0, dtype=otype.dtype),\r\n node.inputs)"
] | [
"0.79224896",
"0.7241136",
"0.6973371",
"0.6796328",
"0.6784119",
"0.6776615",
"0.67680424",
"0.6747651",
"0.67148656",
"0.6702787",
"0.66282004",
"0.6625424",
"0.6572571",
"0.65414375",
"0.652442",
"0.6481082",
"0.63871235",
"0.63544697",
"0.63508964",
"0.6326876",
"0.6326876",
"0.63099724",
"0.63086414",
"0.6306102",
"0.62947637",
"0.62361264",
"0.62278086",
"0.62223834",
"0.62145776",
"0.62055045"
] | 0.8387702 | 0 |
Map MXNet's _minus_scalar operator attributes to onnx's Minus operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_minus_scalar(node, **kwargs):
return scalar_op_helper(node, 'Sub', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)",
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def __neg__(self):\n return UnaryMinus(self)",
"def Minus(self):\n return CompoundWithCoeff(-self.coeff, self.compound, self.phase,\n self.name)",
"def visit_UnaryOp(self, node):\n self.generic_visit(node)\n if isinstance(node.operand, ast.Num):\n # Don't transform negations of numeric literals. Just treat them\n # as literals.\n return node\n return to_call(self.op_to_function(node.op), [node.operand])",
"def pauli_represent_minus_plus(e):\n # XXX: todo, make sure that new operators inherit labels\n return expression_tree_transform(\n e, [(lambda e: isinstance(e, SigmaX),\n lambda e: SigmaMinus() + SigmaPlus()),\n (lambda e: isinstance(e, SigmaY),\n lambda e: I * SigmaMinus() - I * SigmaPlus())]\n )",
"def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def convert_negative(node, **kwargs):\n return create_basic_op_node('Neg', node, kwargs)",
"def scalar_op_helper(node, op_name, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n from onnx import numpy_helper\n input_type = kwargs[\"in_type\"]\n scalar_value = np.array([attrs.get(\"scalar\", 1)],\n dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])\n\n initializer = kwargs[\"initializer\"]\n flag = True\n # If the input value is in initializer, just multiply with scalar input\n # and create a new initializer\n for i in initializer:\n if i.name == input_nodes[0]:\n if op_name == 'Mul':\n new_initializer = numpy_helper.to_array(i) * scalar_value[0]\n elif op_name == 'Sub':\n if name.startswith(\"_rminusscalar\"):\n new_initializer = scalar_value[0] - numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) - scalar_value[0]\n elif op_name == 'Add':\n new_initializer = numpy_helper.to_array(i) + scalar_value[0]\n elif op_name == 'Div':\n if name.startswith(\"_rdivscalar\"):\n new_initializer = scalar_value[0] / numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) / scalar_value[0]\n elif op_name == 'Pow':\n new_initializer = numpy_helper.to_array(i) ** scalar_value[0]\n flag = False\n break\n\n # else create a new tensor of the scalar value, add it in initializer\n if flag is True:\n dims = np.shape(scalar_value)\n\n scalar_op_name = \"scalar_op\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=scalar_op_name,\n data_type=input_type,\n dims=dims,\n vals=scalar_value,\n raw=False,\n )\n )\n\n mul_node = onnx.helper.make_node(\n op_name,\n [input_nodes[0], scalar_op_name],\n [name],\n name=name\n )\n\n return [tensor_node, mul_node]\n else:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]\n dims = np.shape(new_initializer)\n\n new_a_node = input_nodes[0] + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=new_a_node,\n data_type=data_type,\n dims=dims,\n vals=new_initializer,\n raw=False,\n )\n )\n return [tensor_node]",
"def __neg__(self):\n return type(self)(self.parent(), self._simplify(-self._express))",
"def __neg__(self):\n return TensorWithIndices(-self._tensor, \n self._con + '_' + self._cov)",
"def __neg__(self):\n return tuple.__new__(Vec2, (-self[0], -self[1]))",
"def __neg__(self):\r\n return mat4(map(lambda x: -x, self.mlist))",
"def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))",
"def __neg__(self):\n return Quantity(-(self._value), self.unit)",
"def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression",
"def _create_clip(cls, onnx_node, inputs, opset_version):\n # sometime onnx may ignore these two inputs, min or max or both\n if len(inputs) >= 2 and onnx_node.inputs[1] != '':\n min_v = tensor.to_numpy(inputs.pop(1)).tolist()[0]\n else:\n min_v = None\n if len(inputs) >= 2 and onnx_node.inputs[2] != '':\n max_v = tensor.to_numpy(inputs.pop(1)).tolist()[0]\n else:\n max_v = None\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(min_v, max_v)",
"def __neg__(self):\n return self.from_points(-v for v in self._vectors)",
"def __neg__(self):\n\t\tval = -self.val\n\t\tder = -self.der if len(self.der.shape) else None\n\t\treturn Var(val, der)",
"def __neg__(self):\n return Vector([-c for c in self.components])",
"def convert_min(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def __sub__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during substraction to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Minus(self, other)",
"def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)",
"def visit_UnaryOpNode(self, node: UnaryOpNode, symbol_table: SymbolTable) -> Number:\n number = self.visit(node.node, symbol_table)\n\n if node.op_tok.token_type == TokenType.MINUS:\n return number * Number(-1)\n elif node.op_tok.token_type == TokenType.PLUS:\n return number\n elif node.op_tok.value == 'not':\n return number.notted_by()",
"def _create_selu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.67326)\n gamma = onnx_node.getattr(\"gamma\", 1.0507)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, gamma)",
"def from_scalar(self, s, t):\n raise NotImplementedError('from_scalar')",
"def __neg__(self):\n data = [[-self[i, j] for j in range(self.n)] for i in range(self.m)]\n return self.__class__(self.m, self.n, data)",
"def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()"
] | [
"0.6305766",
"0.6050129",
"0.60270244",
"0.6017436",
"0.597134",
"0.58471966",
"0.56328523",
"0.5549238",
"0.553047",
"0.5523445",
"0.55042356",
"0.54917306",
"0.5459456",
"0.5451665",
"0.5436429",
"0.5424267",
"0.53898084",
"0.5378877",
"0.53705645",
"0.5368082",
"0.53500354",
"0.5325119",
"0.5306801",
"0.5282946",
"0.52645224",
"0.5264497",
"0.52394396",
"0.5228635",
"0.52285194",
"0.52253705"
] | 0.6904257 | 0 |
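The scalar records in this stretch (`_minus_scalar`, `_rminus_scalar`, `_plus_scalar`, `_div_scalar`, `_pow_scalar`, ...) all defer to the `scalar_op_helper` shown among the negatives: the scalar attribute becomes a one-element initializer and the operator itself becomes a plain two-input ONNX node. A minimal, self-contained sketch of the fragment such a conversion yields for an `x - 2.0` case; the tensor names ("data", "minus0") and shapes are illustrative assumptions, and only `onnx.helper` calls already used in the records appear.

import numpy as np
from onnx import TensorProto, checker, helper

# The scalar attribute becomes a one-element initializer (illustrative name "minus0_scalar").
scalar = np.array([2.0], dtype=np.float32)
scalar_init = helper.make_tensor(name="minus0_scalar", data_type=TensorProto.FLOAT,
                                 dims=scalar.shape, vals=scalar, raw=False)

# The operator itself is an ordinary two-input Sub node.
sub_node = helper.make_node("Sub", ["data", "minus0_scalar"], ["minus0"], name="minus0")

graph = helper.make_graph(
    [sub_node], "minus_scalar_sketch",
    inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, [1, 3])],
    outputs=[helper.make_tensor_value_info("minus0", TensorProto.FLOAT, [1, 3])],
    initializer=[scalar_init])

model = helper.make_model(graph)
checker.check_model(model)  # the emitted fragment is a valid standalone graph
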
Map MXNet's _rminus_scalar operator attributes to onnx's Sub operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_rminus_scalar(node, **kwargs):
return scalar_op_helper(node, 'Sub', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def convert_elementwise_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)",
"def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = sub_byconst_op(self, other)\r\n return new_node",
"def covert_broadcast_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)",
"def test_sub():\n # Test for subtraction with Rnode object\n x = Rnode(0.11)\n y = Rnode(0.5)\n z = x - y\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value - y.value\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)\n # Test for subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = x - 0.1\n try:\n assert z.value == x.value - 0.1\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)",
"def __neg__(self):\n return UnaryMinus(self)",
"def __rsub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(other, self)",
"def __rsub__(self, tensor):\n return -self + tensor",
"def __rsub__(self, other, **kwargs):\n kwargs.update({'sub': False, 'operator': 'add'})\n return Curve.__add__(self.__neg__(), other, **kwargs)",
"def __sub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(self, other)",
"def __sub__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Subtract, value)\n return out",
"def __rsub__(self, other):\n return self._operation_sub(other, self)",
"def __sub__(self, other):\n return self._operation_sub(self, other)",
"def __sub__(self, tensor):\n return self.sub(tensor)",
"def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)",
"def __rsub__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during substraction of {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Minus(other, self)",
"def __sub__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during substraction to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Minus(self, other)",
"def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)",
"def __sub__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x-y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"",
"def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)",
"def _sub_op(value, sample_args, rationals_allowed):\n entropy, sample_args = sample_args.peel()\n if rationals_allowed and sample_args.count >= 3:\n x = number.integer_or_rational(entropy, True)\n else:\n x = number.integer(entropy, True)\n if random.choice([False, True]):\n op_args = [x, x - value]\n else:\n op_args = [value + x, x]\n return ops.Sub, op_args, sample_args",
"def __sub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.sub)",
"def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"",
"def convert_negative(node, **kwargs):\n return create_basic_op_node('Neg', node, kwargs)",
"def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)",
"def __sub__(self, other):\n return self + other.__neg__()",
"def __sub__(self, other):\n return self + other.__neg__()",
"def sub(self, other, weight=one):\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n \n ops = []\n if isinstance(weight, VarStruct):\n weight = weight.var\n \n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign_sub(weight*other_var))\n return tf.group(*ops, name=\"sub_\"+self.name)"
] | [
"0.79085666",
"0.68994915",
"0.6750096",
"0.65244937",
"0.61306584",
"0.61295164",
"0.61034423",
"0.6103109",
"0.6042899",
"0.6030476",
"0.6012048",
"0.60119057",
"0.5996009",
"0.5993797",
"0.5971871",
"0.59533864",
"0.5938247",
"0.588912",
"0.58856267",
"0.57691115",
"0.5764717",
"0.57465816",
"0.57447577",
"0.57423186",
"0.5739742",
"0.5699567",
"0.5686701",
"0.5682151",
"0.5682151",
"0.56641495"
] | 0.8166036 | 0 |
Map MXNet's _plus_scalar operator attributes to onnx's Add operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_add_scalar(node, **kwargs):
return scalar_op_helper(node, 'Add', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def __add__(self, other):\r\n if isinstance(other, Node):\r\n new_node = add_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = add_byconst_op(self, other)\r\n return new_node",
"def __add__(self, other):\n if isinstance(other, Node):\n new_node = add_op(self, other)\n else:\n # Add by a constant stores the constant in the new node's const_attr field.\n # 'other' argument is a constant\n new_node = add_byconst_op(self, other)\n return new_node",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out",
"def __add__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)",
"def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)",
"def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)",
"def add(lhs, rhs):\n return _make.add(lhs, rhs)",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def add(a: PipeNumeric, b: PipeNumeric):\n assert a.get_type() == b.get_type()\n num_type = a.get_type()\n assert isinstance(num_type, num.SignedFixedNumberType) or isinstance(num_type, num.UnsignedIntegerNumberType)\n\n if isinstance(a, PipeConstant) and isinstance(b, PipeConstant):\n return PipeConstant(num_type, int(\n num_type.create_from_constant(a.get_value()) + num_type.create_from_constant(b.get_value())\n ))\n elif isinstance(a, PipeConstant) or isinstance(b, PipeConstant):\n if isinstance(a, PipeConstant):\n static_value = a.get_value()\n dynamic_value = b\n else:\n static_value = b.get_value()\n dynamic_value = a\n\n if static_value == 0:\n return dynamic_value\n\n node = OneCycleNode()\n\n node.add_inputs(a=a, b=b)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('{}-add'.format('fixed' if isinstance(num_type, num.SignedFixedNumberType) else 'integer'))\n node.set_logic(add_seq)\n\n return node",
"def __add__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__add__\")",
"def __iadd__(self, tensor):\n return self.add_(tensor)",
"def __add__(self, other) -> 'Tensor':\n return _add(self, ensure_tensor(other))",
"def __iadd__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.add)",
"def _add_scalar(self, scalar):\n\n a, b = self, scalar\n\n # Don't bother to salt/obfuscate in a basic operation, do it\n # just before leaving the computer.\n encrypted_scalar = a.public_key.raw_encrypt(b, 1)\n\n sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)\n return EncryptedNumber(a.public_key, sum_ciphertext)",
"def __add__(self, tensor):\n return self.add(tensor)",
"def append(self):\n return AttributeFunctor(self, lambda a, b: a + b)",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def __add__(self, other):\n return add_mps(self, other)",
"def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)",
"def __add__(self,that):\n return self.__opExpand2(that,np.add)",
"def scalar_op_helper(node, op_name, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n from onnx import numpy_helper\n input_type = kwargs[\"in_type\"]\n scalar_value = np.array([attrs.get(\"scalar\", 1)],\n dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])\n\n initializer = kwargs[\"initializer\"]\n flag = True\n # If the input value is in initializer, just multiply with scalar input\n # and create a new initializer\n for i in initializer:\n if i.name == input_nodes[0]:\n if op_name == 'Mul':\n new_initializer = numpy_helper.to_array(i) * scalar_value[0]\n elif op_name == 'Sub':\n if name.startswith(\"_rminusscalar\"):\n new_initializer = scalar_value[0] - numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) - scalar_value[0]\n elif op_name == 'Add':\n new_initializer = numpy_helper.to_array(i) + scalar_value[0]\n elif op_name == 'Div':\n if name.startswith(\"_rdivscalar\"):\n new_initializer = scalar_value[0] / numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) / scalar_value[0]\n elif op_name == 'Pow':\n new_initializer = numpy_helper.to_array(i) ** scalar_value[0]\n flag = False\n break\n\n # else create a new tensor of the scalar value, add it in initializer\n if flag is True:\n dims = np.shape(scalar_value)\n\n scalar_op_name = \"scalar_op\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=scalar_op_name,\n data_type=input_type,\n dims=dims,\n vals=scalar_value,\n raw=False,\n )\n )\n\n mul_node = onnx.helper.make_node(\n op_name,\n [input_nodes[0], scalar_op_name],\n [name],\n name=name\n )\n\n return [tensor_node, mul_node]\n else:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]\n dims = np.shape(new_initializer)\n\n new_a_node = input_nodes[0] + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=new_a_node,\n data_type=data_type,\n dims=dims,\n vals=new_initializer,\n raw=False,\n )\n )\n return [tensor_node]",
"def add(self, value):",
"def ADD (self, n1, n2):",
"def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)",
"def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)",
"def __add__(self, other: Any) -> TypeValue:\n if isinstance(other, np.ndarray):\n return other + float(self)\n\n return self._like_self_from_float(\n float(self) + self._other_same_units(other)\n )"
] | [
"0.767562",
"0.72351784",
"0.69635",
"0.6916363",
"0.69029146",
"0.6712746",
"0.65971774",
"0.6551346",
"0.6480433",
"0.6455669",
"0.64297163",
"0.64250094",
"0.64143014",
"0.63674563",
"0.6350933",
"0.63499177",
"0.63171613",
"0.6313307",
"0.62899745",
"0.6258752",
"0.62583524",
"0.6213183",
"0.6160441",
"0.6140524",
"0.6112976",
"0.6107246",
"0.61043364",
"0.6098213",
"0.6097841",
"0.6094445"
] | 0.80610985 | 0 |
Map MXNet's _div_scalar operator attributes to onnx's Div operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_div_scalar(node, **kwargs):
return scalar_op_helper(node, 'Div', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def convert_broadcast_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def __div__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] /= _scalar\n\t\treturn ans",
"def __div__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"/\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"/ scalar)\"\n x /= value\n return x",
"def __div__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def __div__(self, other, **kwargs):\n kwargs.update({'operator': 'div'})\n return self.__add__(other, **kwargs)",
"def __div__(self, tensor):\n return self.div(tensor)",
"def __rdiv__(self, _scalar):\n\t\treturn self / _scalar",
"def __rdiv__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)",
"def floordiv_(self, scalar):\n for idx in range(len(self)):\n self.parameters[idx] //= scalar",
"def __div__(self, other):\r\n T = type(other)\r\n # vec4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x/other, self.y/other, self.z/other, self.w/other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __div__(self, other):\r\n T = type(other)\r\n # mat4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x/other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __itruediv__(self, scalar):\n return self.div_(scalar)",
"def __floordiv__(self, scalar) -> 'ModelParameters':\n return ModelParameters([self[idx] // scalar for idx in range(len(self))])",
"def scalar_op_helper(node, op_name, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n from onnx import numpy_helper\n input_type = kwargs[\"in_type\"]\n scalar_value = np.array([attrs.get(\"scalar\", 1)],\n dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])\n\n initializer = kwargs[\"initializer\"]\n flag = True\n # If the input value is in initializer, just multiply with scalar input\n # and create a new initializer\n for i in initializer:\n if i.name == input_nodes[0]:\n if op_name == 'Mul':\n new_initializer = numpy_helper.to_array(i) * scalar_value[0]\n elif op_name == 'Sub':\n if name.startswith(\"_rminusscalar\"):\n new_initializer = scalar_value[0] - numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) - scalar_value[0]\n elif op_name == 'Add':\n new_initializer = numpy_helper.to_array(i) + scalar_value[0]\n elif op_name == 'Div':\n if name.startswith(\"_rdivscalar\"):\n new_initializer = scalar_value[0] / numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) / scalar_value[0]\n elif op_name == 'Pow':\n new_initializer = numpy_helper.to_array(i) ** scalar_value[0]\n flag = False\n break\n\n # else create a new tensor of the scalar value, add it in initializer\n if flag is True:\n dims = np.shape(scalar_value)\n\n scalar_op_name = \"scalar_op\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=scalar_op_name,\n data_type=input_type,\n dims=dims,\n vals=scalar_value,\n raw=False,\n )\n )\n\n mul_node = onnx.helper.make_node(\n op_name,\n [input_nodes[0], scalar_op_name],\n [name],\n name=name\n )\n\n return [tensor_node, mul_node]\n else:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]\n dims = np.shape(new_initializer)\n\n new_a_node = input_nodes[0] + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=new_a_node,\n data_type=data_type,\n dims=dims,\n vals=new_initializer,\n raw=False,\n )\n )\n return [tensor_node]",
"def __div__(self,that):\n return self.__opExpand2(that, np.divide)",
"def div_value(self, lv, rv):",
"def test_mixeddiv():\r\n i = iscalar()\r\n d = dscalar()\r\n assert 0 == function([i, d], d * (i // (i + 1)))(3, 1.0)",
"def __div__(self, other):\n\n return self._mul_div(other, div=True)",
"def __div__(self, other):\n if isinstance(other, (int, float)):\n return self * (1 / other)\n else:\n raise TypeError(\"Cannot divide vector by {}\".format(other))",
"def divide( self, scalar ):\n # check we aren't dividing by 0\n if abs(scalar) < self.EPSILON:\n raise ZeroDivisionError( \"can't divide vector by zero!\" )\n\n self._coords[:3] /= scalar\n\n return self",
"def _mul_div(self, scaling_factor, div=False):\n\n if not isinstance(scaling_factor, UFloat):\n try:\n scaling_factor = float(scaling_factor)\n except (TypeError, ValueError):\n raise TypeError(\n 'Spectrum must be multiplied/divided by a scalar')\n if (scaling_factor == 0 or\n np.isinf(scaling_factor) or\n np.isnan(scaling_factor)):\n raise ValueError(\n 'Scaling factor must be nonzero and finite')\n else:\n if (scaling_factor.nominal_value == 0 or\n np.isinf(scaling_factor.nominal_value) or\n np.isnan(scaling_factor.nominal_value)):\n raise ValueError(\n 'Scaling factor must be nonzero and finite')\n if div:\n multiplier = 1 / scaling_factor\n else:\n multiplier = scaling_factor\n\n if self._counts is not None:\n data_arg = {'counts': self.counts * multiplier}\n else:\n data_arg = {'cps': self.cps * multiplier}\n spect_obj = Spectrum(bin_edges_kev=self.bin_edges_kev, **data_arg)\n return spect_obj",
"def divide(lhs, rhs):\n return _make.divide(lhs, rhs)",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def scalar_mult(diagram, scalar):\n for node in diagram.nodes:\n if node.is_leaf():\n node.value *= scalar\n else:\n for oindex in node.offsets:\n node.offsets[oindex] *= scalar",
"def __div__(self, oth):\n\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] / oth, self.rows, self.cols)",
"def __div__(self, other):\n\n s = len(self)\n v = zeros_como(self)\n\n if isinstance(other, Vetor):\n # Both operands are Vetors\n # In this case perform a element wise product\n r = len(other)\n\n if s != r:\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n for i in range(slen):\n v[i] = self[i] / float(other[i])\n else:\n # check if other is a scalar\n if hasattr(other, \"__len__\"):\n raise(VetorError, \"Operand isn't an scalar\")\n\n for i in range(s):\n v[i] = self[i] / float(other)\n\n return v"
] | [
"0.7483501",
"0.69537693",
"0.6816418",
"0.6603985",
"0.6457433",
"0.63764226",
"0.635181",
"0.6023572",
"0.5944247",
"0.59403896",
"0.5891626",
"0.5882687",
"0.5876528",
"0.58643323",
"0.5856814",
"0.5833514",
"0.5809807",
"0.5758597",
"0.55864036",
"0.5551182",
"0.5471876",
"0.54654515",
"0.54470235",
"0.54465276",
"0.5424368",
"0.54172206",
"0.54172206",
"0.5406287",
"0.5383857",
"0.53837186"
] | 0.8018971 | 0 |
Map MXNet's _rdiv_scalar operator attributes to onnx's Div operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_rdiv_scalar(node, **kwargs):
return scalar_op_helper(node, 'Div', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def __rdiv__(self, _scalar):\n\t\treturn self / _scalar",
"def convert_broadcast_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def __rdiv__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)",
"def __div__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] /= _scalar\n\t\treturn ans",
"def __div__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"/\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"/ scalar)\"\n x /= value\n return x",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def __div__(self, other, **kwargs):\n kwargs.update({'operator': 'div'})\n return self.__add__(other, **kwargs)",
"def __rdiv__(self, scalar):\n raise(VetorError, \"Not possible divide a scalar by a vector\")",
"def __div__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)",
"def __rdiv__(self, other):\n return self.__rtruediv__(other)",
"def __div__(self, other):\r\n T = type(other)\r\n # mat4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x/other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __div__(self, other):\r\n T = type(other)\r\n # vec4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x/other, self.y/other, self.z/other, self.w/other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def div_value(self, lv, rv):",
"def __div__(self, tensor):\n return self.div(tensor)",
"def __rdiv__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during division by {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Divide(other, self)",
"def rdivmod(self, other, **kwargs):\n return SeriesDefault.register(pandas.Series.rdivmod)(\n self, other=other, **kwargs\n )",
"def __itruediv__(self, scalar):\n return self.div_(scalar)",
"def __div__(self,that):\n return self.__opExpand2(that, np.divide)",
"def __rdiv__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return Curve.__add__(self.__invertArithmetic__(), other, **kwargs)",
"def __rdiv__(self, number):\n return self.__div__(number)",
"def __rdiv__(self,that):\n B = that if isinstance(that,Factor) else Factor([],that)\n return B.__opExpand2(self, np.divide)",
"def div(self, source, destination):\n value = bytearray()\n\n dividend = destination\n divider = source\n\n if is_single_scalar_reg(divider):\n value.extend([0xF3, 0x0F, 0x5E]) # divss\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = get_register_encoding(dividend)\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n elif is_double_scalar_reg(divider):\n value.extend([0xF2, 0x0F, 0x5E]) # divsd\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = get_register_encoding(dividend)\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n else:\n # idiv eax = edx:eax / divider\n if divider == ProcessorRegister.accumulator:\n tmp_reg = ProcessorRegister.data\n value += self.copy_from_reg_to_reg(destination=divider,\n source=tmp_reg)\n divider = tmp_reg\n # so dividend is no accumulator\n tmp_reg = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(destination=dividend,\n source=tmp_reg)\n\n tmp_reg = ProcessorRegister.counter\n value += self.copy_from_reg_to_reg(destination=divider,\n source=tmp_reg)\n divider = tmp_reg\n\n src = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(destination=dividend,\n source=src)\n\n # mov eax -> edx\n src = ProcessorRegister.accumulator\n dest = ProcessorRegister.data\n value += self.copy_from_reg_to_reg(destination=dest,\n source=src)\n\n # shift edx by 31 -> contains the highest bits of the dividend,\n # eax the lowest 31 bits\n value += self.shift(ProcessorRegister.data,\n ShiftMode.right_arithmetic,\n amount=31)\n\n value.append(0xf7) # idiv\n\n mod = 0b11\n rm = get_register_encoding(divider)\n reg = 7 # F7 /7 -> 7 in the reg field\n modr_byte = (mod << 6) + (reg << 3) + (rm << 0)\n value.append(modr_byte)\n\n # the result is stored in the acc register, so copy it to the\n # correct result register if needed\n if destination != ProcessorRegister.accumulator:\n register = ProcessorRegister.accumulator\n value += self.copy_from_reg_to_reg(register, dividend)\n\n return value",
"def __floordiv__(self, scalar) -> 'ModelParameters':\n return ModelParameters([self[idx] // scalar for idx in range(len(self))])",
"def divide(lhs, rhs):\n return _make.divide(lhs, rhs)",
"def __rfloordiv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(floordiv, other)",
"def floordiv_(self, scalar):\n for idx in range(len(self)):\n self.parameters[idx] //= scalar",
"def divide_rhs_by(self, expr, var):\n return self.modify_rhs(expr, u'divide', var)",
"def __div__(self, other):\n\n return self._mul_div(other, div=True)"
] | [
"0.7794921",
"0.73854715",
"0.6798597",
"0.67867917",
"0.6434399",
"0.63413435",
"0.6173485",
"0.6116096",
"0.6076342",
"0.5998175",
"0.5978865",
"0.592204",
"0.59028167",
"0.5873986",
"0.587386",
"0.57993555",
"0.575969",
"0.5733526",
"0.5695951",
"0.56446165",
"0.5617565",
"0.5616356",
"0.56130296",
"0.55906844",
"0.55554515",
"0.554192",
"0.5531506",
"0.5515158",
"0.5514133",
"0.5460812"
] | 0.82317907 | 0 |
Map MXNet's _pow_scalar operator attributes to onnx's Pow operator. Creates a new node for the input scalar value, adds it to the initializer and return multiple created nodes. | def convert_pow_scalar(node, **kwargs):
return scalar_op_helper(node, 'Pow', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.const(factor, dtype=dtype)\n out = _op.power(x, factor)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError, 'expecting a float'\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = quantitative(name, func=self, transform=lambda x: N.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)",
"def convert_square(node, **kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n initializer = kwargs[\"initializer\"]\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n\n power2_name = \"square_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))\n initializer.append(\n onnx.helper.make_tensor(\n name=power2_name,\n data_type=data_type,\n dims=(1,),\n vals=[2],\n raw=False,\n )\n )\n\n input_nodes.append(power2_name)\n\n node = onnx.helper.make_node(\n \"Pow\",\n input_nodes,\n [name],\n name=name\n )\n return [tensor_node, node]",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError('expecting a float')\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = Quantitative(name, func=self, transform=lambda x: np.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __pow__(self, other, **kwargs):\n kwargs.update({'operator': 'pow'})\n return self.__add__(other, **kwargs)",
"def py_pow(x, p, op_version=None):\n return x ** p",
"def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)",
"def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))",
"def __pow__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Pow.apply(self, other)",
"def __pow__(self, power: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(power, float):\n for item in self.values:\n result.append(item ** power)\n else:\n assert len(self.values) == len(power.values)\n for i in range(len(self.values)):\n result.append(self.values[i] ** power.values[i])\n return Simpy(result)",
"def __pow__(self, other, tensor=False):\r\n return self.prod(other, tensor=True)",
"def __pow__(self, other):\n return MyCustomNumber(self.value ** other.value)",
"def __pow__(self, exponent: float) -> PointType:\n return Point(self.x ** exponent, self.y ** exponent)",
"def __pow__(self, power):\n if power == 1:\n return self\n elif power == 0:\n return Polynomial(1)\n\n self.polynomials = {key: val for key, val in self.polynomials.items() if val != 0}\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n\n attributes = {}\n\n # Using Binomial theorem\n n = 0\n m = power\n use_n = True\n\n for k in range(0, power + 1):\n result = self.calculate_combinatorial_number(power, k)\n\n for index, polynomial in self.polynomials.items():\n if use_n:\n result *= pow(polynomial, (power - n))\n n += 1\n use_n = False\n else:\n result *= pow(polynomial, (power + m))\n m -= 1\n use_n = True\n\n attributes[\"x\" + str(n - 1)] = result\n\n return Polynomial(**attributes)",
"def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )",
"def __pow__(self, power, modulo=None):\n if isinstance(power, (int, float, complex, Fraction)) and not isinstance(power, bool):\n return Vector([i ** power for i in self.data], self.column)\n else:\n raise TypeError('power is not a number')",
"def test_op_pow_scalar_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n s = 2\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n cutoff = numpy.empty_like(a)\n cutoff[:] = cutoff_value\n expect = numpy.minimum(pow(a, s), cutoff)\n\n offl_a = stream.bind(a)\n offl_r = pow(offl_a, s)\n r = offl_r.update_host().array\n stream.sync()\n r = numpy.minimum(r, cutoff)\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def pow(space, w_base, w_exponent, w_modulus):\n return space.pow(w_base, w_exponent, w_modulus)",
"def test_op_pow_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 0.7\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = pow(a, s)\n\n offl_a = stream.bind(a)\n offl_r = pow(offl_a, s)\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertEqualEpsilon(r, expect,\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def pow(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"pow(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safePow(value,factor)\n return seriesList",
"def __pow__(self, exponent: int):\n\t\tif exponent < 0:\n\t\t\traise ValueError(\"Negative powers not supported\")\n\t\telif exponent == 0:\n\t\t\treturn SquareMatrix(self._rows, 1)\n\t\telse:\n\t\t\tres = self\n\t\t\tfor i in range(1, exponent):\n\t\t\t\tres *= self\n\t\t\treturn res",
"def pow(self, a: 'PFElement', n: int) -> 'PFElement':\n res = power(a, n)\n if not isinstance(res, PFElement):\n return self.element(res)\n else:\n return res",
"def __mul__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] *= _scalar\n\t\treturn ans",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __ipow__(self, exponent: float) -> PointType:\n self.x **= exponent\n self.y **= exponent\n return self"
] | [
"0.73938245",
"0.71553993",
"0.69053197",
"0.6618425",
"0.6509602",
"0.64128435",
"0.6384842",
"0.6307531",
"0.62721306",
"0.6149648",
"0.6126736",
"0.60709953",
"0.60470843",
"0.5954794",
"0.59512776",
"0.59302795",
"0.5929934",
"0.58751506",
"0.58481187",
"0.58359647",
"0.582202",
"0.57648826",
"0.5731303",
"0.5727342",
"0.5723832",
"0.5708041",
"0.5700529",
"0.5696331",
"0.5695533",
"0.56576586"
] | 0.821569 | 0 |
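One detail the `_pow_scalar` / `_mul_scalar` style records share, visible in the `scalar_op_helper` negatives above, is a constant-folding path: when the node's input is already an initializer, no Pow/Mul/Sub/Div node is emitted and the scalar is folded into a fresh initializer with numpy. A small sketch of that branch, with an assumed weight tensor named "w":

import numpy as np
from onnx import numpy_helper

weights = numpy_helper.from_array(np.array([1.0, 2.0, 3.0], dtype=np.float32), name="w")

# Fold a _power_scalar(scalar=2) directly into the stored data, as the 'Pow' branch does.
folded = numpy_helper.to_array(weights) ** 2.0
new_init = numpy_helper.from_array(folded.astype(np.float32), name="w_pow0")

print(numpy_helper.to_array(new_init))  # [1. 4. 9.]
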
Map MXNet's argmax operator attributes to onnx's ArgMax operator and return the created node. | def convert_argmax(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
keepdims = get_boolean_attribute_value(attrs, "keepdims")
node = onnx.helper.make_node(
'ArgMax',
inputs=input_nodes,
axis=axis,
keepdims=keepdims,
outputs=[name],
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_maximum(node, **kwargs):\n return create_basic_op_node('Max', node, kwargs)",
"def convert_max(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def argmax(module, x, axes=None):\n return module.argmax(x, axes)",
"def local_max_and_argmax(node):\r\n if node.op == T._max_and_argmax:\r\n if len(node.outputs[1].clients) == 0:\r\n #MaxAndArgmax support variable axis,\r\n #but CAReduce support only constant axis.\r\n if node.inputs[1].data is None:\r\n axis = None\r\n else:\r\n try:\r\n axis = get_scalar_constant_value(node.inputs[1])\r\n except NotScalarConstantError:\r\n return False\r\n\r\n new = CAReduce(scal.maximum, axis)(node.inputs[0])\r\n return [new, None]",
"def OpenXmax(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmax(self, *args)",
"def argmax(tensor):\n raise NotImplementedError",
"def OpenXmax(self, *args):\n return _Bnd.Bnd_Box_OpenXmax(self, *args)",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def convert_arg_max_min(g, op, block):\n\n axis = op.attr(\"axis\")\n keepdims = op.attr(\"keepdims\")\n flatten = op.attr(\"flatten\")\n dtype = op.attr(\"dtype\")\n dtype = _convert_dtype_value(dtype)\n\n func = _op.argmax if op.type == \"arg_max\" else _op.argmin\n x = g.get_node(op.input(\"X\")[0])\n if axis is None or flatten:\n x = _op.reshape(x, [-1])\n out = func(x, axis=None, keepdims=True)\n else:\n out = func(x, axis=axis, keepdims=keepdims)\n if dtype != infer_type(out).checked_type.dtype:\n out = _op.cast(out, dtype)\n g.add_node(op.output(\"Out\")[0], out)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.argmax(input_vals[0], node.const_attr)\r\n else:\r\n return np.argmax(input_vals[0])",
"def OpenYmax(self, *args):\n return _Bnd.Bnd_Box_OpenYmax(self, *args)",
"def OpenZmax(self, *args):\n return _Bnd.Bnd_Box_OpenZmax(self, *args)",
"def max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.max, reduce_instance_dims, name)",
"def argmax(self, axis: str = 'rows') -> 'DataFrame':\n return self._stat_funcs('argmax', axis)",
"def OpenYmax(self, *args):\n return _Bnd.Bnd_Box2d_OpenYmax(self, *args)",
"def argmax(self, axis=None):\n return np.argmax(self.data, axis=axis)",
"def add_max(org, kernels, centres):\n return org",
"def argmax(self, state, action):\n return copy.deepcopy(state.object_states[self._objid])",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def argmax2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return self.v.ind2sub(self.t.argmax())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmax())",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)",
"def argmax(self, values):\n return self.aggregate(values, \"argmax\")",
"def get_bprop_argmaxwithvalue(self):\n axis = self.axis\n keep_dims = self.keep_dims\n op = P.ArgMaxWithValue(axis)\n\n def bprop(x, out, dout):\n dx = _argmin_or_argmax_grad(x, axis, keep_dims, op, out, dout)\n return (dx,)\n return bprop",
"def produce_max(self, *args, **kwargs):\n raise NotImplementedError('This interaction has no produce_max method yet!')",
"def argmax(x1, axis=None, out=None):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n if x1_desc:\n if axis is not None:\n pass\n elif out is not None:\n pass\n else:\n result_obj = dpnp_argmax(x1_desc).get_pyobj()\n result = dpnp.convert_single_elem_array_to_scalar(result_obj)\n\n return result\n\n return call_origin(numpy.argmax, x1, axis, out)",
"def x_max(self) -> ir.FloatingValue:\n return ops.GeoXMax(self).to_expr()",
"def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator",
"def bprop_argmax(x, axis, out, dout):\n return (zeros_like(x), zeros_like(axis))",
"def relay_argmax(c, v, dims):\n v = c.ref(v)\n assert dims.is_constant(tuple)\n return relay.cast(relay.argmax(v, axis=dims.value), 'int64')"
] | [
"0.70940566",
"0.6874972",
"0.61902833",
"0.61549073",
"0.61046344",
"0.60914123",
"0.6080087",
"0.5927143",
"0.5927143",
"0.5880221",
"0.58306336",
"0.57700604",
"0.57442945",
"0.57169616",
"0.56744826",
"0.5666791",
"0.56665546",
"0.5664926",
"0.5634875",
"0.5621766",
"0.55798393",
"0.54798603",
"0.54716545",
"0.5450414",
"0.5448129",
"0.54387796",
"0.54198444",
"0.5408342",
"0.539221",
"0.53786385"
] | 0.7944788 | 0 |
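For the reduction-style records such as `argmax` above, the MXNet attributes map one-to-one onto the ONNX node and fix the output's dtype and rank. A minimal sketch (graph name, tensor names, and shapes are assumed for illustration) that places such an ArgMax node in a checked model:

from onnx import TensorProto, checker, helper, shape_inference

# axis/keepdims come straight from the MXNet attrs, as in convert_argmax above.
argmax = helper.make_node("ArgMax", ["scores"], ["best"], name="argmax0", axis=1, keepdims=0)

graph = helper.make_graph(
    [argmax], "argmax_sketch",
    inputs=[helper.make_tensor_value_info("scores", TensorProto.FLOAT, [4, 10])],
    outputs=[helper.make_tensor_value_info("best", TensorProto.INT64, [4])])

model = shape_inference.infer_shapes(helper.make_model(graph))
checker.check_model(model)  # ArgMax always yields int64 indices
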
Map MXNet's argmin operator attributes to onnx's ArgMin operator and return the created node. | def convert_argmin(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
keepdims = get_boolean_attribute_value(attrs, "keepdims")
node = onnx.helper.make_node(
'ArgMin',
inputs=input_nodes,
axis=axis,
keepdims=keepdims,
outputs=[name],
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)",
"def convert_min(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box_OpenXmin(self, *args)",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmin(self, *args)",
"def argmin(tensor):\n raise NotImplementedError",
"def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)",
"def convert_arg_max_min(g, op, block):\n\n axis = op.attr(\"axis\")\n keepdims = op.attr(\"keepdims\")\n flatten = op.attr(\"flatten\")\n dtype = op.attr(\"dtype\")\n dtype = _convert_dtype_value(dtype)\n\n func = _op.argmax if op.type == \"arg_max\" else _op.argmin\n x = g.get_node(op.input(\"X\")[0])\n if axis is None or flatten:\n x = _op.reshape(x, [-1])\n out = func(x, axis=None, keepdims=True)\n else:\n out = func(x, axis=axis, keepdims=keepdims)\n if dtype != infer_type(out).checked_type.dtype:\n out = _op.cast(out, dtype)\n g.add_node(op.output(\"Out\")[0], out)",
"def argmin(self, axis: str = 'rows') -> 'DataFrame':\n return self._stat_funcs('argmin', axis)",
"def OpenZmin(self, *args):\n return _Bnd.Bnd_Box_OpenZmin(self, *args)",
"def argmin(x1, axis=None, out=None):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n if x1_desc:\n if axis is not None:\n pass\n elif out is not None:\n pass\n else:\n result_obj = dpnp_argmin(x1_desc).get_pyobj()\n result = dpnp.convert_single_elem_array_to_scalar(result_obj)\n\n return result\n\n return call_origin(numpy.argmin, x1, axis, out)",
"def argmin2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return self.v.ind2sub(self.t.argmin())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmin())",
"def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()",
"def argmin(self, values):\n return self.aggregate(values, \"argmin\")",
"def MIN(*args):\n return _group_function(min, *args)",
"def argmin(self, array):\n minvalue = inf\n minindex = None\n for i in range(len(array)):\n if array[i] < minvalue:\n minvalue = array[i]\n minindex = i\n return minvalue, minindex",
"def argmin(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmin\")\n return k, cast(pdarray, v)",
"def argmin(a, *args, **kwargs):\n warn('The function argmin is deprecated from JAMS. Use module pyjams.',\n category=DeprecationWarning)\n if isinstance(a, np.ma.MaskedArray):\n return np.ma.argmin(a, *args, **kwargs)\n elif isinstance(a, np.ndarray):\n return np.argmin(a, *args, **kwargs)\n else:\n return _argmin(a)",
"def min(self, axis=None, keepdims=False, out=None):\n return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def MinX(*args, **kwargs):\n return _gdi_.DC_MinX(*args, **kwargs)",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min_(*args, **kwargs):\n ...",
"def produce_min(self, *args, **kwargs):\n raise NotImplementedError('This interaction has no produce_min method yet!')",
"def Min(attribute):\n\n def annot(dn, attrs):\n # We want to return None on an empty attribute rather than throwing\n try:\n return min(attrs.get(attribute, []))\n except ValueError:\n return None\n\n return annot",
"def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX",
"def OpenYmin(self, *args):\n return _Bnd.Bnd_Box_OpenYmin(self, *args)",
"def scalar_min(self, dst, src0, src1):\n return self._scalar_binary_func('min', dst, src0, src1)",
"def produce_min(self, meta, raven_vars, dispatch, t):\n if self._minimum:\n request, meta = self.get_minimum(meta, raven_vars, dispatch, t)\n else:\n request = {next(iter(self.get_inputs())): 0.0} # TODO is this a good choice when no min var avail?\n return request, meta",
"def OpenYmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenYmin(self, *args)",
"def get_bprop_argminwithvalue(self):\n axis = self.axis\n keep_dims = self.keep_dims\n op = P.ArgMinWithValue(axis)\n\n def bprop(x, out, dout):\n dx = _argmin_or_argmax_grad(x, axis, keep_dims, op, out, dout)\n return (dx,)\n return bprop"
] | [
"0.7161309",
"0.7048762",
"0.653032",
"0.6511046",
"0.61759573",
"0.6156072",
"0.61389035",
"0.60431165",
"0.60163444",
"0.5919297",
"0.58403546",
"0.5791767",
"0.57676524",
"0.5744902",
"0.5734494",
"0.57265365",
"0.5693712",
"0.56826967",
"0.5673407",
"0.5659327",
"0.5659327",
"0.5630733",
"0.5622292",
"0.5570864",
"0.5553552",
"0.55525225",
"0.55467254",
"0.5537432",
"0.55151683",
"0.5476647"
] | 0.7900277 | 0 |
Map MXNet's _maximum operator attributes to onnx's Max operator and return the created node. | def convert_maximum(node, **kwargs):
return create_basic_op_node('Max', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_max(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator",
"def max(self):\n node = self\n while node.right:\n node = node.right\n return node",
"def x_max(self) -> ir.FloatingValue:\n return ops.GeoXMax(self).to_expr()",
"def max(self) -> \"Node\":\n current = self\n while current.right is not None:\n current = current.right\n return current",
"def find_max(self):\n return max(self.nodes, key=int)",
"def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value",
"def max_pool(self, x, name=\"\"):\n return tf.nn.max_pool(x, ksize=self.mp_size, strides=self.mp_stride,\n padding=self.mp_padding, name=name)",
"def get_maximum ( self, object ):\n return self.maximum",
"def _max_pool(x):\n return tf.nn.max_pool(value=x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def convert_argmax(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n node = onnx.helper.make_node(\n 'ArgMax',\n inputs=input_nodes,\n axis=axis,\n keepdims=keepdims,\n outputs=[name],\n name=name\n )\n return [node]",
"def _get_maximum(self):\n return self._maximum",
"def _max_pool(self, bottom, name='max_pool'):\n return tf.nn.max_pool(\n bottom,\n ksize=[1, 3, 1, 1],\n strides=[1, 3, 1, 1],\n padding='SAME', name=name)",
"def get_max(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.max, reduce_instance_dims, name)",
"def max():\n return KeeperOfMinOrMax(int.__lt__)",
"def y_max(self) -> ir.FloatingValue:\n return ops.GeoYMax(self).to_expr()",
"def get_signal_maximum(\n data, setup={}, varname=None, gate_min=None, gate_max=None):\n idx = get_index_of_signal_maximum(\n data, setup, varname, gate_min, gate_max)\n nt = range(len(idx))\n\n if varname is None:\n varname = get_\n return data[varname][nt, idx]",
"def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)",
"def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))",
"def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')",
"def _max_weight_operator(ops: Iterable[PauliTerm]) -> Union[None, PauliTerm]:\n mapping = dict() # type: Dict[int, str]\n for op in ops:\n for idx, op_str in op:\n if idx in mapping:\n if mapping[idx] != op_str:\n return None\n else:\n mapping[idx] = op_str\n op = functools.reduce(mul, (PauliTerm(op, q) for q, op in mapping.items()), sI())\n return op",
"def local_max_and_argmax(node):\r\n if node.op == T._max_and_argmax:\r\n if len(node.outputs[1].clients) == 0:\r\n #MaxAndArgmax support variable axis,\r\n #but CAReduce support only constant axis.\r\n if node.inputs[1].data is None:\r\n axis = None\r\n else:\r\n try:\r\n axis = get_scalar_constant_value(node.inputs[1])\r\n except NotScalarConstantError:\r\n return False\r\n\r\n new = CAReduce(scal.maximum, axis)(node.inputs[0])\r\n return [new, None]",
"def max(self):\n if self.right is None:\n return self.item\n else:\n return self.right.max()",
"def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data",
"def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None",
"def maximum(lhs, rhs):\n return _make.maximum(lhs, rhs)",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)"
] | [
"0.70796645",
"0.6651025",
"0.64678437",
"0.6413021",
"0.6399445",
"0.63507855",
"0.63063204",
"0.6268924",
"0.624761",
"0.62005275",
"0.6182359",
"0.6182359",
"0.6120072",
"0.6118509",
"0.61165994",
"0.6114771",
"0.60751885",
"0.6062354",
"0.60444164",
"0.60346764",
"0.6011624",
"0.59863913",
"0.59759945",
"0.597525",
"0.5968861",
"0.5954866",
"0.59469306",
"0.5931341",
"0.5918051",
"0.590856"
] | 0.79425055 | 0 |
Map MXNet's _minimum operator attributes to onnx's Min operator and return the created node. | def convert_minimum(node, **kwargs):
return create_basic_op_node('Min', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_min(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box_OpenXmin(self, *args)",
"def find_min(self):\n return min(self.nodes, key=int)",
"def produce_min(self, meta, raven_vars, dispatch, t):\n if self._minimum:\n request, meta = self.get_minimum(meta, raven_vars, dispatch, t)\n else:\n request = {next(iter(self.get_inputs())): 0.0} # TODO is this a good choice when no min var avail?\n return request, meta",
"def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmin(self, *args)",
"def convert_argmin(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n node = onnx.helper.make_node(\n 'ArgMin',\n inputs=input_nodes,\n axis=axis,\n keepdims=keepdims,\n outputs=[name],\n name=name\n )\n return [node]",
"def min(self):\n node = self\n while node.left:\n node = node.left\n return node",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)",
"def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)",
"def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)",
"def _get_minimum(self):\n return self._minimum",
"def min(self) -> \"Node\":\n current = self\n while current.left is not None:\n current = current.left\n return current",
"def min_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min_nodes\")",
"def min():\n return KeeperOfMinOrMax(int.__gt__)",
"def get_min(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def scalar_min(self, dst, src0, src1):\n return self._scalar_binary_func('min', dst, src0, src1)",
"def min(self):\n return min(self)",
"def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode",
"def min(self):\n p = self._find_min()\n item = p.element()\n return (item._key, item._value)",
"def minimum(self):\n \n omega_star = fmin(self.function, 0, disp=False)[0]\n loss = self.function(omega_star)\n return omega_star, loss",
"def min(self):\n return self.__min",
"def min(self):\n raise NotImplementedError('must be implemented by subclass')",
"def min(self):\n raise NotImplementedError('must be implemented by subclass')",
"def min(self, numeric_only=None):\n assert numeric_only == None\n return self._lift(\"min\")",
"def getXmin(self):\n return min(self.p1.x, self.p2.x)",
"def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value"
] | [
"0.7165784",
"0.66267216",
"0.65062666",
"0.6468117",
"0.6441443",
"0.643317",
"0.63803333",
"0.6362957",
"0.6308952",
"0.628914",
"0.628914",
"0.6285821",
"0.62643987",
"0.6242156",
"0.62365615",
"0.6184856",
"0.618154",
"0.61484843",
"0.6128366",
"0.61151135",
"0.61137015",
"0.6111591",
"0.6107374",
"0.61002207",
"0.6096978",
"0.608322",
"0.608322",
"0.6072541",
"0.6067563",
"0.6042712"
] | 0.79770553 | 0 |
Map MXNet's min operator attributes to onnx's ReduceMin operator and return the created node. | def convert_min(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMin',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMin',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)",
"def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)",
"def produce_min(self, meta, raven_vars, dispatch, t):\n if self._minimum:\n request, meta = self.get_minimum(meta, raven_vars, dispatch, t)\n else:\n request = {next(iter(self.get_inputs())): 0.0} # TODO is this a good choice when no min var avail?\n return request, meta",
"def convert_argmin(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n node = onnx.helper.make_node(\n 'ArgMin',\n inputs=input_nodes,\n axis=axis,\n keepdims=keepdims,\n outputs=[name],\n name=name\n )\n return [node]",
"def min(self, start=0, end=None):\n return super(MinSegmentTree, self).reduce(start, end)",
"def min(self, start=0, end=None):\n return super(MinSegmentTree, self).reduce(start, end)",
"def min(self, start=0, end=None):\n\n return super(MinSegmentTree, self).reduce(start, end)",
"def min(self, start=0, end=None):\n\n return super(MinSegmentTree, self).reduce(start, end)",
"def min(self, start=0, end=None):\n\n return super(MinSegmentTree, self).reduce(start, end)",
"def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()",
"def min(self):\n return self._summarize(DataFrameCpu._cmin)",
"def find_min(self):\n return min(self.nodes, key=int)",
"def min(self, axis=None, keepdims=False, out=None):\n return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)",
"def produce_min(self, meta, raven_vars, dispatch, t):\n if self._minimum:\n request, meta = self.get_minimum(meta, raven_vars, dispatch, t)#[self._minimum]\n request = {self._minimum_var: request[self._minimum_var]}\n else:\n request = {next(iter(self.get_outputs())): 0.0}\n balance, meta = self.produce(request, meta, raven_vars, dispatch, t)\n return balance, meta",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box_OpenXmin(self, *args)",
"def min_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min_nodes\")",
"def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator",
"def min(self):\n node = self\n while node.left:\n node = node.left\n return node",
"def min(self, fn=lambda x: x):\n return _(min(*self._, key=fn))",
"def MIN(*args):\n return _group_function(min, *args)",
"def get_bprop_reducemin(self):\n\n def bprop(x, axis, out, dout):\n dx = _min_or_max_grad(x, axis, out, dout)\n return (dx, zeros_like(axis))\n return bprop",
"def OpenXmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmin(self, *args)",
"def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def cmin(self):\n return self[\"cmin\"]",
"def calculate_ucb_min(self, node):\n pass",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node"
] | [
"0.74088675",
"0.6358284",
"0.61328006",
"0.61307657",
"0.5984475",
"0.5984475",
"0.59366184",
"0.59366184",
"0.59366184",
"0.586159",
"0.58153784",
"0.58141047",
"0.5771208",
"0.5747477",
"0.5725165",
"0.57177365",
"0.56871665",
"0.5682544",
"0.5676312",
"0.5637121",
"0.5634679",
"0.5627777",
"0.5626737",
"0.5573901",
"0.5565013",
"0.5565013",
"0.55368656",
"0.5535628",
"0.5524169",
"0.54997456"
] | 0.76065016 | 0 |
Map MXNet's max operator attributes to onnx's ReduceMax operator and return the created node. | def convert_max(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMax',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMax',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_maximum(node, **kwargs):\n return create_basic_op_node('Max', node, kwargs)",
"def max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.max, reduce_instance_dims, name)",
"def convert_argmax(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n node = onnx.helper.make_node(\n 'ArgMax',\n inputs=input_nodes,\n axis=axis,\n keepdims=keepdims,\n outputs=[name],\n name=name\n )\n return [node]",
"def max_pool(self, x, name=\"\"):\n return tf.nn.max_pool(x, ksize=self.mp_size, strides=self.mp_stride,\n padding=self.mp_padding, name=name)",
"def add_max(org, kernels, centres):\n return org",
"def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator",
"def find_max(self):\n return max(self.nodes, key=int)",
"def max(self):\n node = self\n while node.right:\n node = node.right\n return node",
"def _max_pool(x):\n return tf.nn.max_pool(value=x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')",
"def max(self) -> \"Node\":\n current = self\n while current.right is not None:\n current = current.right\n return current",
"def local_max_and_argmax(node):\r\n if node.op == T._max_and_argmax:\r\n if len(node.outputs[1].clients) == 0:\r\n #MaxAndArgmax support variable axis,\r\n #but CAReduce support only constant axis.\r\n if node.inputs[1].data is None:\r\n axis = None\r\n else:\r\n try:\r\n axis = get_scalar_constant_value(node.inputs[1])\r\n except NotScalarConstantError:\r\n return False\r\n\r\n new = CAReduce(scal.maximum, axis)(node.inputs[0])\r\n return [new, None]",
"def x_max(self) -> ir.FloatingValue:\n return ops.GeoXMax(self).to_expr()",
"def _max_weight_operator(ops: Iterable[PauliTerm]) -> Union[None, PauliTerm]:\n mapping = dict() # type: Dict[int, str]\n for op in ops:\n for idx, op_str in op:\n if idx in mapping:\n if mapping[idx] != op_str:\n return None\n else:\n mapping[idx] = op_str\n op = functools.reduce(mul, (PauliTerm(op, q) for q, op in mapping.items()), sI())\n return op",
"def get_bprop_reducemax(self):\n\n def bprop(x, axis, out, dout):\n dx = _min_or_max_grad(x, axis, out, dout)\n return (dx, zeros_like(axis))\n return bprop",
"def calculate_ucb_max(self, node):\n pass",
"def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')",
"def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))",
"def max(self, axis=None, keepdims=False, out=None):\n return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.argmax(input_vals[0], node.const_attr)\r\n else:\r\n return np.argmax(input_vals[0])",
"def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value",
"def convert_softmax(g, op, block):\n\n axis = op.attr(\"axis\")\n input_shape = block.var(op.input(\"X\")[0]).shape\n if axis < 0:\n axis = len(input_shape) + axis\n x = g.get_node(op.input(\"X\")[0])\n m = _op.max(x, axis, keepdims=True)\n e = _op.exp(x - m)\n out = e / _op.sum(e, axis, keepdims=True)\n g.add_node(op.output(\"Out\")[0], out)",
"def _max_pool(self, bottom, name='max_pool'):\n return tf.nn.max_pool(\n bottom,\n ksize=[1, 3, 1, 1],\n strides=[1, 3, 1, 1],\n padding='SAME', name=name)",
"def produce_max(self, meta, raven_vars, dispatch, t):\n request, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n return request, meta",
"def prop_max_pool(self, activation, relevance, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1]):\n act = tf.expand_dims(activation, 3) # N x M x F x 1\n z = tf.nn.max_pool(act, ksize, strides, padding='SAME') + self.epsilon\n with self.model.graph.as_default():\n rel = tf.expand_dims(relevance, 3)\n s = rel / z\n c = gen_nn_ops.max_pool_grad_v2(act, z, s, ksize, strides, padding='SAME')\n tmp = c * act\n return tf.squeeze(tmp, [3])",
"def max_pooling(self, filter_):\n return self.add_layer(max_pooling, filter_)",
"def _create_max_avg_pool(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support count_include_pad and auto_pad\n if \"count_include_pad\" in onnx_node.attrs or \"ceil_mode\" in onnx_node.attrs:\n raise ValueError(\n \"Not implemented yet for count_include_pad or ceil_mode\")\n\n # only support 2d\n if len(kernel) != 2:\n raise ValueError(\"Not implemented yet\")\n\n is_max = onnx_node.op_type == 'MaxPool'\n x = inputs[0]\n if x.device.id() == -1:\n handle = singa.PoolingHandle(x.data, kernel, stride, padding,\n is_max)\n else:\n handle = singa.CudnnPoolingHandle(x.data, kernel, stride, padding,\n is_max)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)",
"def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)",
"def get_maximum ( self, object ):\n return self.maximum",
"def maxmarginal(self, target, out=None):\n return self.__opReduce2(self.v - target,np.max, out=out)",
"def find_max(self, node):\n current_node = node\n while current_node.right:\n current_node = current_node.right\n return current_node"
] | [
"0.7659286",
"0.6377665",
"0.6145082",
"0.6139816",
"0.6064376",
"0.6055874",
"0.6041896",
"0.6037136",
"0.5989364",
"0.5909807",
"0.5900593",
"0.5855207",
"0.58143365",
"0.5796772",
"0.57937384",
"0.57896626",
"0.5788211",
"0.578123",
"0.5748129",
"0.57333666",
"0.57123965",
"0.5704577",
"0.5692769",
"0.5670914",
"0.5668258",
"0.5641149",
"0.56410253",
"0.5630554",
"0.56269306",
"0.5611825"
] | 0.7726849 | 0 |
Map MXNet's mean operator attributes to onnx's ReduceMean operator and return the created node. | def convert_mean(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMean',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMean',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_reduce_mean_02():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], axes=[1, 2])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, [1, 1, 1, 5])]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()",
"def test_reduce_mean_00():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, (1, 1, 1, 1))]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()",
"def test_reduce_mean_01():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], keepdims=0)\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, [])] # the shape is scalar\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.array(np.mean(input_vals[0], node.const_attr))\r\n else:\r\n return np.array(np.mean(input_vals[0]))",
"def mean(self):\n return self._lift(\"mean\")",
"def _get_u_mean(self, nodelist: List[Tuple[int, int]]) -> Optional[float]:\n meanlist = [self.u_matrix[u_node] for u_node in nodelist]\n u_mean = None\n if self.u_mean_mode_ == \"mean\":\n u_mean = np.mean(meanlist)\n elif self.u_mean_mode_ == \"median\":\n u_mean = np.median(meanlist)\n elif self.u_mean_mode_ == \"min\":\n u_mean = np.min(meanlist)\n elif self.u_mean_mode_ == \"max\":\n u_mean = np.max(meanlist)\n return u_mean",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def mean(self):\n return self._summarize(lambda c: c.mean)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def GetMeanOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetMeanOutput(self, *args)",
"def mean(x, reduce_instance_dims=True, name=None):\n with tf.name_scope(name, 'mean'):\n # Note: Calling `sum` defined in this module, not the builtin.\n return tf.divide(\n sum(x, reduce_instance_dims), size(x, reduce_instance_dims))",
"def GetMeanOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetMeanOutput(self, *args)",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def mean(self):\n return self._mean_func",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def transform(self, mean_node, light_graph):\n self.check_original_node(mean_node, graph_type=self.GRAPH_TYPE)\n\n # Get axes\n axes = self._get_array_from_input_indx(mean_node, light_graph, 1).flatten()\n tf_attr = self._get_tf_attr(mean_node)\n keep_dims = tf_attr[\"keep_dims\"].b\n\n return self.do_generic_transform(mean_node.name,\n mean_node.inputs[0],\n mean_node.outputs[0],\n mean_node.control_inputs,\n axes,\n keep_dims)",
"def getMean(self):\n return self.mean",
"def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)",
"def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)",
"def reduce(nodes):\r\n accum = tc.mean(nodes.mailbox['m'], 1).cuda()\r\n return {'h': accum}",
"def GetMean(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetMean(self, label)",
"def AddClassWithInitialMean(self, mean: 'double') -> \"void\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2IUS2_AddClassWithInitialMean(self, mean)",
"def GetMeanOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF3_GetMeanOutput(self, *args)",
"def GetMeanOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID3_GetMeanOutput(self, *args)"
] | [
"0.6283045",
"0.6170426",
"0.61373425",
"0.58561695",
"0.5818263",
"0.5725782",
"0.5599807",
"0.5562414",
"0.5543096",
"0.5514586",
"0.5514586",
"0.5514586",
"0.5514586",
"0.5514586",
"0.54903483",
"0.5438709",
"0.5438554",
"0.53819174",
"0.53819174",
"0.5381861",
"0.5380131",
"0.5341796",
"0.53084856",
"0.52933306",
"0.52823865",
"0.5274909",
"0.52719504",
"0.52675116",
"0.52556306",
"0.5251676"
] | 0.7579308 | 0 |
Map MXNet's prod operator attributes to onnx's ReduceProd operator and return the created node. | def convert_prod(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceProd',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceProd',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def prod(self, x, y):\n return self.reduce(x + y)",
"def prod(self):\n return self._summarize(lambda c: c.prod)",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def prod(self):\n # skipna == True\n # only_numerical == True\n return self._lift(\"prod\")",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))",
"def map_product(process):\n\n process_params1 = set_extra_values(process['arguments'])\n process_params2 = get_process_params(process['arguments'], {'ignore_nodata': 'bool'})\n \n return map_default(process, 'product', 'reduce', {**process_params1, **process_params2})",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 1\r\n #print(input_vals[0].shape)\r\n #print(node.name)\r\n #print(np.max(input_vals[0]))\r\n #print(np.sum(input_vals[0]))\r\n #assert np.mean(np.array(np.less(input_vals[0],750).astype(float32)))==1\r\n return np.exp(input_vals[0])",
"def compute(self, node, input_vals):\n \"\"\"TODO: Your code here\"\"\"\n assert len(input_vals) == 1\n return input_vals[0] * node.const_attr",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def __init__(self, expr1, expr2, name='mult'):\n super(ProductExpression, self).__init__(e1=expr1, e2=expr2,\n domain=expr1.domain,\n name=name)",
"def convert_reduce(g, op, block):\n\n op_map = {\n \"reduce_all\": \"all\",\n \"reduce_any\": \"any\",\n \"reduce_max\": \"max\",\n \"reduce_min\": \"min\",\n \"reduce_prod\": \"prod\",\n \"reduce_sum\": \"sum\",\n \"reduce_mean\": \"mean\",\n }\n op_name = op_map[op.type]\n input_x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"dim\")\n if op.attr(\"reduce_all\"):\n axis = None\n keepdims = op.attr(\"keep_dim\")\n out = get_relay_op(op_name)(input_x, axis=axis, keepdims=keepdims)\n if not axis and not keepdims:\n # use `expand_dims` to solve the following situation\n # for TVM, the shape of `out` will be (, )\n # for Paddle, the shape of `out` will be [1]\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return input_vals[0] * node.const_attr",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 2\r\n return input_vals[0] * input_vals[1]",
"def convert_max(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMax',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def reduce(self, app, nodes, result):",
"def product(self):\n raise NotImplementedError",
"def prod(self, args):\n assert len(args) > 0, \"Cannot compute an empty product in a semigroup\"\n return prod(args[1:], args[0])",
"def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n \r\n if node.const_attr!=None:\r\n #print(\"hahah\")\r\n shape = tuple(input_vals[1])\r\n oldshape = list(input_vals[0].shape)\r\n for i in node.const_attr:\r\n oldshape.insert(i%(len(oldshape)+1),1)\r\n #print(oldshape)\r\n #print(shape)\r\n return np.array(np.broadcast_to(input_vals[0].reshape(tuple(oldshape)),shape))\r\n #return np.broadcast_to(input_vals[0], node.const_attr)\r\n else:\r\n return np.broadcast_to(input_vals[0], tuple(input_vals[1]))",
"def prod(self, values):\n return self.aggregate(values, \"prod\")",
"def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name"
] | [
"0.62867284",
"0.5835511",
"0.57357043",
"0.5722716",
"0.5617108",
"0.5607751",
"0.55567515",
"0.5480656",
"0.54691935",
"0.543317",
"0.54207885",
"0.5408181",
"0.53855884",
"0.52865595",
"0.5275297",
"0.5250045",
"0.5214458",
"0.52013505",
"0.5191116",
"0.51694757",
"0.5160994",
"0.5151188",
"0.51260686",
"0.5080323",
"0.5077144",
"0.507401",
"0.5058621",
"0.50121737",
"0.500419",
"0.4999477"
] | 0.75327003 | 0 |
Map MXNet's elemwise_add operator attributes to onnx's Add operator and return the created node. | def convert_elementwise_add(node, **kwargs):
return create_basic_op_node('Add', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def __add__(self, other):\r\n if isinstance(other, Node):\r\n new_node = add_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = add_byconst_op(self, other)\r\n return new_node",
"def __add__(self, other):\n if isinstance(other, Node):\n new_node = add_op(self, other)\n else:\n # Add by a constant stores the constant in the new node's const_attr field.\n # 'other' argument is a constant\n new_node = add_byconst_op(self, other)\n return new_node",
"def add_elementwise(self, op, inputs, name=None):\n input_names = [self._maybe_add_const(input, \"elementwise_input\") \\\n for input in inputs]\n return self._build_op(op, input_names, name=name)",
"def append(self):\n return AttributeFunctor(self, lambda a, b: a + b)",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)",
"def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)",
"def __add__(self,that):\n return self.__opExpand2(that,np.add)",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def add(self, elem):",
"def add(self, elem):",
"def add(self):\n return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def convert_addmm(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n\n alpha = op.attr(\"Alpha\")\n beta = op.attr(\"Beta\")\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n\n if not isinstance(alpha, _expr.Expr) and alpha != 1:\n alpha = _expr.const(alpha, dtype)\n x *= alpha\n\n if not isinstance(beta, _expr.Expr) and beta != 1:\n beta = _expr.const(beta, dtype)\n input_x *= beta\n\n transposed_y = _op.transpose(y, axes=[1, 0])\n dense_out = _op.nn.dense(x, transposed_y)\n out = dense_out + input_x\n g.add_node(op.output(\"Out\")[0], out)",
"def __iadd__(self, tensor):\n return self.add_(tensor)",
"def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def add_op_and_inputs(self, op: Operator) -> Operator:\n self.add_op(op)\n for i in op.input_ops.values():\n self.add_op_and_inputs(i)\n\n return op",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)",
"def _append_operator(self, operator):",
"def __add__(self, loss):\n def add(output, target, params):\n return self(output, target, params) + loss(output, target, params)\n return type(self)(type(self).__reserved_init, add, None, f\"({self._str_make()} + {loss._str_make()})\")",
"def __add__(self, tensor):\n return self.add(tensor)",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __iadd__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.add)",
"def __iadd__(self,that):\n #return self.__opExpand1(that,np.add, out=self)\n return self.__opExpand2(that,np.add, out=self)",
"def add(element):"
] | [
"0.7189428",
"0.655496",
"0.6537227",
"0.6472508",
"0.6449052",
"0.64310604",
"0.6380455",
"0.62438035",
"0.6235679",
"0.6205862",
"0.6189562",
"0.6052324",
"0.6052324",
"0.60426134",
"0.60199106",
"0.60036284",
"0.5988608",
"0.59885174",
"0.5946734",
"0.5942936",
"0.5912339",
"0.58903044",
"0.5889427",
"0.5857702",
"0.58476305",
"0.582349",
"0.5821048",
"0.5816298",
"0.5808347",
"0.5804661"
] | 0.79225135 | 0 |
Map MXNet's broadcast_add operator attributes to onnx's Add operator and return the created node. | def covert_broadcast_add(node, **kwargs):
return create_basic_op_node('Add', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def convert_addmm(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n\n alpha = op.attr(\"Alpha\")\n beta = op.attr(\"Beta\")\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n\n if not isinstance(alpha, _expr.Expr) and alpha != 1:\n alpha = _expr.const(alpha, dtype)\n x *= alpha\n\n if not isinstance(beta, _expr.Expr) and beta != 1:\n beta = _expr.const(beta, dtype)\n input_x *= beta\n\n transposed_y = _op.transpose(y, axes=[1, 0])\n dense_out = _op.nn.dense(x, transposed_y)\n out = dense_out + input_x\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def simple_broadcasted_add():\n examples = [\n benchmark.Example(\n inputs=[\n [3, 4, 5],\n [10, 20, 30],\n ],\n output=[[13, 14, 15], [23, 24, 25], [33, 34, 35]],\n ),\n ]\n constants = []\n description = 'Add two tensors with broadcasting'\n target_program = 'tf.add(in1, tf.expand_dims(in2, 1))'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_broadcasted_add')",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def append(self):\n return AttributeFunctor(self, lambda a, b: a + b)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def add(self):\n return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()",
"def convert_scatter_nd_add(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n updates = g.get_node(op.input(\"Updates\")[0])\n indices_dim = len(infer_shape(index))\n axes = list(range(indices_dim))\n index = _op.transpose(index, axes[-1:] + axes[:-1])\n out = _op.scatter_nd(x, index, updates, mode=\"add\")\n g.add_node(op.output(\"Out\")[0], out)",
"def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)",
"def add(self, ConnectedVia=None, Name=None, NoOfDest=None, StackedLayers=None):\n # type: (List[str], str, int, List[str]) -> TwampIpv6\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def __add__(self, other):\r\n if isinstance(other, Node):\r\n new_node = add_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = add_byconst_op(self, other)\r\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)",
"def __add__(self, other):\n if isinstance(other, Node):\n new_node = add_op(self, other)\n else:\n # Add by a constant stores the constant in the new node's const_attr field.\n # 'other' argument is a constant\n new_node = add_byconst_op(self, other)\n return new_node",
"def addbroadcast(x, *axes):\r\n rval = Rebroadcast(*[(axis, True) for axis in axes])(x)\r\n return theano.tensor.opt.apply_rebroadcast_opt(rval)",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __add__(self,that):\n return self.__opExpand2(that,np.add)",
"def addbroadcast(x, *axes):\n if is_theano_object(x):\n # T.addbroadcast only works with positive axes\n axes = [ ax if ax >= 0 else x.ndim + ax for ax in axes ]\n return T.addbroadcast(x, *axes)\n else:\n for ax in axes:\n if x.shape[ax] != 1:\n raise ValueError(\"Tried to make axis {} of a variable with shape {} broadcastable. \"\n \"Only dimensions with length 1 can be broadcasted.\"\n .format(ax, x.shape))\n return x",
"def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def addop(name, fields, args=None, alias=False):\n\n namespace = {\"fields\": fields, \"alias\": alias}\n\n if args is not None:\n namespace[\"args\"] = args\n\n # Dynamically create the \"name\" object\n type(name, (mn_pinky,), namespace)",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def add(self, **kwargs) -> None:\n self.append(Operation(**kwargs))"
] | [
"0.6780478",
"0.63417214",
"0.60001606",
"0.58787817",
"0.5827999",
"0.5784491",
"0.5780997",
"0.5736768",
"0.56432146",
"0.5639515",
"0.5625344",
"0.56026065",
"0.5514841",
"0.5495319",
"0.5493361",
"0.54555446",
"0.54506385",
"0.544259",
"0.543173",
"0.5420061",
"0.541649",
"0.5414843",
"0.5411713",
"0.5405141",
"0.53817767",
"0.537384",
"0.53294015",
"0.5305401",
"0.52978796",
"0.5293326"
] | 0.8338817 | 0 |
Map MXNet's elemwise_sub operator attributes to onnx's Sub operator and return the created node. | def convert_elementwise_sub(node, **kwargs):
return create_basic_op_node('Sub', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def covert_broadcast_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)",
"def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = sub_byconst_op(self, other)\r\n return new_node",
"def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)",
"def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)",
"def sub(self, other, weight=one):\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n \n ops = []\n if isinstance(weight, VarStruct):\n weight = weight.var\n \n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign_sub(weight*other_var))\n return tf.group(*ops, name=\"sub_\"+self.name)",
"def __sub__(self, other):\n return self._operation_sub(self, other)",
"def __sub__(self, tensor):\n return self.sub(tensor)",
"def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def __sub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.sub)",
"def __sub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(self, other)",
"def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def __sub__(self, other, **kwargs):\n kwargs.update({'sub': True})\n return self.__add__(other, **kwargs)",
"def __rsub__(self, other):\n return self._operation_sub(other, self)",
"def visit_Subscript(self, node):\n self.generic_visit(node)\n return node",
"def SUB(self, n1, n2):",
"def __isub__(self,that):\n #return self.__opExpand1(that,np.subtract, out=self)\n return self.__opExpand2(that,np.subtract, out=self)",
"def __sub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] - ox, self[1] - oy))",
"def sub(self, other):\n\n return self._get(\"sub\", other, self.__class__)",
"def __rsub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(other, self)",
"def visit_Subscript(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, self.index_to_expr(node.slice) ]\n return to_call(to_attribute(self.operator, 'getitem'), args)\n return node",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def _sub_op(value, sample_args, rationals_allowed):\n entropy, sample_args = sample_args.peel()\n if rationals_allowed and sample_args.count >= 3:\n x = number.integer_or_rational(entropy, True)\n else:\n x = number.integer(entropy, True)\n if random.choice([False, True]):\n op_args = [x, x - value]\n else:\n op_args = [value + x, x]\n return ops.Sub, op_args, sample_args",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)",
"def subimage(self, *args, **kwargs):\n return _coordsys.coordsys_subimage(self, *args, **kwargs)",
"def __sub__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x-y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"",
"def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)",
"def __sub__(self, oth):\n\t\tif not isinstance(oth, Matrix):\n\t\t\toth = Matrix(oth)\n\t\treturn self._sub(oth)"
] | [
"0.7141842",
"0.68545747",
"0.615163",
"0.6033808",
"0.5878628",
"0.58357406",
"0.5734108",
"0.5729102",
"0.56466043",
"0.56212085",
"0.55548865",
"0.5548119",
"0.54640967",
"0.5426134",
"0.5379119",
"0.5364434",
"0.53603375",
"0.53511447",
"0.5314139",
"0.53106934",
"0.5282491",
"0.52764195",
"0.5267596",
"0.5240762",
"0.52381337",
"0.52272075",
"0.51885545",
"0.51862246",
"0.51810104",
"0.5180685"
] | 0.78317475 | 0 |
Map MXNet's broadcast_sub operator attributes to onnx's Sub operator and return the created node. | def covert_broadcast_sub(node, **kwargs):
return create_basic_op_node('Sub', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elementwise_sub(node, **kwargs):\n return create_basic_op_node('Sub', node, kwargs)",
"def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = sub_byconst_op(self, other)\r\n return new_node",
"def sub(self, other, weight=one):\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n \n ops = []\n if isinstance(weight, VarStruct):\n weight = weight.var\n \n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign_sub(weight*other_var))\n return tf.group(*ops, name=\"sub_\"+self.name)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def local_subtensor_lift(node):\r\n if isinstance(node.op, Subtensor):\r\n u = node.inputs[0]\r\n if not u.owner or len(u.clients) > 1:\r\n return False\r\n\r\n if isinstance(u.owner.op, T.Elemwise) and len(u.owner.inputs) == 1:\r\n idx = node.inputs[1:]\r\n x_idx = node.op(u.owner.inputs[0], *idx)\r\n return [u.owner.op(x_idx)]\r\n\r\n if isinstance(u.owner.op, T.Elemwise):\r\n new_inputs = []\r\n if all([sum(i.type.broadcastable) == 0 for i in u.owner.inputs]):\r\n # There is no broadcastable in the inputs\r\n idx = node.inputs[1:]\r\n new_inputs = [node.op(i, *idx) for i in u.owner.inputs]\r\n return [u.owner.op(*new_inputs)]\r\n elif all([sum(i.type.broadcastable) in [i.ndim, 0]\r\n for i in u.owner.inputs]):\r\n # There is no broadcastable in the inputs or it is scalar\r\n idx = node.inputs[1:]\r\n new_inputs = []\r\n for i in u.owner.inputs:\r\n if sum(i.type.broadcastable) == 0:\r\n new_inputs.append(node.op(i, *idx))\r\n else:\r\n # If the subtensor remove some dims, we must\r\n # lower the number of dimensions of this scalar.\r\n if node.outputs[0].ndim == i.ndim:\r\n new_inputs.append(i)\r\n else:\r\n new_inputs.append(\r\n i.dimshuffle(['x'] * node.outputs[0].ndim))\r\n return [u.owner.op(*new_inputs)]\r\n\r\n if isinstance(u.owner.op, T.Rebroadcast):\r\n # make sure that Rebroadcast has only 1 input\r\n assert len(u.owner.inputs) == 1\r\n\r\n # Subtensor might reduce dim., adapt broadcast pattern accordingly\r\n new_axis = []\r\n\r\n # loop through indices being subtensor-ed\r\n # i indexes broadcastable pattern before subtensor\r\n # j indexes broadcastable pattern after subtensor\r\n j = 0\r\n for (i, x) in enumerate(node.op.idx_list):\r\n # if its not a slice, it will reduce the dimension, should\r\n # not appear in the broascastable dimensions\r\n if isinstance(x, slice):\r\n new_axis += [(j, u.broadcastable[i])]\r\n j += 1\r\n # now keep the broadcastable pattern of all\r\n # items not appearing in subtensor list\r\n for i in xrange(len(node.op.idx_list), len(u.broadcastable)):\r\n new_axis += [(j, u.broadcastable[i])]\r\n j += 1\r\n\r\n subt_x = node.op(u.owner.inputs[0], *node.inputs[1:])\r\n rbcast_subt_x = T.Rebroadcast(*new_axis)(subt_x)\r\n\r\n return [rbcast_subt_x]",
"def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)",
"def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)",
"def subnetwork(self, node_list):\n return Network(adjacency=self.internal_adjacency(node_list),\n directed=self.directed,\n node_weights=self.node_weights[node_list],\n silence_level=self.silence_level)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __sub__(self, other):\n return self._operation_sub(self, other)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node",
"def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)",
"def __sub__(self, tensor):\n return self.sub(tensor)",
"def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def __init__(self, prefix, downstream, upstream, root):\n super(SubGraph, self).__init__(prefix, downstream, upstream, root)",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def __sub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(self, other)",
"def convert_sub(sub):\n\n args = sub.args\n (ref_aa, pos, new_aa) = args\n\n parent_fn_name = sub.parent_function.name_short\n prefix_list = {\"p\": \"p.\", \"r\": \"r.\", \"g\": \"c.\"}\n prefix = prefix_list[parent_fn_name]\n\n new_var_arg = f'\"{prefix}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][ref_aa.value]}{pos.value}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][new_aa.value]}\"'\n\n new_var = Function(\"var\", version=version)\n\n new_var.add_argument(StrArg(new_var_arg, new_var))\n\n return new_var",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def canonicalize_subnetwork_info(name, subnet, instances):\n return Subnetwork(name=name, subnetwork_id=subnet[\"SubnetId\"], cidr_block=subnet[\"CidrBlock\"],\n region=subnet[\"AvailabilityZone\"][:-1],\n availability_zone=subnet[\"AvailabilityZone\"],\n instances=instances)",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)"
] | [
"0.67469776",
"0.58889604",
"0.5360961",
"0.52912056",
"0.52282476",
"0.5209201",
"0.51015747",
"0.5097751",
"0.5091051",
"0.5062901",
"0.5049066",
"0.5000981",
"0.4991342",
"0.49907017",
"0.49763283",
"0.49502683",
"0.4915788",
"0.4915497",
"0.49108106",
"0.48997536",
"0.48655608",
"0.48565257",
"0.4851981",
"0.48515356",
"0.48512468",
"0.48504138",
"0.48464552",
"0.48378658",
"0.48354167",
"0.48279986"
] | 0.789098 | 0 |
Map MXNet's elemwise_mul operator attributes to onnx's Mul operator and return the created node. | def convert_elemwise_mul(node, **kwargs):
return create_basic_op_node('Mul', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)",
"def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)",
"def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)",
"def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)",
"def compute_mul(tree):\r\n neg, inputs = tree\r\n if inputs is None:\r\n raise AssertionError(\r\n 'Function `compute_mul` found a missing leaf, did you forget to '\r\n 'call `simplify_mul` on the tree first?')\r\n elif isinstance(inputs, list):\r\n # Recurse through inputs.\r\n rval = tensor.mul(*map(compute_mul, inputs))\r\n else:\r\n rval = inputs\r\n if neg:\r\n rval = -rval\r\n return rval",
"def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))",
"def local_add_mul_fusion(node):\r\n if (not isinstance(node.op, Elemwise) or\r\n not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul))):\r\n return False\r\n\r\n s_op = node.op.scalar_op.__class__\r\n for inp in node.inputs:\r\n if (inp.owner and\r\n isinstance(inp.owner.op, Elemwise) and\r\n isinstance(inp.owner.op.scalar_op, s_op)):\r\n l = list(node.inputs)\r\n l.remove(inp)\r\n return [node.op(*(l + inp.owner.inputs))]",
"def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]",
"def multiplier(self) -> global___Expression:",
"def __mul__(self, tensor):\n return self.mul(tensor)",
"def mul_elementwise(self, other):\n # XXX: flint matrices do not support elementwise multiplication\n return self.to_ddm().mul_elementwise(other.to_ddm()).to_dfm()",
"def __mul__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.mul)",
"def __mul__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Mul.apply(self, other)",
"def add_matmul(self, input_names, name=None, attr={}):\n return self._build_op('MatMul', input_names, name=name, attr=attr)",
"def py_mul(*x, op_version=None):\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p",
"def __imul__(self, tensor):\n return self.mul_(tensor)",
"def local_mul_to_sqr(node):\r\n if node.op == T.mul:\r\n if len(node.inputs) == 2:\r\n if node.inputs[0] is node.inputs[1]:\r\n return [T.sqr(node.inputs[0])]",
"def __mul__(self, othertr):\n res = self.dot(othertr)\n return res",
"def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node",
"def __mul__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__mul__\")",
"def __mul__(self, other):\r\n return self.prod(other)",
"def mul(self, multiplier):\n result = {}\n for k, v in self.variables.items():\n a, b = self._broadcast(multiplier, v)\n result[k] = a * b\n return MultivariateDerivative(result)",
"def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)"
] | [
"0.7594407",
"0.71869403",
"0.6733486",
"0.66129386",
"0.6552191",
"0.6502875",
"0.6452503",
"0.6406036",
"0.6391522",
"0.6358358",
"0.6305449",
"0.62934154",
"0.62835354",
"0.62716043",
"0.62310976",
"0.6220493",
"0.6211813",
"0.62024266",
"0.6190721",
"0.6168245",
"0.61618495",
"0.6115584",
"0.607381",
"0.60635906",
"0.6054468",
"0.6052397",
"0.6045345",
"0.6030873",
"0.6026313",
"0.60045165"
] | 0.8666627 | 0 |
Map MXNet's broadcast_mul operator attributes to onnx's Mul operator and return the created node. | def convert_broadcast_mul(node, **kwargs):
return create_basic_op_node('Mul', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)",
"def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node",
"def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node",
"def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]",
"def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)",
"def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))",
"def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def mul(self, multiplier):\n result = {}\n for k, v in self.variables.items():\n a, b = self._broadcast(multiplier, v)\n result[k] = a * b\n return MultivariateDerivative(result)",
"def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def __mul__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Mul.apply(self, other)",
"def local_mul_to_sqr(node):\r\n if node.op == T.mul:\r\n if len(node.inputs) == 2:\r\n if node.inputs[0] is node.inputs[1]:\r\n return [T.sqr(node.inputs[0])]",
"def add_matmul(self, input_names, name=None, attr={}):\n return self._build_op('MatMul', input_names, name=name, attr=attr)",
"def local_add_mul_fusion(node):\r\n if (not isinstance(node.op, Elemwise) or\r\n not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul))):\r\n return False\r\n\r\n s_op = node.op.scalar_op.__class__\r\n for inp in node.inputs:\r\n if (inp.owner and\r\n isinstance(inp.owner.op, Elemwise) and\r\n isinstance(inp.owner.op.scalar_op, s_op)):\r\n l = list(node.inputs)\r\n l.remove(inp)\r\n return [node.op(*(l + inp.owner.inputs))]",
"def __mul__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__mul__\")",
"def __mul__(self, tensor):\n return self.mul(tensor)",
"def _matmul_broadcast(x, y, name):\n with tf.variable_scope(name) as scope:\n return tf.reduce_sum(\n tf.nn.dropout(x[..., tf.newaxis] * y[..., tf.newaxis, :, :],1), axis=-2\n )",
"def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)",
"def py_mul(*x, op_version=None):\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p",
"def multiply(self, layer):\n pass",
"def __mul__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.mul)",
"def __mul__(self, scalar):\n m, n = self.shape\n scalar = mpfr(scalar)\n data = dict()\n for i in range(m):\n for j in range(n):\n data[i, j] = self[i, j] * scalar\n return MPMatrix((m, n), data)",
"def mul(A, b):\n return A.from_rep(A.rep.mul(b))",
"def mul(x, y, pub):\n x_shape = x.shape\n y_shape = y.shape\n if x_shape == y_shape:\n x_flatten = np.flatten(x)\n y_flatten = np.flatten(y)\n res = paillier_gpu.mul_impl(x_flatten, y_flatten)\n return np.reshape(res, x_shape)\n else:\n brd_x, brd_y = brdcst(x, y)\n return mul(brd_x, brd_y, pub)",
"def multiplier(self) -> global___Expression:"
] | [
"0.7861241",
"0.6676384",
"0.6386201",
"0.63332176",
"0.62079525",
"0.6138245",
"0.6137143",
"0.61355305",
"0.6135341",
"0.6122188",
"0.6114543",
"0.60985154",
"0.60596746",
"0.5996618",
"0.5970572",
"0.5940501",
"0.5925935",
"0.5879273",
"0.5866603",
"0.58360845",
"0.58312863",
"0.5821811",
"0.5806842",
"0.57869905",
"0.5738968",
"0.5723348",
"0.5708423",
"0.56921065",
"0.5682568",
"0.5669209"
] | 0.8551312 | 0 |
Map MXNet's elemwise_div operator attributes to onnx's Div operator and return the created node. | def convert_elemwise_div(node, **kwargs):
return create_basic_op_node('Div', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def __div__(self,that):\n return self.__opExpand2(that, np.divide)",
"def __div__(self, other, **kwargs):\n kwargs.update({'operator': 'div'})\n return self.__add__(other, **kwargs)",
"def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n shape = tuple(input_vals[1])\r\n divby = 1\r\n if node.const_attr!=None:\r\n oldshape = list(input_vals[0].shape)\r\n #print(\"hahah\")\r\n for i in node.const_attr:\r\n oldshape.insert(i%(len(oldshape)+1),1)\r\n divby *= shape[i]\r\n #print(oldshape)\r\n #print(shape)\r\n return np.array(np.broadcast_to(input_vals[0].reshape(tuple(oldshape)),shape))/divby\r\n #return np.broadcast_to(input_vals[0], node.const_attr)\r\n else:\r\n for i in shape:\r\n divby *= i\r\n return np.broadcast_to(input_vals[0], shape)/divby",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def __div__(self, tensor):\n return self.div(tensor)",
"def __div__(self, oth):\n\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] / oth, self.rows, self.cols)",
"def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)",
"def divmod_node(self, mpi_procs, omp_threads):\n return divmod(mpi_procs * omp_threads, self.cores_per_node)",
"def div(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_div(a.value, b.value, self.multiplicative_group))",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return node.const_attr / input_vals[0]",
"def HDivDiv(mesh, **args):\n return FESpace(\"hdivdiv\", mesh, **args)",
"def test_evaluate_div_expression(self):\n value = self.evaluate_common(\"4M div 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 2, \"Expected 2\")\n value = self.evaluate_common(\"4D div 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n try:\n value = self.evaluate_common(\"4D div 0\")\n self.fail(\"Division by zero\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"4F div 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n value = self.evaluate_common(\"5 div 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 2, \"Expected 2L\")\n value = self.evaluate_common(\"-5 div 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == -2, \"Expected -2L\")\n try:\n value = self.evaluate_common(\"4 div '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"4 div null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")",
"def __div__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"/\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"/ scalar)\"\n x /= value\n return x",
"def __div__(self, other):\r\n T = type(other)\r\n # mat4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x/other, self.mlist))\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return input_vals[0] / node.const_attr",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def __div__(self, other):\r\n T = type(other)\r\n # vec4/scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x/other, self.y/other, self.z/other, self.w/other)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for /\"",
"def __div__(self, other):\n\n return self._mul_div(other, div=True)",
"def divide_rhs_by(self, expr, var):\n return self.modify_rhs(expr, u'divide', var)",
"def __div__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during division of {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Divide(self, other)",
"def __rfloordiv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(floordiv, other)",
"def div(a, b):\n c = Calculator()\n result = c.div(a, b)\n click.echo('{} / {} = {}'.format(a, b, result))",
"def div(self, other):\n\n return self._get(\"div\", other, self.__class__)",
"def divmod(self, other, **kwargs):\n return SeriesDefault.register(pandas.Series.divmod)(self, other=other, **kwargs)"
] | [
"0.7340376",
"0.6443447",
"0.61537325",
"0.592873",
"0.5718753",
"0.5646812",
"0.5636024",
"0.5619774",
"0.5601933",
"0.5586622",
"0.5423298",
"0.54130757",
"0.5413029",
"0.5365055",
"0.52879655",
"0.52837497",
"0.52705294",
"0.5257432",
"0.5253376",
"0.5204898",
"0.5200385",
"0.5200385",
"0.5170775",
"0.51688415",
"0.5149901",
"0.51471144",
"0.51464045",
"0.513762",
"0.5136993",
"0.5128625"
] | 0.8170008 | 0 |
Map MXNet's broadcast_div operator attributes to onnx's Div operator and return the created node. | def convert_broadcast_div(node, **kwargs):
return create_basic_op_node('Div', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)",
"def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def __div__(self,that):\n return self.__opExpand2(that, np.divide)",
"def divmod_node(self, mpi_procs, omp_threads):\n return divmod(mpi_procs * omp_threads, self.cores_per_node)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __div__(self, other, **kwargs):\n kwargs.update({'operator': 'div'})\n return self.__add__(other, **kwargs)",
"def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)",
"def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n shape = tuple(input_vals[1])\r\n divby = 1\r\n if node.const_attr!=None:\r\n oldshape = list(input_vals[0].shape)\r\n #print(\"hahah\")\r\n for i in node.const_attr:\r\n oldshape.insert(i%(len(oldshape)+1),1)\r\n divby *= shape[i]\r\n #print(oldshape)\r\n #print(shape)\r\n return np.array(np.broadcast_to(input_vals[0].reshape(tuple(oldshape)),shape))/divby\r\n #return np.broadcast_to(input_vals[0], node.const_attr)\r\n else:\r\n for i in shape:\r\n divby *= i\r\n return np.broadcast_to(input_vals[0], shape)/divby",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)",
"def HDivDiv(mesh, **args):\n return FESpace(\"hdivdiv\", mesh, **args)",
"def div(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_div(a.value, b.value, self.multiplicative_group))",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def __div__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"/\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"/ scalar)\"\n x /= value\n return x",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def div(a, b):\n c = Calculator()\n result = c.div(a, b)\n click.echo('{} / {} = {}'.format(a, b, result))",
"def __div__(self, oth):\n\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] / oth, self.rows, self.cols)",
"def convert_broadcast_equal(node, **kwargs):\n return create_basic_op_node('Equal', node, kwargs)",
"def __div__(self, tensor):\n return self.div(tensor)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def convert_broadcast_lesser(node, **kwargs):\n return create_basic_op_node('Less', node, kwargs)",
"def broadcast() -> BroadcastDistribute:\n return _broadcast",
"def div(self, other):\n\n return self._get(\"div\", other, self.__class__)",
"def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)",
"def __rfloordiv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(floordiv, other)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node"
] | [
"0.7091991",
"0.5654483",
"0.54506725",
"0.5246034",
"0.5235014",
"0.5234837",
"0.5166161",
"0.5122407",
"0.51005995",
"0.5100523",
"0.5087641",
"0.50812745",
"0.5039023",
"0.50275385",
"0.49929944",
"0.49497214",
"0.49223348",
"0.49145442",
"0.48808396",
"0.4876805",
"0.48750106",
"0.48690403",
"0.48553202",
"0.48245952",
"0.48168746",
"0.4814175",
"0.48098266",
"0.47904593",
"0.47892365",
"0.47850946"
] | 0.81881297 | 0 |
Map MXNet's negative operator attributes to onnx's Neg operator and return the created node. | def convert_negative(node, **kwargs):
return create_basic_op_node('Neg', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __neg__(self):\n return UnaryMinus(self)",
"def __neg__(self) -> ColumnOperators:\n return self.operate(neg)",
"def __neg__(self):\n return type(self)(self.parent(), self._simplify(-self._express))",
"def __neg__(self):\n data = [[-self[i, j] for j in range(self.n)] for i in range(self.m)]\n return self.__class__(self.m, self.n, data)",
"def __neg__(self):\n return TensorWithIndices(-self._tensor, \n self._con + '_' + self._cov)",
"def convert_logical_not(node, **kwargs):\n return create_basic_op_node('Not', node, kwargs)",
"def get_negative(self):\r\n return Literal(self.label, not self.positive_state)",
"def negIP(self):\n np.negative(self.t, out=self.t)\n return self",
"def _negation_op(spec, expression):",
"def __neg__(self):\n return self.neg()",
"def __neg__(self):\n\t\tval = -self.val\n\t\tder = -self.der if len(self.der.shape) else None\n\t\treturn Var(val, der)",
"def neg(self):\n return self._new_rep(-self.rep)",
"def __neg__(self):\r\n return mat4(map(lambda x: -x, self.mlist))",
"def neg(A):\n return A.from_rep(A.rep.neg())",
"def __neg__(self):\n return self.__mul__(-1)",
"def __neg__(self):\n return self.negated()",
"def __neg__(self):\n return (-1)*self",
"def __neg__(self):\n retval = self.copy()\n retval._val = -retval._val\n return retval",
"def __neg__(self):\n return Vector(-self.x, -self.y)",
"def __neg__(self):\n return Vector(-self.x, -self.y)",
"def __neg__(self):\n return self.coeff_mul(-1)",
"def __neg__(self):\n return tuple.__new__(Vec2, (-self[0], -self[1]))",
"def negate(val: PipeNumeric):\n num_type = val.get_type()\n assert isinstance(num_type, num.SignedFixedNumberType)\n\n if isinstance(val, PipeConstant):\n return PipeConstant(num_type, -val.get_value())\n\n node = OneCycleNode()\n\n node.add_inputs(val=val)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('fixed-negate')\n node.set_logic(negate_seq)\n\n return node",
"def __neg__(self) -> PointType:\n return self * -1",
"def __neg__(self):\n return Quantity(-(self._value), self.unit)",
"def __neg__(self):\n return 0 - self",
"def negated(self):\n ops = {Eq: Ne, Ge: Lt, Gt: Le, Le: Gt, Lt: Ge, Ne: Eq}\n # If there ever will be new Relational subclasses, the following line\n # will work until it is properly sorted out\n # return ops.get(self.func, lambda a, b, evaluate=False: ~(self.func(a,\n # b, evaluate=evaluate)))(*self.args, evaluate=False)\n return Relational.__new__(ops.get(self.func), *self.args)",
"def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)",
"def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression",
"def __invert__(self):\n return self.__neg__()"
] | [
"0.71537274",
"0.69829476",
"0.69523317",
"0.6824377",
"0.67790425",
"0.6737146",
"0.67330045",
"0.67264277",
"0.6704793",
"0.66476816",
"0.65888995",
"0.6529035",
"0.6525028",
"0.6494333",
"0.64887315",
"0.6466599",
"0.6442492",
"0.6382106",
"0.6371845",
"0.6371845",
"0.63510394",
"0.63418204",
"0.6329239",
"0.62874156",
"0.6285955",
"0.6276334",
"0.62571573",
"0.6244915",
"0.62408537",
"0.6233091"
] | 0.7796946 | 0 |
Map MXNet's abs operator attributes to onnx's Abs operator and return the created node. | def convert_abs(node, **kwargs):
return create_basic_op_node('Abs', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abs(self) -> LinearOperator:\n return self.__class__(self._diag.abs())",
"def __abs__(self):\n out = self.copy()\n out.addFunction(Query.Function.Abs)\n return out",
"def abs(self):\n\n return self._get(\"abs\", rtype=self.__class__)",
"def __init__(self):\n GinacFunction.__init__(self, \"abs\", latex_name=r\"\\mathrm{abs}\",\n conversions=dict(sympy='Abs'))",
"def abs(self) -> LinearOperator:\n return ConstantDiagLinearOperator(self.diag_values.abs(), diag_shape=self.diag_shape)",
"def scalar_abs(self, dst, src):\n return self._scalar_single_func('abs', dst, src)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def toabs(self, value, isworld=-1):\n return _coordsys.coordsys_toabs(self, value, isworld)",
"def __abs__(self):\n return type(self)(abs(self.number))",
"def __abs__ (self) :\n return self.__class__ (abs (self.degrees))",
"def __abs__(self):\n retval = self.copy()\n retval._val = abs(retval._val)\n return retval",
"def __abs__(self):\n\t\tval = abs(self.val)\n\t\tif 0 in self.val:\n\t\t\traise ValueError(\"Absolute value is not differentiable at 0.\")\n\n\t\tder_copy = np.copy(self.der)\n\t\tif len(der_copy.shape):\n\t\t\tfor i, val_i in enumerate(self.val):\n\t\t\t\tif val_i < 0:\n\t\t\t\t\tder_copy[i] = -1 * der_copy[i]\n\t\treturn Var(val, der_copy)",
"def handle_abs(self):\n # pylint: disable=no-member\n x_raw = self.microbit.accelerometer.get_x()\n y_raw = self.microbit.accelerometer.get_y()\n x_abs = ('Absolute', 0x00, x_raw)\n y_abs = ('Absolute', 0x01, y_raw)\n return x_abs, y_abs",
"def abs(self):\n\n return Number.abs(self)",
"def __abs__ (self) :\n return self.__class__ (abs (self.radians))",
"def __abs__(self):\n if self.value == NEG:\n return TRIT_POS\n else:\n return self",
"def abs(self: FrameLike) -> FrameLike:\n\n def abs(psser: \"Series\") -> Union[\"Series\", Column]:\n if isinstance(psser.spark.data_type, BooleanType):\n return psser\n elif isinstance(psser.spark.data_type, NumericType):\n return psser._with_new_scol(\n F.abs(psser.spark.column), field=psser._internal.data_fields[0]\n )\n else:\n raise TypeError(\n \"bad operand type for abs(): {} ({})\".format(\n spark_type_to_pandas_dtype(psser.spark.data_type),\n psser.spark.data_type.simpleString(),\n )\n )\n\n return self._apply_series_op(abs)",
"def local_abs_lift(node):\r\n if node.op == T.abs_ and node.inputs[0].owner:\r\n assert node.nin == 1\r\n if node.inputs[0].owner.op == T.mul:\r\n return [T.mul(*[T.abs_(i) for i in node.inputs[0].owner.inputs])]\r\n if node.inputs[0].owner.op == T.true_div:\r\n i = node.inputs[0].owner.inputs\r\n return [T.true_div(T.abs_(i[0]), T.abs_(i[1]))]",
"def __abs__(self):\r\n raise TypeError(f\"bad operand type for abs(): '{type(self).__name__}'\")",
"def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs",
"def abs(self):\n return DataFrameDefault.register(pandas.DataFrame.abs)(self)",
"def __abs__( self ):\r\n\t\tif ( self < 0 ): return -self\r\n\t\telse: return self",
"def abs(self):\n return self * self.sign()",
"def local_abs_merge(node):\r\n if node.op == T.mul and sum([i.owner.op == T.abs_ for i in node.inputs\r\n if i.owner]) > 1:\r\n inputs = []\r\n for i in node.inputs:\r\n if i.owner and i.owner.op == T.abs_:\r\n inputs.append(i.owner.inputs[0])\r\n elif isinstance(i, Constant):\r\n try:\r\n const = get_scalar_constant_value(i)\r\n except NotScalarConstantError:\r\n return False\r\n if not (const >= 0).all():\r\n return False\r\n inputs.append(i)\r\n else:\r\n return False\r\n return [T.abs_(T.mul(*inputs))]\r\n if node.op == T.true_div and sum([i.owner.op == T.abs_ for i in\r\n node.inputs if i.owner]) == 2:\r\n return [T.abs_(T.true_div(node.inputs[0].owner.inputs[0],\r\n node.inputs[1].owner.inputs[0]))]",
"def convert_atan(node, **kwargs):\n return create_basic_op_node('Atan', node, kwargs)",
"def abs_(a):",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def abs(f):\n return f.per(dmp_abs(f.rep, f.lev, f.dom))",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression"
] | [
"0.6505",
"0.6213608",
"0.6065252",
"0.592394",
"0.58516395",
"0.5665779",
"0.5517811",
"0.5490736",
"0.5476933",
"0.54744714",
"0.54476523",
"0.54003453",
"0.5390359",
"0.5326909",
"0.5319726",
"0.5315074",
"0.53031343",
"0.52716726",
"0.5261375",
"0.5250854",
"0.524636",
"0.5227312",
"0.52090055",
"0.519997",
"0.51838773",
"0.5169894",
"0.51612055",
"0.5141382",
"0.51391935",
"0.5095987"
] | 0.7698864 | 0 |
Map MXNet's add_n operator attributes to onnx's Sum operator and return the created node. | def convert_addn(node, **kwargs):
return create_basic_op_node('Sum', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def ADD (self, n1, n2):",
"def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def __add__(self, other):\r\n if isinstance(other, Node):\r\n new_node = add_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = add_byconst_op(self, other)\r\n return new_node",
"def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)",
"def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)",
"def __add__(self, other):\n if isinstance(other, Node):\n new_node = add_op(self, other)\n else:\n # Add by a constant stores the constant in the new node's const_attr field.\n # 'other' argument is a constant\n new_node = add_byconst_op(self, other)\n return new_node",
"def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def add_n():\n pass",
"def addNode( self, n, **attr ):\n self._G.add_node(n, attr)",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)",
"def sum(n):\n times = lambda x: jnp.sum(x, keepdims=True, axis=0)\n trans = lambda x: jnp.repeat(x, n, axis=0)\n return Operator(times=times, trans=trans, shape=(1,n))",
"def add(self, x):\n self.sum += x\n self.n += 1",
"def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)",
"def n(label):\n global id\n node = pydot.Node(name=id, obj_dict=None, label=label)\n id += 1\n graph.add_node(node)\n return node",
"def __add__(self, n):\n return _elas.SwigPyIterator___add__(self, n)",
"def get(self):\n args = single_parser.parse_args()\n n1 = args.n\n m1 = args.m\n r = summation(n1, m1)\n print(r)\n return {\"add\": r}",
"def add(n1, n2):\n return n1 + n2",
"def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out",
"def __add__(self,that):\n return self.__opExpand2(that,np.add)",
"def append(self):\n return AttributeFunctor(self, lambda a, b: a + b)",
"def add(n1, n2):\n return n1 + n2",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def get_bprop_scalar_addn(self):\n\n def bprop(x, out, dout):\n dx = ()\n for _ in range(len(x)):\n dx = dx + (dout,)\n return dx\n return bprop",
"def convert_addmm(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n\n alpha = op.attr(\"Alpha\")\n beta = op.attr(\"Beta\")\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n\n if not isinstance(alpha, _expr.Expr) and alpha != 1:\n alpha = _expr.const(alpha, dtype)\n x *= alpha\n\n if not isinstance(beta, _expr.Expr) and beta != 1:\n beta = _expr.const(beta, dtype)\n input_x *= beta\n\n transposed_y = _op.transpose(y, axes=[1, 0])\n dense_out = _op.nn.dense(x, transposed_y)\n out = dense_out + input_x\n g.add_node(op.output(\"Out\")[0], out)",
"def add(m, n):\n return lambda f: lambda x: m(f)(\n n(f)(x)\n )",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node"
] | [
"0.66162986",
"0.65958875",
"0.65007085",
"0.6439678",
"0.6250043",
"0.5785631",
"0.5775108",
"0.57710975",
"0.57699406",
"0.57460105",
"0.5735643",
"0.57265383",
"0.5694182",
"0.5615365",
"0.5607123",
"0.56009924",
"0.5595946",
"0.5591358",
"0.5585656",
"0.5575787",
"0.5536213",
"0.55171555",
"0.5512672",
"0.5501437",
"0.5496856",
"0.5493054",
"0.5469145",
"0.5463753",
"0.54595774",
"0.54501766"
] | 0.79465455 | 0 |
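The add_n entry above goes through the generic create_basic_op_node helper, so the emitted ONNX node is simply a variadic Sum over all of the operator's inputs. A minimal sketch of that node built directly with onnx.helper, where the tensor and node names are illustrative rather than taken from any real model:

import onnx

# "Sum" is the ONNX operator that MXNet's add_n maps to; add_n is variadic,
# so every MXNet input becomes an input of the single Sum node.
sum_node = onnx.helper.make_node(
    "Sum",
    inputs=["a", "b", "c"],   # hypothetical input tensor names
    outputs=["addn0"],        # hypothetical output/node name
    name="addn0",
)
print(onnx.helper.printable_node(sum_node))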
Map MXNet's ceil operator attributes to onnx's Ceil operator and return the created node. | def convert_ceil(node, **kwargs):
return create_basic_op_node('Ceil', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n BuiltinFunction.__init__(self, \"ceil\",\n conversions=dict(maxima='ceiling',\n sympy='ceiling'))",
"def __ceil__(self, ???):",
"def ceil(raw_tensor):\n dst_dtype = \"int32\"\n\n return cast_op(raw_tensor, dst_dtype, \"elewise_single_ceil\")",
"def getCeiling(self):\n if self.ceiling is not None:\n return self.ceiling\n # preserve backwards compatability for zenpacks\n for dp in self.graphPoints():\n if dp.meta_type == 'DataPointGraphPoint' and dp.format:\n try:\n lhs = dp.format.split(\".\")[0][-1]\n rhs = dp.format.split(\".\")[1][0]\n return int(lhs) - int(rhs)\n except (IndexError, ValueError, TypeError):\n # unable to parse the format just continue\n # or use the default\n pass\n return 3",
"def ceil(x):\r\n # see decorator for function body\r",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def __ceil__(self, *args, **kwargs): # real signature unknown\n pass",
"def ceil(data):\n return _make.ceil(data)",
"def __call__(self, x, **kwds):\n maximum_bits = kwds.get('maximum_bits', 20000)\n try:\n return x.ceil()\n except AttributeError:\n if isinstance(x, (int, long)):\n return Integer(x)\n elif isinstance(x, (float, complex)):\n return Integer(int(math.ceil(x)))\n elif type(x).__module__ == 'numpy':\n import numpy\n return numpy.ceil(x)\n\n from sage.rings.all import RealIntervalField\n\n bits = 53\n while bits < maximum_bits:\n try:\n x_interval = RealIntervalField(bits)(x)\n except TypeError:\n # If we cannot compute a numerical enclosure, leave the\n # expression unevaluated.\n return BuiltinFunction.__call__(self, SR(x))\n try:\n return x_interval.unique_ceil()\n except ValueError:\n bits *= 2\n\n try:\n return ceil(SR(x).full_simplify().canonicalize_radical())\n except ValueError:\n pass\n\n raise ValueError(\"computing ceil(%s) requires more than %s bits of precision (increase maximum_bits to proceed)\"%(x, maximum_bits))",
"def xpathCeilingFunction(self, nargs):\n libxml2mod.xmlXPathCeilingFunction(self._o, nargs)",
"def convert_floor(node, **kwargs):\n return create_basic_op_node('Floor', node, kwargs)",
"def ceil(x):\n return 0.0",
"def convert_pooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel = eval(attrs[\"kernel\"])\n pool_type = attrs[\"pool_type\"] if attrs.get(\"pool_type\") else \"max\"\n stride = eval(attrs[\"stride\"]) if attrs.get(\"stride\") else (1, 1)\n global_pool = get_boolean_attribute_value(attrs, \"global_pool\")\n p_value = attrs.get('p_value', 'None')\n\n pooling_convention = attrs.get('pooling_convention', 'valid')\n ceil_mode = False\n if pooling_convention == 'full':\n if onnx.__version__ < \"1.5.0\":\n pooling_warning = \"Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. \" \\\n \"This might lead to shape or accuracy issues. \" \\\n \"https://github.com/onnx/onnx/issues/549\"\n ceil_mode = True\n logging.warning(pooling_warning)\n\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n pad_dims = pad_dims + pad_dims\n pool_types = {\"max\": \"MaxPool\", \"avg\": \"AveragePool\", \"lp\": \"LpPool\"}\n global_pool_types = {\"max\": \"GlobalMaxPool\", \"avg\": \"GlobalAveragePool\",\n \"lp\": \"GlobalLpPool\"}\n\n if pool_type == 'lp' and p_value == 'None':\n raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool')\n\n if global_pool:\n if pool_type == 'lp':\n node = onnx.helper.make_node(\n global_pool_types[pool_type],\n input_nodes, # input\n [name],\n p=int(p_value),\n name=name\n )\n else:\n node = onnx.helper.make_node(\n global_pool_types[pool_type],\n input_nodes, # input\n [name],\n name=name\n )\n else:\n if pool_type == 'lp':\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n p=int(p_value),\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name\n )\n else:\n if onnx.__version__ >= \"1.5.0\":\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name,\n ceil_mode=ceil_mode\n )\n else:\n node = onnx.helper.make_node(\n pool_types[pool_type],\n input_nodes, # input\n [name],\n kernel_shape=kernel,\n pads=pad_dims,\n strides=stride,\n name=name\n )\n\n return [node]",
"def ceil_div_offline(value, factor):\n return ((value) + (factor)-1) // (factor)",
"def ceil_inplace(a):",
"def ceil(x):\n # if x is within MACHINE_EPS of an integer, return that integer\n if abs(x - round(x)) < MACHINE_EPS:\n return round(x)\n # otherwise, return the ceiling of x\n return math.ceil(x)"
] | [
"0.57651013",
"0.5506392",
"0.5357824",
"0.5333224",
"0.5305275",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.52933913",
"0.51081866",
"0.50534457",
"0.49789116",
"0.49027023",
"0.4799571",
"0.4653408",
"0.46025816",
"0.4554841",
"0.43762"
] | 0.76702064 | 0 |
Map MXNet's floor operator attributes to onnx's Floor operator and return the created node. | def convert_floor(node, **kwargs):
return create_basic_op_node('Floor', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_floor(tile):\n rtn = bmesh.new()\n rtn.from_object(bpy.data.objects['FLOOR_CENTER'], bpy.context.scene)\n BmeshFactory.add_floor_corners(rtn, tile)\n rtn.from_object(bpy.data.objects[tile.terrain_type().name], bpy.context.scene)\n BmeshFactory.add_ceiling_center_below(rtn, tile)\n return rtn",
"def floor(self):\n return self.container['floor']",
"def floor(self, floor):\n\n self.container['floor'] = floor",
"def floor(raw_tensor):\n dst_dtype = \"int32\"\n\n return cast_op(raw_tensor, dst_dtype, \"elewise_single_floor\")",
"def set_floor_indicator_light(self):\n\t\tif self.currentFloor & 0x01:\n\t\t\tio.set_bit(OUTPUT.FLOOR_IND1, 1)\n\t\telse:\n\t\t\tio.set_bit(OUTPUT.FLOOR_IND1, 0)\n\t\tif self.currentFloor & 0x02:\n\t\t\tio.set_bit(OUTPUT.FLOOR_IND2, 1)\n\t\telse:\n\t\t\tio.set_bit(OUTPUT.FLOOR_IND2, 0)",
"def create_floor(self):\n def _on_enter(obj):\n return lambda: obj.play_blink(duration=1, loop=True)\n def _on_leave(obj):\n return lambda: obj.play_fadeout(duration=1)\n\n cell_size = self.map.get_cell_size()\n for x in xrange(0, self.map_size[0]):\n for y in xrange(0, self.map_size[1]):\n obj = Floor(\n parent=self.map,\n style={\n 'width': cell_size,\n 'height': cell_size,\n 'z-index': layers['floor'] }\n )\n # Make it blinking when the player stays on it\n make_trackingfloor(self, obj, x, y, \n on_enter=_on_enter(obj),\n on_leave=_on_leave(obj)\n )\n self.map.add_node(obj, x, y)",
"def floor(self):\n return self._floor",
"def __init__(self, floor=0):\n self._floor = floor",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def floor(expr: vecpy.base.Expr):\n return vecpy.function(\"floor\", expr)",
"def findRandomFloorNode(self):\n\t\tx = random.randint(0, self.width - 1)\n\t\ty = random.randint(0, self.height - 1)\n\t\twhile not self.isFloor(x, y):\n\t\t\tx = random.randint(0, self.width - 1)\n\t\t\ty = random.randint(0, self.height - 1)\n\t\treturn (x, y)",
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def _create_clip(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n if op.min is not None:\n node.input.append(op.name + \":min\")\n else:\n node.input.append(\"\")\n if op.max is not None:\n node.input.append(op.name + \":max\")\n else:\n node.input.append(\"\")\n return node",
"def setFloor(self, x, y):\n\t\tself.setValue(x, y, self.floor_char)",
"def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)",
"def changeFlooring():\r\n\tif tileFloor.getVisible():\r\n\t\ttileFloor.visible(viz.OFF)\r\n\t\thardwoodFloor.visible(viz.ON)\r\n\telse:\r\n\t\ttileFloor.visible(viz.ON)\r\n\t\thardwoodFloor.visible(viz.OFF)",
"def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)",
"def xpathFloorFunction(self, nargs):\n libxml2mod.xmlXPathFloorFunction(self._o, nargs)",
"def __init__(self):\n BuiltinFunction.__init__(self, \"floor\",\n conversions=dict(sympy='floor'))",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass",
"def __floor__(self, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.5913754",
"0.5866849",
"0.5621953",
"0.5610637",
"0.55931014",
"0.5583222",
"0.54438764",
"0.53761",
"0.53067064",
"0.5256313",
"0.5121537",
"0.50940347",
"0.5093765",
"0.5074465",
"0.50180596",
"0.50060546",
"0.49971378",
"0.49704736",
"0.49595678",
"0.49277905",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693",
"0.49110693"
] | 0.76645154 | 0 |
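Because ceil and floor both take the same basic-op path, a one-node graph is enough to sanity-check the mapping. A minimal sketch, assuming a fixed [1, 3] float input and illustrative tensor names, that wraps a single Floor node into a model and validates it with the ONNX checker:

import onnx
from onnx import TensorProto, helper

inp = helper.make_tensor_value_info("data", TensorProto.FLOAT, [1, 3])
out = helper.make_tensor_value_info("floor0", TensorProto.FLOAT, [1, 3])
node = helper.make_node("Floor", ["data"], ["floor0"], name="floor0")
graph = helper.make_graph([node], "floor_example", [inp], [out])
model = helper.make_model(graph)
onnx.checker.check_model(model)  # raises if the single-operator graph is malformed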
Map MXNet's Reshape operator attributes to onnx's Reshape operator. Converts the output shape attribute to an output shape tensor and returns multiple created nodes. | def convert_reshape(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
output_shape_list = convert_string_to_list(attrs["shape"])
initializer = kwargs["initializer"]
output_shape_np = np.array(output_shape_list, dtype='int64')
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
dims = np.shape(output_shape_np)
output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=output_shape_name,
data_type=data_type,
dims=dims,
vals=output_shape_list,
raw=False,
)
)
input_nodes.append(output_shape_name)
not_supported_shape = [-2, -3, -4]
for val in output_shape_list:
if val in not_supported_shape:
raise AttributeError("Reshape: Shape value not supported in ONNX", val)
reshape_node = onnx.helper.make_node(
"Reshape",
input_nodes,
[name],
name=name
)
return [tensor_node, reshape_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_helper_reshape_node(input_name, output_name, shape, kwargs):\n shape_tensor_node, = create_helper_tensor_node(\n np.asarray(shape, dtype=np.int64), output_name + \"__shape\", kwargs\n )\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[input_name, shape_tensor_node.name],\n outputs=[output_name],\n name=output_name\n )\n\n return [shape_tensor_node, reshape_node]",
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def _create_reshape(cls, onnx_node, inputs, opset_version):\n shape = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(shape)",
"def add_reshape(self, input_name, shape, name=None, attr={}):\n return self._build_op('Reshape', [input_name, shape], name=name)",
"def _special_handle_reshape(cls, op, X, W):\n node_name = op.name + \":shape\"\n return [\n numpy_helper.from_array(np.array(op.shape, dtype=np.int64),\n node_name)\n ]",
"def convert_reshape(g, op, block):\n\n input_shape = op.input(\"Shape\")\n input_shape_tensor = op.input(\"ShapeTensor\")\n data = g.get_node(op.input(\"X\")[0])\n if input_shape:\n new_shape = g.get_node(input_shape[0])\n elif input_shape_tensor:\n new_shape = []\n for shape_name in input_shape_tensor:\n shape = g.get_node(shape_name)\n if len(infer_shape(shape)) == 0:\n shape = _op.reshape(shape, [-1])\n new_shape.append(shape)\n new_shape = _op.concatenate(new_shape, axis=0)\n new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())\n if infered:\n new_shape = new_shape.tolist()\n else:\n new_shape = op.attr(\"shape\")\n out = _op.reshape(data, new_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def parse_reshape(\n cls, reshape_attributes: ReshapeLayerAttributes\n ) -> Tuple[DIMENSION_MAP, DIMENSION_MAP, ReshapeMode]:\n input_shape_not_cut = reshape_attributes.input_shape\n output_shape_not_cut = reshape_attributes.output_shape\n\n in_indexes_not_cut_map = [i for i, dim in enumerate(input_shape_not_cut) if dim != 1]\n out_indexes_not_cut_map = [i for i, dim in enumerate(output_shape_not_cut) if dim != 1]\n\n input_shape = list(filter(lambda x: x != 1, input_shape_not_cut))\n output_shape = list(filter(lambda x: x != 1, output_shape_not_cut))\n\n in_idx = 0\n out_idx = 0\n in_map = {}\n out_map = {}\n\n mode = ReshapeMode.DEFAULT\n\n while in_idx < len(input_shape) and out_idx < len(output_shape):\n if input_shape[in_idx] == output_shape[out_idx]:\n in_map[in_idx] = [out_idx]\n out_map[out_idx] = [in_idx]\n elif input_shape[in_idx] > output_shape[out_idx]:\n res, out_idx = cls._map_dims_(\n source_array=input_shape,\n target_array=output_shape,\n source_idx=in_idx,\n start_target_idx=out_idx,\n source_to_target_map=in_map,\n target_to_source_map=out_map,\n )\n if not res or mode == ReshapeMode.SHRINK:\n return None, None, ReshapeMode.DEFAULT\n mode = ReshapeMode.EXTEND\n else:\n res, in_idx = cls._map_dims_(\n source_array=output_shape,\n target_array=input_shape,\n source_idx=out_idx,\n start_target_idx=in_idx,\n source_to_target_map=out_map,\n target_to_source_map=in_map,\n )\n if not res or mode == ReshapeMode.EXTEND:\n return None, None, ReshapeMode.DEFAULT\n mode = ReshapeMode.SHRINK\n in_idx += 1\n out_idx += 1\n\n if mode == ReshapeMode.DEFAULT:\n mode = ReshapeMode.IDENTITY_WITHOUT_ONES\n\n in_map_not_cut = cls._convert_to_not_cut(in_indexes_not_cut_map, out_indexes_not_cut_map, in_map)\n out_map_not_cut = cls._convert_to_not_cut(out_indexes_not_cut_map, in_indexes_not_cut_map, out_map)\n return in_map_not_cut, out_map_not_cut, mode",
"def reshape(self, *shape):\n return F.Reshape.apply(self, shape)",
"def reshape(x, shape):\n return Reshape(shape)(x)",
"def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)",
"def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)",
"def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)",
"def reshape(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y",
"def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = nest.flatten(target_shape)",
"def _reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output",
"def reshape(tensor, newshape):\n raise NotImplementedError",
"def add_input_and_output_shape(self, input_shape, output_shape):",
"def _eager_reshape(tensor, shape, ctx):\n attr_t = tensor._datatype_enum() # pylint: disable=protected-access\n attr_tshape, (shape,) = execute.args_to_matching_eager(\n [shape], ctx, [dtypes.int32, dtypes.int64], dtypes.int32)\n inputs_flat = [tensor, shape]\n attrs = (\"T\", attr_t, \"Tshape\", attr_tshape)\n [result] = execute.execute(\n b\"Reshape\", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)\n return result",
"def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))",
"def reshape(self, shape, ndim=None):\r\n\r\n if ndim is not None:\r\n if not isinstance(ndim, int):\r\n raise ValueError(\"Expected ndim to be an integer, is \" +\r\n str(type(ndim)))\r\n\r\n return theano.tensor.basic.reshape(self, shape, ndim=ndim)",
"def reshape(self, new_shape):\n return self.__class__(pos=self.pos.reshape(new_shape),\n vel=self.vel.reshape(new_shape),\n frame=self.frame)",
"def __init__(self, incoming, shape, name='ReshapeLayer'):\n super(ReshapeLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.shape = shape\n self.out = tf.zeros(self.get_output_shape())\n self.name = name",
"def test_jax_Reshape_concrete_shape():\n a = vector(\"a\")\n x = reshape(a, a.shape)\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])\n\n x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])",
"def compute_output_shape(self, input_shape):\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]",
"def output_shape(self):\n raise NotImplementedError",
"def augment_graph(self):\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n # When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.\n # To make the code simple, we always let keepdims to be 1.\n keepdims = 1\n\n # Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)\n reduce_output = tensor_name + \"_\" + reduce_op_name\n intermediate_output = reduce_output + \"_Reshape\"\n reduce_node = onnx.helper.make_node(\n reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output\n )\n\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[intermediate_output, reshape_shape_name],\n outputs=[reduce_output],\n name=intermediate_output,\n )\n\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))\n\n for tensor in tensors:\n add_reduce_min_max(tensor, \"ReduceMin\")\n add_reduce_min_max(tensor, \"ReduceMax\")\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )",
"def local_reshape_lift(node):\r\n if (isinstance(node.op, T.Reshape) and\r\n node.inputs[0].owner and\r\n isinstance(node.inputs[0].owner.op, T.Elemwise) and\r\n len(node.inputs[0].owner.inputs) == 1):\r\n r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])\r\n e = node.inputs[0].owner.op(r)\r\n return [e]",
"def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]",
"def reshape(self, *shape):\n return Signal(self._initial_value.reshape(*shape),\n name=\"%s.reshape(%s)\" % (self.name, shape),\n base=self.base)",
"def compute_output_shape(self, input_shape):\r\n return input_shape"
] | [
"0.7556744",
"0.7085336",
"0.7027707",
"0.67682624",
"0.67622006",
"0.67400914",
"0.65949494",
"0.65602094",
"0.6389615",
"0.6363775",
"0.62704414",
"0.61320436",
"0.6128499",
"0.6113789",
"0.5977804",
"0.5916986",
"0.58968294",
"0.5860646",
"0.58554107",
"0.5846493",
"0.58376455",
"0.58133507",
"0.5808323",
"0.57981336",
"0.5795353",
"0.57619846",
"0.5711903",
"0.56979144",
"0.5673069",
"0.5668712"
] | 0.77601105 | 0 |
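The Reshape record above shows a recurring export pattern: an MXNet attribute (here the target shape) has to become a real int64 tensor that is appended to the graph initializers and wired in as the operator's second input, because ONNX Reshape takes the shape as data rather than as an attribute. A condensed sketch of that pattern with assumed names:

import numpy as np
from onnx import TensorProto, helper

shape = np.array([0, -1], dtype=np.int64)   # example target shape (no -2/-3/-4 values)
shape_tensor = helper.make_tensor(          # would be appended to kwargs["initializer"]
    name="reshape_attr_tensor0",
    data_type=TensorProto.INT64,
    dims=shape.shape,
    vals=shape.tolist(),
)
reshape_node = helper.make_node(
    "Reshape",
    ["data", "reshape_attr_tensor0"],       # data plus the shape initializer
    ["reshape0"],
    name="reshape0",
)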
Map MXNet's slice_axis operator attributes to onnx's Slice operator and return the created node. | def convert_slice_axis(node, **kwargs):
name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True)
axes = int(attrs.get("axis"))
starts = int(attrs.get("begin"))
ends = attrs.get("end", None)
if not ends or ends == 'None':
# ONNX doesn't support None for ends. Since ends=None depicts
# length of dimension, passing dimension in this case.
in_shape = input_shapes[0]
ends = in_shape[axes]
export_nodes = []
starts = np.atleast_1d(np.asarray(starts, dtype=np.int))
ends = np.atleast_1d(np.asarray(ends, dtype=np.int))
axes = np.atleast_1d(np.asarray(axes, dtype=np.int))
starts_node = create_helper_tensor_node(starts, name + '__starts', kwargs)
export_nodes.extend(starts_node)
starts_node = starts_node[-1].name
ends_node = create_helper_tensor_node(ends, name + '__ends', kwargs)
export_nodes.extend(ends_node)
ends_node = ends_node[-1].name
axes_node = create_helper_tensor_node(axes, name + '__axes', kwargs)
export_nodes.extend(axes_node)
axes_node = axes_node[-1].name
input_node = input_nodes[0]
node = onnx.helper.make_node(
"Slice",
[input_node, starts_node, ends_node, axes_node],
[name],
name=name,
)
export_nodes.extend([node])
return export_nodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')\n end = new_attr.get('end')\n axes = new_attr.get('axis', tuple(range(len(begin))))\n slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])\n if len(axes) > 1:\n for i, axis in enumerate(axes):\n slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])\n return slice_op",
"def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)",
"def _create_slice(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n starts = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n ends = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n # sometime onnx may ignore these two inputs, axes and step\n if len(inputs) >= 2 and onnx_node.inputs[3] != '':\n axes = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n else:\n axes = None\n steps = tensor.to_numpy(inputs.pop(1)).astype(\n np.int32).tolist() if len(inputs) >= 2 else None\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(starts, ends, axes, steps)",
"def _special_handle_slice(cls, op, X, W):\n tensor_list = []\n # slice add starts, ends, axes, steps\n append_inputs = {\n \"starts\": op.starts,\n \"ends\": op.ends,\n \"axes\": op.axes,\n \"steps\": op.steps,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list",
"def convert_slice_channel(node, **kwargs):\n name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True)\n\n num_outputs = int(attrs.get(\"num_outputs\"))\n axis = int(attrs.get(\"axis\", 1))\n squeeze_axis = int(attrs.get(\"squeeze_axis\", 0))\n\n if squeeze_axis == 1 and num_outputs == 1:\n node = onnx.helper.make_node(\n \"Squeeze\",\n input_nodes,\n [name],\n axes=[axis],\n name=name,\n )\n return [node]\n elif squeeze_axis == 0 and num_outputs > 1:\n in_shape = input_shapes[0]\n split = in_shape[axis] // num_outputs\n node = onnx.helper.make_node(\n \"Split\",\n input_nodes,\n [name+'_output'+str(i) for i in range(num_outputs)],\n axis=axis,\n split=[split for _ in range(num_outputs)],\n name=name,\n )\n return [node]\n else:\n raise NotImplementedError(\"SliceChannel operator with num_outputs>1 and\"\n \"squeeze_axis true is not implemented.\")",
"def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]",
"def convert_slice(g, op, block):\n\n data = g.get_node(op.input(\"Input\")[0])\n dims = len(infer_shape(data))\n\n axes = op.attr(\"axes\")\n indices = _expr.const(axes, dtype=\"int64\")\n\n decrease_axis = op.attr(\"decrease_axis\")\n if isinstance(decrease_axis, int):\n decrease_axis = [decrease_axis]\n\n if op.input(\"StartsTensor\"):\n starts = g.get_node(op.input(\"StartsTensor\")[0])\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n elif op.input(\"StartsTensorList\"):\n starts = []\n for start_index in op.input(\"StartsTensorList\"):\n start_index = g.get_node(start_index).astype(\"int64\")\n starts.append(start_index)\n starts = _op.concatenate(starts, axis=0)\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n else:\n starts = op.attr(\"starts\")\n\n if len(axes) < dims:\n if isinstance(starts, _expr.Expr):\n starts = _op.scatter_elements(\n _op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),\n indices,\n starts,\n axis=0,\n )\n else:\n base = [0] * dims\n for i, axis in enumerate(axes):\n base[axis] = starts[i]\n starts = base\n\n if op.input(\"EndsTensor\"):\n ends = g.get_node(op.input(\"EndsTensor\")[0])\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n elif op.input(\"EndsTensorList\"):\n ends = []\n for end_index in op.input(\"EndsTensorList\"):\n end_index = g.get_node(end_index).astype(\"int64\")\n ends.append(end_index)\n ends = _op.concatenate(ends, axis=0)\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n else:\n ends = op.attr(\"ends\")\n\n if len(axes) < dims:\n if isinstance(ends, _expr.Expr):\n ends = _op.scatter_elements(\n _expr.const(\n np.array([np.iinfo(np.int32).max] * dims),\n dtype=infer_type(ends).checked_type.dtype,\n ),\n indices,\n ends,\n axis=0,\n )\n else:\n base = [np.iinfo(np.int32).max] * dims\n for i, axis in enumerate(axes):\n base[axis] = ends[i]\n ends = base\n\n strides = None\n if \"StridesTensor\" in op.input_names and op.input(\"StridesTensor\"):\n strides = g.get_node(op.input(\"StridesTensor\")[0])\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif \"StridesTensorList\" in op.input_names and op.input(\"StridesTensorList\"):\n strides = []\n for strides_index in op.input(\"StridesTensorList\"):\n strides_index = g.get_node(strides_index).astype(\"int64\")\n strides.append(strides_index)\n strides = _op.concatenate(strides, axis=0)\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif op.has_attr(\"strides\"):\n strides = op.attr(\"strides\")\n\n if len(axes) < dims:\n if isinstance(strides, _expr.Expr):\n strides = _op.scatter_elements(\n _expr.const(np.array([1] * dims), dtype=infer_type(strides).checked_type.dtype),\n indices,\n strides,\n axis=0,\n )\n elif strides:\n base = [1] * dims\n for i, axis in enumerate(axes):\n base[axis] = strides[i]\n strides = base\n if not strides:\n strides = _op.const([1] * dims, dtype=\"int64\")\n\n out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)\n out_shape = infer_shape(out)\n if decrease_axis and len(out_shape) > 1:\n out = _op.squeeze(out, axis=decrease_axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def getslice(self, *args, **kwargs):\n return _image.image_getslice(self, *args, **kwargs)",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def as_slice(self):\n # slice for accessing arrays of values\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def slice_ty(ty : MIRType) -> 'MIRSliceType':\n return MIRSliceType(ty)",
"def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___getslice__(self, *args)",
"def convert_crop(node, **kwargs):\n\n name, inputs, attrs = get_inputs(node, kwargs)\n\n start = np.array([0, 0, 0, 0], dtype=np.int) # index是int类型\n\n export_nodes = []\n\n start_node = create_helper_tensor_node(start, name + '__starts', kwargs)\n export_nodes.extend(start_node)\n start_node = start_node[-1].name\n shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape')\n export_nodes.extend(shape_node)\n shape_node = shape_node[-1].name\n\n crop_node = onnx.helper.make_node(\n \"Slice\",\n inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'], # data、start、end\n outputs=[name],\n name=name\n )\n\n logging.warning(\n \"Using an experimental ONNX operator: Crop. \" \\\n \"Its definition can change.\")\n export_nodes.extend([crop_node])\n\n return export_nodes",
"def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2___getslice__(self, *args)",
"def getslice(arr: tf.Tensor, slice: tf.Tensor, axis: int) -> tf.Tensor:\n if arr is None:\n return None\n return tf.boolean_mask(arr, slice, axis=axis)",
"def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()",
"def slice_layer(start, end, step=None, axis=1):\n if axis < 0:\n raise ValueError(\"'slice_layer' can only work on a specified axis > 0\")\n\n def slice_func(x):\n slices = [slice(None)] * axis\n slices.append(slice(start, end, step))\n return x[tuple(slices)]\n\n return Lambda(slice_func)",
"def _slice_along_axis(x, start=0, stop=None, step=1, axis=0):\n axis_ = tf.get_static_value(axis)\n if axis_ is None: # Non-static axis: use `gather`.\n axis_len = ps.shape(x)[axis]\n start = 0 if start is None else start if start >= 0 else start + axis_len\n stop = axis_len if stop is None else stop if stop >= 0 else stop + axis_len\n return tf.gather(x, tf.range(start, stop, delta=step), axis=axis)\n\n # Static axis: construct explicit slice sequence.\n axis = int(axis_)\n if axis >= 0:\n slices = [slice(None)] * axis + [slice(start, stop, step)]\n else:\n slices = [Ellipsis, slice(start, stop, step)] + [slice(None)] * (-1 - axis)\n return x[tuple(slices)]",
"def slicing(self, name, slicer, axis='y'):\n for n in name:\n if self._is_array_item(n):\n raise ValueError('Cannot slice on array items.')\n if 'rules' not in self._meta['columns'][n]:\n self._meta['columns'][n]['rules'] = {'x': {}, 'y': {}}\n if not isinstance(slicer, list): slicer = [slicer]\n sl = self._clean_codes_against_meta(n, slicer)\n rule_update = {'slicex': {'values': sl}}\n for ax in axis:\n self._meta['columns'][n]['rules'][ax].update(rule_update)\n return None",
"def getSlice(properties=None, **kw):",
"def apply_slice(*, value : Any, slice : slice) -> Any:\n return value[slice]",
"def __getslice__(self, i, j):\n return OutputGroup(list.__getslice__(self, i, j))",
"def __getslice__(self,i,j):\n return self.x[i:j]",
"def _get_slice(index, axis, num_axes):\n idx = [slice(None)] * num_axes\n idx[axis] = index\n return tuple(idx)",
"def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))",
"def ToXMLElement(self, dataset):\n slice_element = xml.etree.ElementTree.Element('slice')\n slice_element.set('id', self.slice_id)\n\n dimension_mapping_elements = []\n metric_mapping_elements = []\n\n for dimension_ref in self.dimension_refs:\n dimension = dataset.GetConcept(dimension_ref)\n\n new_dimension = xml.etree.ElementTree.Element('dimension')\n new_dimension.set('concept', dimension.concept_id)\n slice_element.append(new_dimension)\n\n # Handle dimension->column mappings\n if dimension.concept_id in self.dimension_map:\n dimension_mapping_element = (\n xml.etree.ElementTree.Element('mapDimension'))\n dimension_mapping_element.set('concept', dimension.concept_id)\n dimension_mapping_element.set('toColumn',\n self.dimension_map[dimension.concept_id])\n dimension_mapping_elements.append(dimension_mapping_element)\n\n for metric_ref in self.metric_refs:\n metric = dataset.GetConcept(metric_ref)\n\n new_metric = xml.etree.ElementTree.Element('metric')\n new_metric.set('concept', metric.concept_id)\n slice_element.append(new_metric)\n\n # Handle metric->column metrics\n if metric.concept_id in self.metric_map:\n metric_mapping_element = (\n xml.etree.ElementTree.Element('mapMetric'))\n metric_mapping_element.set('concept', metric.concept_id)\n metric_mapping_element.set('toColumn',\n self.metric_map[metric.concept_id])\n metric_mapping_elements.append(metric_mapping_element)\n\n if self.table_ref:\n slice_table = xml.etree.ElementTree.Element('table')\n slice_table.set('ref', self.table_ref)\n\n for mapping_element in (\n dimension_mapping_elements + metric_mapping_elements):\n slice_table.append(mapping_element)\n\n slice_element.append(slice_table)\n\n return slice_element",
"def special_slice(self, form):\n obj = self.reallyCompile(form[1])\n rest = form[2:]\n if len(rest) == 1:\n return ast.Subscript(obj, 'OP_APPLY', [self.reallyCompile(rest[0])])\n elif len(rest) == 2:\n return ast.Slice(obj, 'OP_APPLY', *self.compileForms(rest))\n elif len(rest) == 3:\n return ast.Subscript(obj, 'OP_APPLY', [ast.Sliceobj(self.compileForms(rest))])\n else:\n raise SyntaxError(\"Too many thingies to slice! %r\" % rest)",
"def get_slice(self, node_id, nodes_in_slice, is_origin=False, is_reverse=False):\n if is_reverse:\n return self.get_reverse_slice(node_id, nodes_in_slice)\n return self.get_level_element_slice(node_id, nodes_in_slice, is_origin=is_origin)",
"def _view_roi(array, original_area_slice, axis):\n axis += 1\n sl = (slice(None),) * axis + original_area_slice[axis:]\n return array[sl]"
] | [
"0.71705914",
"0.68219244",
"0.6615894",
"0.6356295",
"0.6166811",
"0.6157213",
"0.61249214",
"0.5888455",
"0.58519363",
"0.58514374",
"0.57256836",
"0.57235664",
"0.5691767",
"0.5674897",
"0.5673684",
"0.5644206",
"0.56324863",
"0.5606119",
"0.5604062",
"0.56020904",
"0.55815357",
"0.5552159",
"0.55401754",
"0.55359745",
"0.552994",
"0.54741997",
"0.54734325",
"0.5471102",
"0.5460762",
"0.5426837"
] | 0.7049499 | 1 |
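The slice_axis record targets the opset-10 and later form of Slice, in which begin, end and axis are no longer attributes but 1-D int64 tensors passed as extra inputs. A minimal sketch of that wiring with illustrative names, using numpy_helper for the constant tensors that the converter builds through its helper-node functions:

import numpy as np
from onnx import helper, numpy_helper

starts = numpy_helper.from_array(np.array([1], dtype=np.int64), "slice0__starts")
ends = numpy_helper.from_array(np.array([4], dtype=np.int64), "slice0__ends")
axes = numpy_helper.from_array(np.array([0], dtype=np.int64), "slice0__axes")
slice_node = helper.make_node(
    "Slice",
    ["data", "slice0__starts", "slice0__ends", "slice0__axes"],
    ["slice0"],
    name="slice0",
)
# starts/ends/axes are constant tensors that would live in the graph initializers.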
Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split operator based on squeeze_axis attribute and return the created node. | def convert_slice_channel(node, **kwargs):
name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True)
num_outputs = int(attrs.get("num_outputs"))
axis = int(attrs.get("axis", 1))
squeeze_axis = int(attrs.get("squeeze_axis", 0))
if squeeze_axis == 1 and num_outputs == 1:
node = onnx.helper.make_node(
"Squeeze",
input_nodes,
[name],
axes=[axis],
name=name,
)
return [node]
elif squeeze_axis == 0 and num_outputs > 1:
in_shape = input_shapes[0]
split = in_shape[axis] // num_outputs
node = onnx.helper.make_node(
"Split",
input_nodes,
[name+'_output'+str(i) for i in range(num_outputs)],
axis=axis,
split=[split for _ in range(num_outputs)],
name=name,
)
return [node]
else:
raise NotImplementedError("SliceChannel operator with num_outputs>1 and"
"squeeze_axis true is not implemented.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\")\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes)",
"def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def _create_slice(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n starts = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n ends = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n # sometime onnx may ignore these two inputs, axes and step\n if len(inputs) >= 2 and onnx_node.inputs[3] != '':\n axes = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n else:\n axes = None\n steps = tensor.to_numpy(inputs.pop(1)).astype(\n np.int32).tolist() if len(inputs) >= 2 else None\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(starts, ends, axes, steps)",
"def _fix_squeeze(self, inputs, new_attr):\n axes = new_attr.get('axis')\n op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)\n for i in axes[1:]:\n op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1)\n return op",
"def convert_slice_axis(node, **kwargs):\n name, input_nodes, input_shapes, attrs = get_inputs(node, kwargs, with_shapes=True)\n\n axes = int(attrs.get(\"axis\"))\n starts = int(attrs.get(\"begin\"))\n ends = attrs.get(\"end\", None)\n if not ends or ends == 'None':\n # ONNX doesn't support None for ends. Since ends=None depicts\n # length of dimension, passing dimension in this case.\n in_shape = input_shapes[0]\n ends = in_shape[axes]\n\n export_nodes = []\n\n starts = np.atleast_1d(np.asarray(starts, dtype=np.int))\n ends = np.atleast_1d(np.asarray(ends, dtype=np.int))\n axes = np.atleast_1d(np.asarray(axes, dtype=np.int))\n\n starts_node = create_helper_tensor_node(starts, name + '__starts', kwargs)\n export_nodes.extend(starts_node)\n starts_node = starts_node[-1].name\n\n ends_node = create_helper_tensor_node(ends, name + '__ends', kwargs)\n export_nodes.extend(ends_node)\n ends_node = ends_node[-1].name\n\n axes_node = create_helper_tensor_node(axes, name + '__axes', kwargs)\n export_nodes.extend(axes_node)\n axes_node = axes_node[-1].name\n\n input_node = input_nodes[0]\n node = onnx.helper.make_node(\n \"Slice\",\n [input_node, starts_node, ends_node, axes_node],\n [name],\n name=name,\n )\n export_nodes.extend([node])\n\n return export_nodes",
"def convert_squeeze(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = attrs.get(\"axis\", None)\n if not axis:\n raise AttributeError(\"Squeeze: Missing axis attribute: ONNX currently requires axis to \"\n \"be specified for squeeze operator\")\n axis = convert_string_to_list(axis)\n\n node = onnx.helper.make_node(\n \"Squeeze\",\n input_nodes,\n [name],\n axes=axis,\n name=name,\n )\n return [node]",
"def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')\n end = new_attr.get('end')\n axes = new_attr.get('axis', tuple(range(len(begin))))\n slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])\n if len(axes) > 1:\n for i, axis in enumerate(axes):\n slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])\n return slice_op",
"def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node",
"def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)",
"def convert_squeeze(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axes = op.attr(\"axes\")\n if not axes:\n axes = None\n x = _op.squeeze(x, axis=axes)\n g.add_node(op.output(\"Out\")[0], x)",
"def convert_crop(node, **kwargs):\n\n name, inputs, attrs = get_inputs(node, kwargs)\n\n start = np.array([0, 0, 0, 0], dtype=np.int) # index是int类型\n\n export_nodes = []\n\n start_node = create_helper_tensor_node(start, name + '__starts', kwargs)\n export_nodes.extend(start_node)\n start_node = start_node[-1].name\n shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape')\n export_nodes.extend(shape_node)\n shape_node = shape_node[-1].name\n\n crop_node = onnx.helper.make_node(\n \"Slice\",\n inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'], # data、start、end\n outputs=[name],\n name=name\n )\n\n logging.warning(\n \"Using an experimental ONNX operator: Crop. \" \\\n \"Its definition can change.\")\n export_nodes.extend([crop_node])\n\n return export_nodes",
"def convert_unsqueeze(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axes = sorted(op.attr(\"axes\"))\n for axis in axes:\n x = _op.expand_dims(x, axis=axis, num_newaxis=1)\n g.add_node(op.output(\"Out\")[0], x)",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def _special_handle_slice(cls, op, X, W):\n tensor_list = []\n # slice add starts, ends, axes, steps\n append_inputs = {\n \"starts\": op.starts,\n \"ends\": op.ends,\n \"axes\": op.axes,\n \"steps\": op.steps,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_slice(g, op, block):\n\n data = g.get_node(op.input(\"Input\")[0])\n dims = len(infer_shape(data))\n\n axes = op.attr(\"axes\")\n indices = _expr.const(axes, dtype=\"int64\")\n\n decrease_axis = op.attr(\"decrease_axis\")\n if isinstance(decrease_axis, int):\n decrease_axis = [decrease_axis]\n\n if op.input(\"StartsTensor\"):\n starts = g.get_node(op.input(\"StartsTensor\")[0])\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n elif op.input(\"StartsTensorList\"):\n starts = []\n for start_index in op.input(\"StartsTensorList\"):\n start_index = g.get_node(start_index).astype(\"int64\")\n starts.append(start_index)\n starts = _op.concatenate(starts, axis=0)\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n else:\n starts = op.attr(\"starts\")\n\n if len(axes) < dims:\n if isinstance(starts, _expr.Expr):\n starts = _op.scatter_elements(\n _op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),\n indices,\n starts,\n axis=0,\n )\n else:\n base = [0] * dims\n for i, axis in enumerate(axes):\n base[axis] = starts[i]\n starts = base\n\n if op.input(\"EndsTensor\"):\n ends = g.get_node(op.input(\"EndsTensor\")[0])\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n elif op.input(\"EndsTensorList\"):\n ends = []\n for end_index in op.input(\"EndsTensorList\"):\n end_index = g.get_node(end_index).astype(\"int64\")\n ends.append(end_index)\n ends = _op.concatenate(ends, axis=0)\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n else:\n ends = op.attr(\"ends\")\n\n if len(axes) < dims:\n if isinstance(ends, _expr.Expr):\n ends = _op.scatter_elements(\n _expr.const(\n np.array([np.iinfo(np.int32).max] * dims),\n dtype=infer_type(ends).checked_type.dtype,\n ),\n indices,\n ends,\n axis=0,\n )\n else:\n base = [np.iinfo(np.int32).max] * dims\n for i, axis in enumerate(axes):\n base[axis] = ends[i]\n ends = base\n\n strides = None\n if \"StridesTensor\" in op.input_names and op.input(\"StridesTensor\"):\n strides = g.get_node(op.input(\"StridesTensor\")[0])\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif \"StridesTensorList\" in op.input_names and op.input(\"StridesTensorList\"):\n strides = []\n for strides_index in op.input(\"StridesTensorList\"):\n strides_index = g.get_node(strides_index).astype(\"int64\")\n strides.append(strides_index)\n strides = _op.concatenate(strides, axis=0)\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif op.has_attr(\"strides\"):\n strides = op.attr(\"strides\")\n\n if len(axes) < dims:\n if isinstance(strides, _expr.Expr):\n strides = _op.scatter_elements(\n _expr.const(np.array([1] * dims), dtype=infer_type(strides).checked_type.dtype),\n indices,\n strides,\n axis=0,\n )\n elif strides:\n base = [1] * dims\n for i, axis in enumerate(axes):\n base[axis] = strides[i]\n strides = base\n if not strides:\n strides = _op.const([1] * dims, dtype=\"int64\")\n\n out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)\n out_shape = infer_shape(out)\n if decrease_axis and len(out_shape) > 1:\n out = _op.squeeze(out, axis=decrease_axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_expand_dims(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n\n node = onnx.helper.make_node(\n \"Unsqueeze\",\n input_nodes,\n [name],\n axes=[axis],\n name=name,\n )\n return [node]",
"def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, split, num_output)",
"def __init__(self, size: torch.Size, root: str = None, split='val', limit_var: int = np.infty, exclude: List[str] = ()):\n assert len(size) == 4 and size[2] == size[3]\n assert size[1] in [1, 3]\n root = pt.join(root, 'imagenet', )\n self.root = root\n super().__init__(root, split)\n self.transform = transforms.Compose([\n transforms.Resize((size[2], size[3])),\n transforms.Grayscale() if size[1] == 1 else transforms.Lambda(lambda x: x),\n transforms.ToTensor()\n ])\n self.size = size\n self.picks = None\n self.picks = list(range(len(self)))\n if exclude is not None and len(exclude) > 0:\n syns = {k: v.lower().replace(' ', ',').split(',') for k, v in IMAGENET1k_CLS_STR.items()}\n exclude_ids = [i for i, s in syns.items() if any([exs.lower() in s for exs in exclude])]\n self.picks = np.argwhere(np.isin(self.targets, exclude_ids, invert=True)).flatten().tolist()\n # self.show()\n # print()\n if limit_var is not None and limit_var < len(self):\n self.picks = np.random.choice(np.arange(len(self.picks)), size=limit_var, replace=False)\n if limit_var is not None and limit_var > len(self):\n print(\n 'OEImageNet shall be limited to {} samples, but ImageNet contains only {} samples, thus using all.'\n .format(limit_var, len(self))\n )\n if len(self) < size[0]:\n raise NotImplementedError()",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def _create_tile(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n repeats = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(repeats)",
"def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.handle.pad_w + oddp[1],\n op.handle.pad_w + oddp[2],\n op.handle.pad_h + oddp[3],\n ]\n\n node.attribute.extend([\n helper.make_attribute('kernel_shape', k),\n helper.make_attribute('pads', p),\n helper.make_attribute('strides', s),\n ])\n if cls._get_singa_op_type(op) == '_Conv2d':\n node.op_type = cls._rename_operators.get('_Conv2d')\n node.attribute.extend([\n helper.make_attribute('group', op.handle.group),\n helper.make_attribute('auto_pad', 'NOTSET'),\n ])\n\n elif op.handle.is_max_pooling:\n node.op_type = cls._rename_operators.get('MaxPool2d')\n else:\n node.op_type = cls._rename_operators.get('AvgPool2d')\n return node",
"def voxel_space(self, copy=True):\n self._check_space()\n\n if self.space == 'voxel':\n if copy:\n return self.clone()\n else:\n return self\n\n skel = self.clone()\n skel.apply_inverse_transform()\n skel.space = 'voxel'\n return skel",
"def slice_nsp_tokens(model, pooler_input):\n\n config = model.config\n starts = config.sequence_length - config.max_sequences_per_pack\n ends = config.sequence_length\n pooler_input = model.builder.aiOnnxOpset9.slice([pooler_input], axes=[1], starts=[starts], ends=[ends])\n pooler_input = model.builder.reshape_const(model.builder.aiOnnx, [pooler_input],\n [config.micro_batch_size, config.max_sequences_per_pack,\n config.hidden_size])\n return pooler_input",
"def _fix_channels(self, op, attrs, inputs):\n if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:\n return attrs\n weight_name = self._renames[inputs[1]]\n if not weight_name in self._params:\n raise ValueError(\"Unable to get channels/units attr from onnx graph.\")\n else:\n wshape = self._params[weight_name].shape\n assert len(wshape) >= 2, \"Weights shape is invalid: {}\".format(wshape)\n channels = wshape[0]\n if op in [mx.sym.FullyConnected]:\n attrs['num_hidden'] = channels\n else:\n attrs['num_filter'] = channels\n return attrs",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def create_channels(self, pad_segment, pool=None):\n assert len(self.tracks) > 0\n\n xs = []\n ys = []\n\n def set_track_ptc(track, ptc):\n node_d = self.nodes[track]._asdict()\n loc_d = self.nodes[track].loc._asdict()\n assert loc_d['ptc'] is None\n loc_d['ptc'] = ptc\n node_d['loc'] = NodeLoc(**loc_d)\n\n self.nodes[track] = Node(**node_d)\n\n for track in self.tracks:\n track_node = self.nodes[track]\n\n xs.append(track_node.loc.x_low)\n xs.append(track_node.loc.x_high)\n ys.append(track_node.loc.y_low)\n ys.append(track_node.loc.y_high)\n\n x_tracks = {}\n y_tracks = {}\n\n for track in self.tracks:\n track_node = self.nodes[track]\n\n if track_node.type == NodeType.CHANX:\n assert track_node.loc.y_low == track_node.loc.y_high\n\n if track_node.loc.y_low not in x_tracks:\n x_tracks[track_node.loc.y_low] = []\n\n x_tracks[track_node.loc.y_low].append((\n track_node.loc.x_low,\n track_node.loc.x_high,\n track))\n elif track_node.type == NodeType.CHANY:\n assert track_node.loc.x_low == track_node.loc.x_high\n\n if track_node.loc.x_low not in y_tracks:\n y_tracks[track_node.loc.x_low] = []\n\n y_tracks[track_node.loc.x_low].append((\n track_node.loc.y_low,\n track_node.loc.y_high,\n track))\n else:\n assert False, track_node\n\n x_list = []\n y_list = []\n\n x_channel_models = {}\n y_channel_models = {}\n\n\n if pool is not None:\n for y in x_tracks:\n x_channel_models[y] = pool.apply_async(process_track, (x_tracks[y],))\n\n for x in y_tracks:\n y_channel_models[x] = pool.apply_async(process_track, (y_tracks[x],))\n\n for y in progressbar.progressbar(range(max(x_tracks)+1)):\n if y in x_tracks:\n if pool is None:\n x_channel_models[y] = process_track(x_tracks[y])\n else:\n x_channel_models[y] = x_channel_models[y].get()\n\n x_list.append(len(x_channel_models[y].trees))\n for idx, tree in enumerate(x_channel_models[y].trees):\n for i in tree:\n set_track_ptc(track=i[2], ptc=idx)\n else:\n x_list.append(0)\n\n for x in progressbar.progressbar(range(max(y_tracks)+1)):\n if x in y_tracks:\n if pool is None:\n y_channel_models[x] = process_track(y_tracks[x])\n else:\n y_channel_models[x] = y_channel_models[x].get()\n\n y_list.append(len(y_channel_models[x].trees))\n for idx, tree in enumerate(y_channel_models[x].trees):\n for i in tree:\n set_track_ptc(track=i[2], ptc=idx)\n else:\n y_list.append(0)\n\n x_min=min(xs)\n y_min=min(ys)\n x_max=max(xs)\n y_max=max(ys)\n\n num_padding = 0\n for chan, channel_model in x_channel_models.items():\n for ptc, start, end in channel_model.fill_empty(x_min, x_max):\n num_padding += 1\n track_idx = self.add_track(\n track=Track(\n direction='X',\n x_low=start,\n y_low=chan,\n x_high=end,\n y_high=chan,\n ),\n segment_id=pad_segment,\n capacity=0,\n timing=None)\n\n set_track_ptc(track_idx, ptc)\n\n for chan, channel_model in y_channel_models.items():\n for ptc, start, end in channel_model.fill_empty(y_min, y_max):\n num_padding += 1\n track_idx = self.add_track(\n track=Track(\n direction='Y',\n x_low=chan,\n y_low=start,\n x_high=chan,\n y_high=end,\n ),\n segment_id=pad_segment,\n capacity=0,\n timing=None)\n\n set_track_ptc(track_idx, ptc)\n\n print('Number padding nodes {}'.format(num_padding))\n\n return Channels(\n chan_width_max=max(max(x_list), max(y_list)),\n x_min=x_min,\n y_min=y_min,\n x_max=x_max,\n y_max=y_max,\n x_list=[ChannelList(idx, info) for idx, info in enumerate(x_list)],\n y_list=[ChannelList(idx, info) for idx, info in enumerate(y_list)],\n )"
] | [
"0.6746628",
"0.6559421",
"0.61696434",
"0.61142856",
"0.5958457",
"0.5643979",
"0.5640216",
"0.54976195",
"0.54508215",
"0.54356575",
"0.53339934",
"0.53273505",
"0.51586306",
"0.5094175",
"0.5021248",
"0.50043905",
"0.49964705",
"0.49707195",
"0.4929398",
"0.4861826",
"0.48401994",
"0.48072243",
"0.48031127",
"0.47908136",
"0.47900352",
"0.4763863",
"0.47192135",
"0.47140986",
"0.46965894",
"0.46729082"
] | 0.669003 | 1 |
Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator and return the created node. | def convert_expand_dims(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
node = onnx.helper.make_node(
"Unsqueeze",
input_nodes,
[name],
axes=[axis],
name=name,
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\")\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes)",
"def convert_unsqueeze(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axes = sorted(op.attr(\"axes\"))\n for axis in axes:\n x = _op.expand_dims(x, axis=axis, num_newaxis=1)\n g.add_node(op.output(\"Out\")[0], x)",
"def _fix_squeeze(self, inputs, new_attr):\n axes = new_attr.get('axis')\n op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)\n for i in axes[1:]:\n op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1)\n return op",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_squeeze(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = attrs.get(\"axis\", None)\n if not axis:\n raise AttributeError(\"Squeeze: Missing axis attribute: ONNX currently requires axis to \"\n \"be specified for squeeze operator\")\n axis = convert_string_to_list(axis)\n\n node = onnx.helper.make_node(\n \"Squeeze\",\n input_nodes,\n [name],\n axes=axis,\n name=name,\n )\n return [node]",
"def convert_squeeze(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axes = op.attr(\"axes\")\n if not axes:\n axes = None\n x = _op.squeeze(x, axis=axes)\n g.add_node(op.output(\"Out\")[0], x)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def convert_expand(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n if op.input(\"Shape\"):\n sizes = g.get_node(op.input(\"Shape\")[0])\n else:\n sizes = op.attr(\"shape\")\n\n if isinstance(sizes, _expr.Expr):\n sizes = try_infer_value(sizes, parameters=g.get_params())[0]\n\n if isinstance(sizes, np.ndarray):\n sizes = sizes.tolist()\n\n out = _op.broadcast_to(x, sizes)\n g.add_node(op.output(\"Out\")[0], out)",
"def _expand(x, ndim, axis=0):\n while F.rank(x) < ndim:\n x = F.expand_dims(x, axis)\n return x",
"def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def expand_many(x, axes):\n for ax in axes:\n x = torch.unsqueeze(x, ax)\n return x",
"def expand_dims(module, array, dimension):\n _import_modules()\n if module in [np, ma, jnp, tf]:\n return module.expand_dims(array, dimension)\n elif module == torch:\n return module.unsqueeze(array, dimension)\n raise UnknownModuleException(f\"Module {module.__name__} not supported.\")",
"def _squeeze_dims(ds):\n ds = ds.squeeze()\n for dim in ['lon', 'lat', 'bnds', 'depth', 'depth_2', 'depth_3']:\n if dim in ds:\n if ds[dim].size <= 1:\n del ds[dim]\n drop = []\n for dim in [\n 'hyai', 'hybi', 'hyam', 'hybm', 'time_bnds', 'lat_bnds', 'lon_bnds'\n ]:\n if dim in ds:\n drop.append(dim)\n ds = ds.drop(drop)\n return ds.squeeze()",
"def _maybe_expand_dims(x):\n x = tf.convert_to_tensor(x)\n if x.shape == ():\n return tf.expand_dims(x, axis=0)\n return x",
"def expand_dims(input, axis, _builder=None):\n axis = _constexpr_to_value(axis)\n axes = list(axis) if isinstance(axis, Sequence) else [axis]\n new_ndim = len(input.shape) + len(axes)\n axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes]\n\n if len(set(axes)) != len(axes):\n raise ValueError(f\"expand_dims recieved duplicate axes, normalized axes = {axes}\")\n\n ret = input\n for a in sorted(axes):\n ret = semantic.expand_dims(ret, a, _builder)\n return ret",
"def squeeze_batch_dim(nest: types.NestedTensor) -> types.NestedTensor:\n return tree.map_structure(lambda x: tf.squeeze(x, axis=0), nest)",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def convert_full(node, **kwargs):\n # ToDo: Use Constant or ConstantOfShape, when Issue #15101 is resolved?\n name, input_nodes, attrs = get_inputs(node, kwargs)\n del input_nodes\n\n # Convert \"0\"s dimensions to \"1\"s. This is a workaround for the case, where\n # mxnet symbols can broadcast \"0\"s, while ONNX can only broadcast over \"1\"s\n shape = convert_string_to_list(attrs[\"shape\"])\n shape = tuple(dim if dim else 1 for dim in shape)\n\n value = {\n '_zeros': 0.0,\n '_ones': 1.0,\n '_full': eval(attrs.get('value', '0')),\n }[node['op']]\n dtype = attrs.get('dtype')\n data = np.full(shape, value, dtype)\n\n return create_helper_tensor_node(data, name, kwargs)",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _expand_dims_(x: np.array) -> np.array:\n x = np.expand_dims(x, axis=-1)\n return x",
"def squeeze_expand_dim(tensor, axis):\n tensor = torch.squeeze(tensor)\n if len(list(tensor.size())) < 4:\n return tensor.unsqueeze(axis)\n else:\n return tensor",
"def _StaxSqueeze(axis=-1):\n\n def init_fun(rng, input_shape):\n ax = axis\n if ax < 0:\n ax = len(input_shape) + ax\n assert ax < len(input_shape), \"invalid axis %d for %d-dimensional tensor\" % (\n axis,\n len(input_shape),\n )\n assert input_shape[ax] == 1, \"axis %d is %d, not 1\" % (axis, input_shape[ax])\n output_shape = input_shape[:ax] + input_shape[ax + 1 :]\n return output_shape, ()\n\n def apply_fun(params, inputs, **kwargs):\n return jnp.squeeze(inputs, axis=axis)\n\n return init_fun, apply_fun",
"def expand_dims(array):\n return array[np.newaxis, np.newaxis, ...]",
"def create_helper_expand_node(input_name, output_name, expand_shape):\n expand_node = onnx.helper.make_node(\n \"Expand\",\n inputs=[input_name, expand_shape],\n outputs=[output_name],\n name=output_name,\n )\n return [expand_node]",
"def promote_empty_dims(ds):\n ds = ds.copy()\n for di in ds.dims:\n if di not in ds.coords:\n ds.coords[di] = ds[di]\n return ds"
] | [
"0.7386183",
"0.6739398",
"0.6726751",
"0.637353",
"0.62995607",
"0.60692775",
"0.60286963",
"0.60238683",
"0.59741277",
"0.59160703",
"0.5735533",
"0.5717082",
"0.5696729",
"0.5589094",
"0.55591923",
"0.5529139",
"0.55204725",
"0.5500257",
"0.54655224",
"0.54617375",
"0.54552186",
"0.5428289",
"0.5423528",
"0.54018694",
"0.53972113",
"0.53918755",
"0.5370919",
"0.5370033",
"0.5277761",
"0.52444726"
] | 0.716333 | 1 |
Map MXNet's log operator attributes to onnx's Log operator and return the created node. | def convert_log(node, **kwargs):
return create_basic_op_node('Log', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)",
"def log(self, base=None):\n return type(self)(self.parent(), self._simplify(self._express.log(base)))",
"def logIP(self): # just use base?\n np.log(self.t, out=self.t)\n return self",
"def convert_logsoftmax(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to int\n axis = int(attrs.get(\"axis\", -1))\n temp = attrs.get(\"temperature\", 'None')\n if temp != 'None':\n raise AttributeError(\"LogSoftMax: ONNX supports only temperature=None\")\n\n node = onnx.helper.make_node(\n 'LogSoftmax',\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [node]",
"def log2IP(self):\n np.log2(self.t, out=self.t)\n return self",
"def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(OperatorObserver, self).__init__()\n self.logger = logger",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def get_new_oplog(cls):\n is_sharded = True\n primary_conn = Connection(HOSTNAME, int(PORTS_ONE[\"PRIMARY\"]))\n if primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n primary_conn = Connection(HOSTNAME, int(PORTS_ONE[\"SECONDARY\"]))\n\n mongos_addr = \"%s:%s\" % (HOSTNAME, PORTS_ONE['MAIN'])\n if PORTS_ONE[\"MAIN\"] == PORTS_ONE[\"PRIMARY\"]:\n mongos_addr = \"%s:%s\" % (HOSTNAME, PORTS_ONE['MAIN'])\n is_sharded = False\n oplog_coll = primary_conn['local']['oplog.rs']\n\n namespace_set = ['test.test']\n doc_manager = DocManager()\n oplog = OplogThread(primary_conn, mongos_addr, oplog_coll, is_sharded,\n doc_manager, LockingDict(),\n namespace_set, cls.AUTH_KEY, AUTH_USERNAME,\n repl_set=\"demo-repl\")\n return(oplog, primary_conn, oplog.main_connection, oplog_coll)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def BuildTermLogging(self, p_node):\n set_term = Tree('set')\n set_term.AddParent(p_node)\n action_val = self.term.action[0]\n if action_val == 'accept':\n action_val = 'allow'\n action=Tree('', 'action ' + action_val + ';')\n action.AddParent(set_term)\n log_event = ''\n if not self.term.logging:\n log_event = 'never'\n elif str(self.term.logging[0]).lower() == 'true':\n log_event = 'start'\n elif str(self.term.logging[0]) == 'log-both':\n log_event = 'both'\n elif str(self.term.logging[0]) == 'disable':\n log_event = 'never'\n else:\n log_event = 'never'\n lef = Tree('lef', 'event '+ log_event + ';')\n lef.AddParent(set_term)",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def assign_log(self, value):\n if not self._log:\n raise StructureError(\"Trying to assign log values to non-log weights.\")\n\n value = tf.where(tf.is_nan(value), tf.log(tf.ones_like(value) * 0.01), value)\n if self._mask and not all(self._mask):\n # Only perform masking if mask is given and mask contains any 'False'\n value += tf.log(tf.cast(tf.reshape(self._mask, value.shape), dtype=conf.dtype))\n normalized_value = value - tf.reduce_logsumexp(value, axis=-1, keepdims=True)\n return tf.assign(self._variable, normalized_value)",
"def log(self):\n return F.Log.apply(self)",
"def convert_log1p(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n one = _expr.const(1, dtype=dtype)\n out = _op.log(x + one)\n g.add_node(op.output(\"Out\")[0], out)",
"def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True",
"def log(tree, exact_src, **kw):\n new_tree = hq[wrap(unhygienic[log], u[exact_src(tree)], ast_literal[tree])]\n yield new_tree",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def _get_node_attr(self, node, attr):\n return self.metrics[attr].ix[node]",
"def get_logger(self, ast_ctx, log_type, *arg, **kw):\n\n name = ast_ctx.get_logger_name()\n if name not in self.loggers:\n #\n # Maintain a cache for efficiency.\n #\n self.loggers[name] = ast_ctx.get_logger()\n return getattr(self.loggers[name], log_type)",
"def push(self, oplog):\n ns = oplog['ns']\n if ns not in self._map:\n self._map[ns] = []\n self._map[ns].append(oplog)\n self._count += 1\n self._last_optime = oplog['ts']",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def map_output_and_node(cls, onnx_model: onnx.ModelProto):\n output2node = dict()\n for node in onnx_model.graph.node:\n for output_name in node.output:\n output2node[output_name] = node\n return output2node",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def convert_logsigmoid(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.log(_op.tensor.sigmoid(x))\n g.add_node(op.output(\"Out\")[0], out)",
"def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node"
] | [
"0.59120095",
"0.5716469",
"0.5401583",
"0.5346299",
"0.532194",
"0.5304277",
"0.5276738",
"0.5258929",
"0.51880515",
"0.51534116",
"0.5130242",
"0.5092342",
"0.5069547",
"0.5066726",
"0.505875",
"0.5041651",
"0.50384235",
"0.495094",
"0.49467453",
"0.49352145",
"0.4929605",
"0.49229765",
"0.49177834",
"0.49128872",
"0.49053505",
"0.48899338",
"0.48899338",
"0.48898476",
"0.48730457",
"0.48631176"
] | 0.7054104 | 0 |
Map MXNet's reciprocal operator attributes to onnx's Reciprocal operator and return the created node. | def convert_reciprocal(node, **kwargs):
return create_basic_op_node('Reciprocal', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def convert_reciprocal(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n out = _expr.const(1.0, dtype) / x\n g.add_node(op.output(\"Out\")[0], out)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def convert_relu(node, **kwargs):\n return create_basic_op_node('Relu', node, kwargs)",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def createRotoPaintNodeMI():\n return gr()",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]",
"def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op",
"def getnodeequation(self, node_p):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n\n # (const node_bn* node)\n cnetica.GetNodeEquation_bn.argtypes = [c_void_p]\n cnetica.GetNodeEquation_bn.restype = c_char_p\n return cnetica.GetNodeEquation_bn(node_p) # equation",
"def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper",
"def reciprocal(\n self,\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n attr_path_and_name = \"syft.core.tensor.tensor.Tensor.reciprocal\"\n\n min_vals = self.min_vals.copy()\n min_vals.data = np.array(1 / min_vals.data)\n max_vals = self.max_vals.copy()\n max_vals.data = np.array(1 / max_vals.data)\n\n result = TensorWrappedPhiTensorPointer(\n data_subjects=self.data_subjects,\n min_vals=min_vals,\n max_vals=max_vals,\n client=self.client,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=self.client.address,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=[],\n kwargs={},\n )\n\n result.public_shape = self.public_shape\n result.public_dtype = self.public_dtype\n\n return result",
"def to_instruction(self):\n return self.to_circuit().to_gate()",
"def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)",
"def operator_rhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator.adjoint(inp)",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def __call__(cls, *args: Union['Node', Mapping[str, 'Node']], **kwargs: Any):\n args = cls._check_and_transform_args(args)\n cls._check_kwargs(kwargs)\n return OpNode(\n op_type=cls,\n args=args,\n output_data_type=cls._return_data_type,\n kwargs=kwargs)",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]"
] | [
"0.5679397",
"0.56778276",
"0.56740934",
"0.56724566",
"0.56111914",
"0.5536893",
"0.54996645",
"0.5413009",
"0.5394044",
"0.53590983",
"0.5352823",
"0.53090364",
"0.53014636",
"0.5288799",
"0.525179",
"0.52360773",
"0.5220722",
"0.5163405",
"0.5150799",
"0.51470494",
"0.5146389",
"0.51118875",
"0.5096147",
"0.50910693",
"0.5039488",
"0.49940175",
"0.49925002",
"0.49885422",
"0.49708742",
"0.49689898"
] | 0.689847 | 0 |
Map MXNet's _power operator attributes to onnx's Pow operator and return the created node. | def convert_power(node, **kwargs):
return create_basic_op_node('Pow', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.const(factor, dtype=dtype)\n out = _op.power(x, factor)\n g.add_node(op.output(\"Out\")[0], out)",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError, 'expecting a float'\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = quantitative(name, func=self, transform=lambda x: N.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))",
"def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self",
"def Attribute_Power(self, name):\n return Roll20.Attribute(self._power +\"-\" +name);",
"def get_setPower(self):\n self.read(\":POW?\")",
"def convert_pow_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Pow', **kwargs)",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError('expecting a float')\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = Quantitative(name, func=self, transform=lambda x: np.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def get_power_state(self, node):",
"def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)",
"def power(self) -> interface.Power:\n return cast(interface.Power, self._interfaces[interface.Power])",
"def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)",
"def py_pow(x, p, op_version=None):\n return x ** p",
"def get_power(self):\r\n return self.p",
"def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p",
"def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )",
"def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)",
"def pow(self, a: 'PFElement', n: int) -> 'PFElement':\n res = power(a, n)\n if not isinstance(res, PFElement):\n return self.element(res)\n else:\n return res",
"def power(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"power\")",
"def set_power(self, value):\n self.write(\":POW {}W\".format(value))",
"def __pow__(self, power):\n if power == 1:\n return self\n elif power == 0:\n return Polynomial(1)\n\n self.polynomials = {key: val for key, val in self.polynomials.items() if val != 0}\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n\n attributes = {}\n\n # Using Binomial theorem\n n = 0\n m = power\n use_n = True\n\n for k in range(0, power + 1):\n result = self.calculate_combinatorial_number(power, k)\n\n for index, polynomial in self.polynomials.items():\n if use_n:\n result *= pow(polynomial, (power - n))\n n += 1\n use_n = False\n else:\n result *= pow(polynomial, (power + m))\n m -= 1\n use_n = True\n\n attributes[\"x\" + str(n - 1)] = result\n\n return Polynomial(**attributes)",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def power(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"power\")",
"def get_power(self) -> float:\n\n #:READ[n][:CHANnel[m]][:SCALar]: POWer[:DC]?\n return float(self._inst.query(\":READ:POW?\"))",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))",
"def set_power_state(self, node, power_state):",
"def __pow__(self, power):\n value = power * (self.val) ** (power - 1)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiffReverse(self.val ** power, None, der)",
"def collect_powers(operator):\n new_tensors = []\n symbols = {}\n for tensor in operator.tensors:\n if tensor.is_field or tensor.name[0] == \"$\" or tensor.exponent is None:\n new_tensors.append(tensor)\n else:\n # Previusly collected exponent for same base and indices\n prev_exponent = symbols.get((tensor.name, tuple(tensor.indices)), 0)\n \n # The exponents of a product are added\n symbols[(tensor.name, tuple(tensor.indices))] = (\n tensor.exponent + prev_exponent)\n\n # Remove tensors with exponent 0\n new_op = Operator([])\n for (name, inds), exponent in symbols.items():\n if exponent != 0:\n new_op *= power_op(name, exponent, indices=inds)\n \n return new_op * Op(*new_tensors)"
] | [
"0.75909674",
"0.7083156",
"0.66979545",
"0.64450526",
"0.6441693",
"0.64366263",
"0.6400053",
"0.6366476",
"0.63472486",
"0.62409025",
"0.622891",
"0.6202606",
"0.60667837",
"0.6045029",
"0.6029663",
"0.6026515",
"0.60182786",
"0.59912276",
"0.5968232",
"0.5967135",
"0.5940714",
"0.5935297",
"0.5896266",
"0.58940005",
"0.5887795",
"0.58614534",
"0.58528113",
"0.584922",
"0.5817882",
"0.5812902"
] | 0.79412013 | 0 |
Map MXNet's _power operator attributes to onnx's Pow operator and return the created node. | def convert_broadcast_power(node, **kwargs):
return create_basic_op_node('Pow', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.const(factor, dtype=dtype)\n out = _op.power(x, factor)\n g.add_node(op.output(\"Out\")[0], out)",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError, 'expecting a float'\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = quantitative(name, func=self, transform=lambda x: N.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))",
"def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self",
"def Attribute_Power(self, name):\n return Roll20.Attribute(self._power +\"-\" +name);",
"def get_setPower(self):\n self.read(\":POW?\")",
"def convert_pow_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Pow', **kwargs)",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError('expecting a float')\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = Quantitative(name, func=self, transform=lambda x: np.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def get_power_state(self, node):",
"def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)",
"def power(self) -> interface.Power:\n return cast(interface.Power, self._interfaces[interface.Power])",
"def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)",
"def py_pow(x, p, op_version=None):\n return x ** p",
"def get_power(self):\r\n return self.p",
"def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p",
"def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )",
"def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)",
"def pow(self, a: 'PFElement', n: int) -> 'PFElement':\n res = power(a, n)\n if not isinstance(res, PFElement):\n return self.element(res)\n else:\n return res",
"def power(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"power\")",
"def set_power(self, value):\n self.write(\":POW {}W\".format(value))",
"def __pow__(self, power):\n if power == 1:\n return self\n elif power == 0:\n return Polynomial(1)\n\n self.polynomials = {key: val for key, val in self.polynomials.items() if val != 0}\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n\n attributes = {}\n\n # Using Binomial theorem\n n = 0\n m = power\n use_n = True\n\n for k in range(0, power + 1):\n result = self.calculate_combinatorial_number(power, k)\n\n for index, polynomial in self.polynomials.items():\n if use_n:\n result *= pow(polynomial, (power - n))\n n += 1\n use_n = False\n else:\n result *= pow(polynomial, (power + m))\n m -= 1\n use_n = True\n\n attributes[\"x\" + str(n - 1)] = result\n\n return Polynomial(**attributes)",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def power(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"power\")",
"def get_power(self) -> float:\n\n #:READ[n][:CHANnel[m]][:SCALar]: POWer[:DC]?\n return float(self._inst.query(\":READ:POW?\"))",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))",
"def set_power_state(self, node, power_state):",
"def __pow__(self, power):\n value = power * (self.val) ** (power - 1)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiffReverse(self.val ** power, None, der)",
"def collect_powers(operator):\n new_tensors = []\n symbols = {}\n for tensor in operator.tensors:\n if tensor.is_field or tensor.name[0] == \"$\" or tensor.exponent is None:\n new_tensors.append(tensor)\n else:\n # Previusly collected exponent for same base and indices\n prev_exponent = symbols.get((tensor.name, tuple(tensor.indices)), 0)\n \n # The exponents of a product are added\n symbols[(tensor.name, tuple(tensor.indices))] = (\n tensor.exponent + prev_exponent)\n\n # Remove tensors with exponent 0\n new_op = Operator([])\n for (name, inds), exponent in symbols.items():\n if exponent != 0:\n new_op *= power_op(name, exponent, indices=inds)\n \n return new_op * Op(*new_tensors)"
] | [
"0.7941838",
"0.7083369",
"0.66985244",
"0.64452976",
"0.64437544",
"0.6436942",
"0.64016074",
"0.63671076",
"0.63479775",
"0.6240992",
"0.62295127",
"0.6203682",
"0.6067832",
"0.6046127",
"0.60306716",
"0.60275626",
"0.6018355",
"0.5992741",
"0.5969637",
"0.5969036",
"0.59422266",
"0.593527",
"0.58969915",
"0.58959925",
"0.58885825",
"0.5862478",
"0.5852516",
"0.58501655",
"0.58178186",
"0.58129257"
] | 0.7592172 | 1 |
Map MXNet's sqrt operator attributes to onnx's Sqrt operator and return the created node. | def convert_sqrt(node, **kwargs):
return create_basic_op_node('Sqrt', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Sqrt(%s)\" % (node_A.name)\r\n return new_node",
"def sqrt(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sqrt()))",
"def sqrtw():\n return Operator([[(1.+1.j)/2,-1.j/np.sqrt(2)],[1./np.sqrt(2),(1.+1.j)/2]])",
"def sqrt(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.sqrt())",
"def sqrt(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.sqrt(), diag_shape=self.diag_shape)",
"def sqrt(tensor):\n raise NotImplementedError",
"def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)",
"def sqrt(obj):\n\tif isinstance(obj, Variable):\n \t\tnew_Variable = Variable(obj.val, obj.der)\n \t\treturn new_Variable.__pow__(0.5)\n\telse:\n\t\treturn np.sqrt(obj)",
"def convert_square(node, **kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n initializer = kwargs[\"initializer\"]\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n\n power2_name = \"square_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))\n initializer.append(\n onnx.helper.make_tensor(\n name=power2_name,\n data_type=data_type,\n dims=(1,),\n vals=[2],\n raw=False,\n )\n )\n\n input_nodes.append(power2_name)\n\n node = onnx.helper.make_node(\n \"Pow\",\n input_nodes,\n [name],\n name=name\n )\n return [tensor_node, node]",
"def scalar_sqrt(self, dst, src):\n return self._scalar_single_func('sqrt', dst, src)",
"def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])",
"def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])",
"def sqrt(self, a):\n raise NotImplementedError",
"def sqrt(self):\n # There might be a conversion factor from taking the square root of the unit\n new_value = math.sqrt(self._value)\n new_unit = self.unit.sqrt()\n unit_factor = self.unit.conversion_factor_to(new_unit*new_unit)\n if unit_factor != 1.0:\n new_value *= math.sqrt(unit_factor)\n return Quantity(value=new_value, unit=new_unit)",
"def from_root(\n cls, root: \"ConstantDiagonalLinearOperator\"\n ) -> \"ConstantDiagonalLinearOperator\":\n return ConstantDiagonalLinearOperator(value=root.value**2, size=root.size)",
"def sqrt(x):\r\n # see decorator for function body\r",
"def sqrt(self):\n a = self.pop()\n c= math.sqrt(a)\n self.push(c)",
"def sqrt(a):",
"def createMath(self, *args):\n return _libsbml.ASTBasePlugin_createMath(self, *args)",
"def my_sqrt(x):\n square_root = x**(0.5)\n return square_root",
"def sqrt(data):\n return _make.sqrt(data)",
"def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorProto.INT32\n tensor_value = onnx.helper.make_tensor(\"value\", tensor_type, [1],\n [op.value])\n node.attribute.extend([\n helper.make_attribute('value', tensor_value),\n ])\n return node",
"def sqrt(x):\n return 0.0",
"def test_sym_sqrtm(self): \n # create random symmetric n x n matrix\n n = 5\n A = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n A = A + A.T\n\n # reference implementation of scipy\n sqA_scipy = sla.sqrtm(A.numpy())\n isqA_scipy = sla.inv(sla.sqrtm(A.numpy()))\n # my own implementation using pure torch functions\n sqA,isqA = (x.numpy() for x in _sym_sqrtm(A))\n \n self.assertTrue(np.isclose(sqA, sqA_scipy).all())\n self.assertTrue(np.isclose(isqA, isqA_scipy).all())",
"def sqrt(n):\n pass",
"def get_bprop_sqrt(self):\n mul_func = P.Mul()\n fill_func = P.Fill()\n div_op = P.RealDiv()\n sqrt = P.Sqrt()\n dtype = P.DType()\n\n def bprop(x, out, dout):\n temp = div_op(fill_func(dtype(x), shape_op(x), 0.5), sqrt(x))\n dx = mul_func(dout, temp)\n return (dx,)\n return bprop",
"def _do_sqrt(x, prec=None, extend=True, all=False):\n if prec:\n if x >= 0:\n return RealField(prec)(x).sqrt(all=all)\n else:\n return ComplexField(prec)(x).sqrt(all=all)\n if x == -1:\n from sage.symbolic.pynac import I\n z = I\n else:\n z = SR(x) ** one_half\n\n if all:\n if z:\n return [z, -z]\n else:\n return [z]\n return z",
"def test_function_sqrt(self):\r\n self.assertEquals(preview.latex_preview('sqrt(3)'), r'\\sqrt{3}')",
"def radius(x) :\r\n return Feature(x, \"radius\")",
"def convert_square(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n out = _op.power(x, _expr.const(2, dtype))\n g.add_node(op.output(\"Out\")[0], out)"
] | [
"0.65579957",
"0.6451186",
"0.6441741",
"0.6321632",
"0.61004037",
"0.5692846",
"0.56454605",
"0.561679",
"0.55741465",
"0.5483834",
"0.5446249",
"0.5332103",
"0.527836",
"0.52485776",
"0.5248409",
"0.5226648",
"0.5189378",
"0.5185982",
"0.51702994",
"0.5157216",
"0.513852",
"0.5092477",
"0.5050442",
"0.50421405",
"0.5002616",
"0.49981633",
"0.49572638",
"0.49498913",
"0.49390507",
"0.49287987"
] | 0.7393713 | 0 |
Map MXNet's depth_to_space operator attributes to onnx's DepthToSpace operator and return the created node. | def convert_depthtospace(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
blksize = int(attrs.get("block_size", 0))
node = onnx.helper.make_node(
"DepthToSpace",
input_nodes,
[name],
blocksize=blksize,
name=name,
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_spacetodepth(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n blksize = int(attrs.get(\"block_size\", 0))\n\n node = onnx.helper.make_node(\n \"SpaceToDepth\",\n input_nodes,\n [name],\n blocksize=blksize,\n name=name,\n )\n return [node]",
"def convert_depthwise_conv2d(self, op):\n return self.convert_conv(op, \"depthwise\")",
"def get_space_attr(space, attr='shape'):\n assert isinstance(space, gym.Space)\n if hasattr(space, 'spaces'):\n return tuple(get_space_attr(s, attr=attr) for s in space.spaces)\n else:\n value = getattr(space, attr)\n # If this value is seen as nested (i.e. a tuple with shape), make it\n # an array so that it is seen as a single object by tf.nest\n if tf.nest.is_nested(value):\n value = np.array(value) \n return value",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _depth_to_segment(self, depth):\r\n segment = depth.clone()\r\n segment[segment > 0] = 1\r\n return segment",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def convertDepthtomm(self,depth):\n\n depth = 2.968*10**-05*depth+0.02079*depth+0.5146\n \n return depth",
"def set_depth(node, depth):\n setattr(node[0], \"depth\", depth)",
"def convert_pad(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mxnet_pad_width = convert_string_to_list(attrs.get(\"pad_width\"))\n onnx_pad_width = transform_padding(mxnet_pad_width)\n\n pad_mode = attrs.get(\"mode\")\n\n if pad_mode == \"constant\":\n pad_value = float(attrs.get(\"constant_value\")) \\\n if \"constant_value\" in attrs else 0.0\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode='constant',\n value=pad_value,\n pads=onnx_pad_width,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode=pad_mode,\n pads=onnx_pad_width,\n name=name\n )\n\n return [node]",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def space(dim, dim2, dim3):\n space = Space()\n space.register(dim)\n space.register(dim2)\n space.register(dim3)\n return space",
"def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths",
"def convert(token, depth=1):\n\n # finds the root token\n if token.kind == 'EQUALS':\n # asssign left Token as output pin\n new_node = Node(token.left, pin=True, root=True)\n\n # recursively go through new_node to find children\n new_child_node = convert(token.right, depth + 1)\n new_node.add(new_child_node)\n\n # must be an input pin\n elif token.kind == 'ID' or token.kind == 'LITERAL':\n new_node = Node(token, pin=True, weight=1)\n\n # determines depth of tree\n self.depth = depth if depth > self.depth else self.depth\n\n # goes through tokens that are not pins or the root\n else:\n new_node = Node(token, gate=True)\n\n # recursively checks for right Tokens\n if token.right:\n new_child_node = convert(token.right, depth + 1)\n new_node.children += [new_child_node]\n\n # recursively checks for left Tokens\n if token.left:\n\n # OPTIMIZE PART\n # left child Token might be the same kind as root Token\n # if so, don't add the child Token, just add its children\n if token.left.kind == token.kind:\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # checks if left child is a gate and applies not function\n elif new_node.kind == 'not' and token.left.terminal:\n if token.left.kind[0].lower() == 'n':\n new_node.kind = token.left.kind[1:].lower()\n else:\n new_node.kind = 'n' + token.left.kind.lower()\n\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # no optimizing to be done\n else:\n new_child_node = convert(token.left, depth + 1)\n new_node.children += [new_child_node]\n\n new_node.calculate_weight()\n return new_node",
"def setDepth(self, *args):\n return _libsbml.Dimensions_setDepth(self, *args)",
"def convert_depth_pixel_to_metric_coordinate(depth, pixel_x, pixel_y, camera_intrinsics):\n\tX = (pixel_x - camera_intrinsics.ppx)/camera_intrinsics.fx *depth\n\tY = (pixel_y - camera_intrinsics.ppy)/camera_intrinsics.fy *depth\n\treturn X, Y, depth",
"def _convert_to_depth(self, dist):\n return self.dist_to_bottom - dist",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def convert_depth_pixel_to_metric_coordinate(depth, pixel_x, pixel_y, camera_intrinsics):\r\n\tX = (pixel_x - camera_intrinsics.ppx)/camera_intrinsics.fx *depth\r\n\tY = (pixel_y - camera_intrinsics.ppy)/camera_intrinsics.fy *depth\r\n\treturn X, Y, depth",
"def to_pyrado_space(space) -> [BoxSpace, EmptySpace]:\n if space is None:\n return EmptySpace\n return BoxSpace(space.min, space.max, labels=space.names)",
"def get_space(self, name, outer_space):\n if name not in self._register:\n self._register[name] = Space(name=name, outer_space=outer_space)\n return self._register[name]",
"def coords_to_node(self,row,col):\n return row*self.cols + col + 1",
"def make(self):\n return make_operation_space()",
"def _convert_geometry_to_port(label, layer = 0):\n name, width, orientation = json.loads(label.text)\n new_port = Port(name = name, width = width, orientation = orientation)\n new_port.midpoint = label.position - _calculate_label_offset(new_port)\n return new_port",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def _cell_to_global(self, xy, wh):\n # grid setup\n line = tf.range(0, self.num_cells)\n rows = tf.reshape(line, [self.num_cells, 1])\n rows = tf.tile(rows, [1, self.num_cells])\n cols = tf.reshape(line, [1, self.num_cells])\n cols = tf.tile(cols, [self.num_cells, 1])\n grid = tf.stack([cols, rows], axis=-1)\n grid = tf.reshape(grid, [1, self.num_cells, self.num_cells, 1, 2])\n grid = tf.cast(grid, tf.float32)\n # box transformation\n xy += grid\n wh *= tf.reshape(self.anchors, [1, 1, 1, self.num_anchors, 2])\n return tf.concat([xy, wh], axis=-1) / self.num_cells",
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def expand_var(nc, out, name, direction):\n if name == direction:\n return\n\n var1 = nc.variables[name]\n\n print(\"Processing %s...\" % name)\n\n # Copy coordinate variables and stop:\n if name in ['t', 'z', 'y', 'x', 'zb']:\n var2 = out.createVariable(name, var1.dtype, (name,))\n var2[:] = var1[:]\n copy_attributes(var1, var2)\n return\n\n dims = var1.dimensions\n if len(dims) == 1:\n dims = ('y', 'x')\n elif len(dims) == 2:\n dims = ('t', 'y', 'x')\n elif len(dims) == 3:\n if name == \"litho_temp\": # litho_temp is the only variable depending on 'zb'.\n dims = ('t', 'zb', 'y', 'x')\n else:\n dims = ('t', 'z', 'y', 'x')\n\n var2 = out.createVariable(name, var1.dtype, dims)\n copy_attributes(var1, var2)\n\n for j in range(3):\n if direction == 'x':\n var2[get_slice(var2.dimensions, x=j)] = permute(var1)\n elif direction == 'y':\n var2[get_slice(var2.dimensions, y=j)] = permute(var1)"
] | [
"0.654902",
"0.5397657",
"0.5092263",
"0.5052654",
"0.49498823",
"0.49392277",
"0.4938975",
"0.48952127",
"0.48248088",
"0.48029906",
"0.47768003",
"0.4760767",
"0.47406405",
"0.46349868",
"0.4629043",
"0.45984888",
"0.45853606",
"0.45547014",
"0.45455354",
"0.4535628",
"0.45154038",
"0.44973174",
"0.44949403",
"0.4482171",
"0.447909",
"0.4427471",
"0.44245192",
"0.44134894",
"0.44089794",
"0.4400974"
] | 0.71486926 | 0 |
Map MXNet's space_to_depth operator attributes to onnx's SpaceToDepth operator and return the created node. | def convert_spacetodepth(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
blksize = int(attrs.get("block_size", 0))
node = onnx.helper.make_node(
"SpaceToDepth",
input_nodes,
[name],
blocksize=blksize,
name=name,
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_depthtospace(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n blksize = int(attrs.get(\"block_size\", 0))\n\n node = onnx.helper.make_node(\n \"DepthToSpace\",\n input_nodes,\n [name],\n blocksize=blksize,\n name=name,\n )\n return [node]",
"def convert_depthwise_conv2d(self, op):\n return self.convert_conv(op, \"depthwise\")",
"def convertDepthtomm(self,depth):\n\n depth = 2.968*10**-05*depth+0.02079*depth+0.5146\n \n return depth",
"def set_depth(node, depth):\n setattr(node[0], \"depth\", depth)",
"def get_space_attr(space, attr='shape'):\n assert isinstance(space, gym.Space)\n if hasattr(space, 'spaces'):\n return tuple(get_space_attr(s, attr=attr) for s in space.spaces)\n else:\n value = getattr(space, attr)\n # If this value is seen as nested (i.e. a tuple with shape), make it\n # an array so that it is seen as a single object by tf.nest\n if tf.nest.is_nested(value):\n value = np.array(value) \n return value",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def convert_pad(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mxnet_pad_width = convert_string_to_list(attrs.get(\"pad_width\"))\n onnx_pad_width = transform_padding(mxnet_pad_width)\n\n pad_mode = attrs.get(\"mode\")\n\n if pad_mode == \"constant\":\n pad_value = float(attrs.get(\"constant_value\")) \\\n if \"constant_value\" in attrs else 0.0\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode='constant',\n value=pad_value,\n pads=onnx_pad_width,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=input_nodes,\n outputs=[name],\n mode=pad_mode,\n pads=onnx_pad_width,\n name=name\n )\n\n return [node]",
"def _convert_to_depth(self, dist):\n return self.dist_to_bottom - dist",
"def convert(token, depth=1):\n\n # finds the root token\n if token.kind == 'EQUALS':\n # asssign left Token as output pin\n new_node = Node(token.left, pin=True, root=True)\n\n # recursively go through new_node to find children\n new_child_node = convert(token.right, depth + 1)\n new_node.add(new_child_node)\n\n # must be an input pin\n elif token.kind == 'ID' or token.kind == 'LITERAL':\n new_node = Node(token, pin=True, weight=1)\n\n # determines depth of tree\n self.depth = depth if depth > self.depth else self.depth\n\n # goes through tokens that are not pins or the root\n else:\n new_node = Node(token, gate=True)\n\n # recursively checks for right Tokens\n if token.right:\n new_child_node = convert(token.right, depth + 1)\n new_node.children += [new_child_node]\n\n # recursively checks for left Tokens\n if token.left:\n\n # OPTIMIZE PART\n # left child Token might be the same kind as root Token\n # if so, don't add the child Token, just add its children\n if token.left.kind == token.kind:\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # checks if left child is a gate and applies not function\n elif new_node.kind == 'not' and token.left.terminal:\n if token.left.kind[0].lower() == 'n':\n new_node.kind = token.left.kind[1:].lower()\n else:\n new_node.kind = 'n' + token.left.kind.lower()\n\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # no optimizing to be done\n else:\n new_child_node = convert(token.left, depth + 1)\n new_node.children += [new_child_node]\n\n new_node.calculate_weight()\n return new_node",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def depth_from_nemo_tag(function):\n def wrap(start, values):\n # print 'Depth %d | %d %s' %(self._depth, start, values)\n self._depth = start\n tokens = values[1]\n self._current_node = function(tokens)\n #print self._current_node\n return ''\n\n return wrap",
"def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor",
"def convert_depth_pixel_to_metric_coordinate(depth, pixel_x, pixel_y, camera_intrinsics):\n\tX = (pixel_x - camera_intrinsics.ppx)/camera_intrinsics.fx *depth\n\tY = (pixel_y - camera_intrinsics.ppy)/camera_intrinsics.fy *depth\n\treturn X, Y, depth",
"def setDepth(self, *args):\n return _libsbml.Dimensions_setDepth(self, *args)",
"def convert_depth_pixel_to_metric_coordinate(depth, pixel_x, pixel_y, camera_intrinsics):\r\n\tX = (pixel_x - camera_intrinsics.ppx)/camera_intrinsics.fx *depth\r\n\tY = (pixel_y - camera_intrinsics.ppy)/camera_intrinsics.fy *depth\r\n\treturn X, Y, depth",
"def ws_depth(self, symbol):\n return self.ws_request('%s@depth' % (symbol.lower()))",
"def _depth_to_segment(self, depth):\r\n segment = depth.clone()\r\n segment[segment > 0] = 1\r\n return segment",
"def depth_from_match(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap",
"def _get_node_attr(self, node, attr):\n return self.metrics[attr].ix[node]",
"def _new_depth(self, node, curr_depth):\n right = curr_depth\n left = curr_depth\n if node._rkid:\n right = self._new_depth(node._rkid, curr_depth + 1)\n if node._lkid:\n left = self._new_depth(node._lkid, curr_depth + 1)\n if right > left:\n return right\n return left",
"def setDepth(self, *args):\n return _CompuCell.Potts3D_setDepth(self, *args)",
"def GetDepth(*args, **kwargs):\n return _gdi_.DC_GetDepth(*args, **kwargs)",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def __init__(self, affinity, game_type, game_space, search_depth, opponent=None):\n\n super().__init__(affinity, game_type, game_space, opponent)\n self.__search_depth = search_depth\n self.nodes_expanded = 0",
"def __init__(self, affinity, game_type, game_space, search_depth, opponent=None):\n\n super().__init__(affinity, game_type, game_space, opponent)\n self.__search_depth = search_depth\n self.nodes_expanded = 0",
"def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0",
"def on_depth_image(self, depth_image):\n depth_image = depth_image.copy()\n mask = np.where(self.depth != 0)\n depth_image[mask] = self.depth[mask]\n return depth_image",
"def convert_size(node, **kwargs):\n return create_basic_op_node('Size', node, kwargs)"
] | [
"0.6864339",
"0.5582542",
"0.5029747",
"0.5017303",
"0.49569693",
"0.49251297",
"0.4901449",
"0.4830856",
"0.48081103",
"0.48042098",
"0.480372",
"0.4798086",
"0.4687717",
"0.46674845",
"0.4647743",
"0.4614791",
"0.45717818",
"0.455431",
"0.45452812",
"0.45188713",
"0.45099762",
"0.447949",
"0.4470821",
"0.44578215",
"0.44239184",
"0.44088483",
"0.44088483",
"0.4400864",
"0.43928477",
"0.43829906"
] | 0.6642687 | 1 |
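Editorial aside (not one of the dataset rows): a minimal, self-contained sketch of the kind of node convert_spacetodepth above emits. The tensor names, block size, and shapes here are assumptions for illustration only; with blocksize=2 on an NCHW input, SpaceToDepth folds each 2x2 spatial block into the channel dimension.

import onnx
from onnx import helper, TensorProto

# Build the same single SpaceToDepth node the converter would emit.
s2d = helper.make_node("SpaceToDepth", ["x"], ["y"], blocksize=2, name="s2d0")
graph = helper.make_graph(
    [s2d],
    "space_to_depth_sketch",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 4, 4])],
    # 3 * 2 * 2 = 12 channels, 4 / 2 = 2 along each spatial dim.
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 12, 2, 2])],
)
onnx.checker.check_model(helper.make_model(graph))  # structural validity check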
Map MXNet's sum operator attributes to onnx's ReduceSum operator and return the created node. | def convert_sum(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes:
node = onnx.helper.make_node(
'ReduceSum',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
else:
node = onnx.helper.make_node(
'ReduceSum',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())",
"def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.array(np.sum(input_vals[0], node.const_attr))\r\n else:\r\n #print(np.sum(input_vals[0]))\r\n return np.array(np.sum(input_vals[0]))",
"def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)",
"def _create_reduceOp(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\", None)\n keepdims = onnx_node.getattr(\"keepdims\", 1)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes, keepdims)",
"def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)",
"def reduce(self, app, nodes, result):",
"def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)",
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)",
"def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)",
"def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)",
"def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)",
"def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)",
"def reduce_sum_as_einsum(x: JaxExpression, params: Params) -> Einsum:\n axis = params['axes']\n x_shape = x.shape\n x_dims = ''.join(it.islice(einsum.einsum_letters(), len(x_shape)))\n out_dims = ''.join([x_dims[i] for i in range(len(x_shape)) if i not in axis])\n formula = f'{x_dims}->{out_dims}'\n return Einsum(formula, (x,))",
"def get_sum(self):\n return self.__tree[0]",
"def sum(self):\n return self.aggregate(np.sum)",
"def get_bprop_reducesum(self):\n\n def bprop(x, axis, out, dout):\n dx = _sum_grad(x, axis, dout)\n return dx, zeros_like(axis)\n return bprop",
"def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)",
"def sum(data, **kwargs):\n return Component(\n \"Sum\",\n arguments={\n 'data': Component.of(data)\n },\n options={\n \n },\n constraints=kwargs)",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)",
"def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)",
"def convert_reduce(g, op, block):\n\n op_map = {\n \"reduce_all\": \"all\",\n \"reduce_any\": \"any\",\n \"reduce_max\": \"max\",\n \"reduce_min\": \"min\",\n \"reduce_prod\": \"prod\",\n \"reduce_sum\": \"sum\",\n \"reduce_mean\": \"mean\",\n }\n op_name = op_map[op.type]\n input_x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"dim\")\n if op.attr(\"reduce_all\"):\n axis = None\n keepdims = op.attr(\"keep_dim\")\n out = get_relay_op(op_name)(input_x, axis=axis, keepdims=keepdims)\n if not axis and not keepdims:\n # use `expand_dims` to solve the following situation\n # for TVM, the shape of `out` will be (, )\n # for Paddle, the shape of `out` will be [1]\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)"
] | [
"0.6900051",
"0.6317507",
"0.6197959",
"0.61500716",
"0.60951954",
"0.60030466",
"0.5988547",
"0.5975774",
"0.5914605",
"0.58477676",
"0.5730659",
"0.5723871",
"0.56718594",
"0.5667977",
"0.5665367",
"0.5665367",
"0.5665367",
"0.5665367",
"0.5665367",
"0.56367636",
"0.5594987",
"0.5593025",
"0.5589772",
"0.5580705",
"0.55360574",
"0.5527095",
"0.5525056",
"0.5489413",
"0.5489413",
"0.54450876"
] | 0.7541393 | 0 |
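Illustrative aside (not dataset content): the axis/keepdims attributes that convert_sum forwards to ReduceSum behave like numpy's sum, as this assumed (2, 3, 4) example shows. Note that this attribute-based form matches older ONNX opsets; opset 13 and later move axes from an attribute to an optional input.

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
print(np.sum(x, axis=(1,), keepdims=True).shape)   # (2, 1, 4) -- keepdims retains the reduced axis as size 1
print(np.sum(x, axis=(1,), keepdims=False).shape)  # (2, 4)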
Map MXNet's shape_array operator attributes to onnx's Shape operator and return the created node. | def convert_shape(node, **kwargs):
return create_basic_op_node('Shape', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ashape(node):\n shp = node.shape\n assert shp is not None\n return shp",
"def create_helper_shape_node(input_name, output_name):\n shape_node = onnx.helper.make_node(\n \"Shape\",\n inputs=[input_name],\n outputs=[output_name],\n name=output_name,\n )\n return [shape_node]",
"def add_shape(self, input_name, attr=None, name=None):\n if attr is None:\n attr = {}\n return self._build_op('Shape', [input_name], attr=attr, name=name)",
"def _create_reshape(cls, op, op_t):\n # make the shape node\n # because the reshape in singa does not provide its shape as input tensor\n shape_node_name = op.name + \":shape\"\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n node.input.extend([shape_node_name])\n return node",
"def processed_shape(self, shape):\n return shape",
"def __get_shape(\n op_str: str,\n x_shape: Tuple[int],\n y_shape: Tuple[int],\n ) -> Tuple[int]:\n op = getattr(operator, op_str)\n res = op(np.empty(x_shape), np.empty(y_shape)).shape\n cast(Tuple[int], res)\n return tuple(res) # type: ignore",
"def add_shape(self, input_name, name=None):\n return self._build_op('Shape', [input_name], name=name)",
"def _create_constantOfShape(cls, onnx_node, inputs, opset_version):\n value = onnx_node.getattr(\"value\", 0)\n if isinstance(value, onnx.TensorProto):\n value = numpy_helper.to_array(value)[0].item()\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(value)",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _create_constantOfShape(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n tensor_type = onnx.TensorProto.FLOAT if isinstance(\n op.value, float) else onnx.TensorProto.INT32\n tensor_value = onnx.helper.make_tensor(\"value\", tensor_type, [1],\n [op.value])\n node.attribute.extend([\n helper.make_attribute('value', tensor_value),\n ])\n return node",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def shape(self) -> Shape:",
"def convert_shape(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n out = shape_of(x, dtype=\"int32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def _special_handle_reshape(cls, op, X, W):\n node_name = op.name + \":shape\"\n return [\n numpy_helper.from_array(np.array(op.shape, dtype=np.int64),\n node_name)\n ]",
"async def infer_shape_shape(track, ary):\n shp = await ary['shape']\n return TupleShape((NOSHAPE,) * len(shp))",
"def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()",
"def __str__(self):\n return \"shape[]\"",
"def amplify_2d_shape(shape, x_amplify, y_amplify):",
"def shape(self):\n return self._shape",
"def shapes(self):\n return [load_node(item) for item in self.get_attribute('shapes')]",
"def ShapeFrom(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_ShapeFrom(self, *args)",
"def add_input_and_output_shape(self, input_shape, output_shape):",
"def get_output_shape(self):\n return self.incoming_shapes[0][:-1] + [sum([s[-1] for s in self.incoming_shapes])]",
"def __call__(self, shape):\n return np.ones(shape)",
"def shape(self):",
"def shape(self):",
"def shape(self):\r\n return self._shape",
"def output_shape(self):\n raise NotImplementedError",
"def __init__(self, shape):\n\n self.shape = shape"
] | [
"0.6381112",
"0.6290396",
"0.61286956",
"0.60487986",
"0.6037103",
"0.5871047",
"0.57438403",
"0.5713804",
"0.57093483",
"0.5691644",
"0.5663834",
"0.56613344",
"0.5644378",
"0.5637776",
"0.56361204",
"0.56357217",
"0.562241",
"0.56134427",
"0.56105673",
"0.55175805",
"0.5502873",
"0.54839754",
"0.5451837",
"0.5446488",
"0.54412353",
"0.5428283",
"0.5428283",
"0.5420983",
"0.5419601",
"0.54139894"
] | 0.6979719 | 0 |
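For reference only (not a dataset row): given the create_basic_op_node helper quoted among the negatives further down in this dump, the one-line convert_shape above expands to a single ONNX Shape node whose output name reuses the MXNet node name. The names below are hypothetical.

import onnx

shape_node = onnx.helper.make_node("Shape", ["data"], ["shape0"], name="shape0")
print(shape_node.op_type, list(shape_node.input), list(shape_node.output))  # Shape ['data'] ['shape0']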
Map MXNet's broadcast_lesser operator attributes to onnx's Less operator and return the created node. | def convert_broadcast_lesser(node, **kwargs):
return create_basic_op_node('Less', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)",
"def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )",
"def less(lhs, rhs):\n return _make.less(lhs, rhs)",
"def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)",
"def __lt__(self, *args):\n return _ida_hexrays.operand_locator_t___lt__(self, *args)",
"def less_than(self) -> global___Expression:",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )",
"def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)",
"def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)",
"def test_less_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::le\"},\n )",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def moderator_less(self, moderator_less):\n\n self._moderator_less = moderator_less",
"def __le__(self, *args):\n return _ida_hexrays.operand_locator_t___le__(self, *args)",
"def __lt__(self, other):\n return self.weight() < other.weight()",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def __lt__(self, other):\n return self.weight < other.weight",
"def __le__(self, other):\n return _generate_relational_expression(_le, self, other)",
"def setLesser(self,Node):\n self.lesser=Node",
"def _less_than_op(spec):",
"def __lt__(self, other):\n\t\tselfAttrs = (self.inflatedCost, self.label.winery.name, self.label.name, self.label.vintage)\n\t\totherAttrs = (other.inflatedCost, other.label.winery.name, other.label.name, other.label.vintage)\n\t\treturn selfAttrs < otherAttrs",
"def __lt__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__lt__\")",
"def lt(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<\", __key, __and, kwargs.items())",
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def test_less_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::le\"},\n )",
"def __le__(self, other):\n return self.master.abs2phy(pos=other)",
"def __lt__(self, other):\n return self.abs2phy.__lt__(other)",
"def __lt__(self, other):\n return self.abs2phy.__lt__(other)",
"def __lt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Less(self, other)"
] | [
"0.5858624",
"0.5801406",
"0.5520665",
"0.5515037",
"0.54783213",
"0.5373597",
"0.5251242",
"0.5212264",
"0.520232",
"0.5198165",
"0.5109519",
"0.51010454",
"0.5100173",
"0.5031007",
"0.5028119",
"0.50162864",
"0.49891058",
"0.49844187",
"0.4968322",
"0.49516803",
"0.49433753",
"0.4890774",
"0.48904952",
"0.48613152",
"0.48487303",
"0.48465022",
"0.4835667",
"0.48201647",
"0.48201647",
"0.48183638"
] | 0.85602987 | 0 |
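A hedged illustration (not part of the dataset): the ONNX Less node emitted by convert_broadcast_lesser follows numpy-style broadcasting, which is what MXNet's broadcast_lesser computes elementwise. The array shapes are assumptions chosen to show the broadcast.

import numpy as np

a = np.array([[1.0, 2.0, 3.0]])   # shape (1, 3)
b = np.array([[2.0], [0.5]])      # shape (2, 1)
print(np.less(a, b).shape)        # (2, 3): the comparison broadcasts to a boolean array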
Map MXNet's broadcast_greater operator attributes to onnx's Greater operator and return the created node. | def convert_broadcast_greater(node, **kwargs):
return create_basic_op_node('Greater', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )",
"def greater_than(self) -> global___Expression:",
"def greater(input: Tensor, other: Tensor) -> Tensor:\n ctx = get_current_context()\n g = ctx.graph\n pb_g = g._pb_graph\n\n check_in_graph(g, input=input, other=other)\n check_tensor_ipu_and_tile_set(input=input, other=other)\n\n settings = ctx._get_op_settings(\"greater\")\n opid = _ir.OperatorIdentifier(\"ai.onnx\", \"Greater\", 9, _ir.NumInputs(2, 2), 1)\n op = pb_g.createConnectedOp_GreaterOp(\n {0: input.id, 1: other.id},\n {\n 0: g._create_tensor_id(\"greater_out\"),\n },\n opid,\n settings,\n )\n\n return Tensor._from_pb_tensor(op.outTensor(0))",
"def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)",
"def __gt__(self, *args):\n return _ida_hexrays.operand_locator_t___gt__(self, *args)",
"def test_greater_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::ge\"},\n )",
"def greater(lhs, rhs):\n return _make.greater(lhs, rhs)",
"def setGreater(self,Node):\n self.greater=Node",
"def convert_broadcast_lesser(node, **kwargs):\n return create_basic_op_node('Less', node, kwargs)",
"def _greater_than_op(spec):",
"def create_greater_than_constraint(\n x,\n column_name,\n column_index,\n greater_than,\n upper_bound\n ):\n assert x.columns[column_index] == column_name\n return {\n \"name\": \"{0}_gt_{1}\".format(column_name, greater_than),\n \"type\": \"ineq\",\n \"fun\": lambda x: x[column_index] - greater_than,\n \"init\": lambda x: x.__setitem__(\n column_index, randint(greater_than, upper_bound))\n }",
"def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )",
"def __gt__(self, other):\n self.conds.append((self.name, '>', other))\n return self",
"def greater_than_or_equal(self) -> global___Expression:",
"def __gt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Greater(self, other)",
"def __gt__(self, *args):\n return _ida_hexrays.cexpr_t___gt__(self, *args)",
"def convert_maximum(node, **kwargs):\n return create_basic_op_node('Max', node, kwargs)",
"def __gt__(self, other):\n return greater(self, other)",
"def gt(self, val):\n\t\treturn GreaterThan(self, val)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def __gt__(self, other):\n return self.weight > other.weight",
"def test_greater_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ge\"},\n )",
"def gt(self, other):\n\n return self._get(\"gt\", other, Bool)",
"def __gt__(self, other):\n return self.greaterThan(other)",
"def greater_equal(lhs, rhs):\n return _make.greater_equal(lhs, rhs)",
"def __gt__(self, *args):\n return _ida_hexrays.var_ref_t___gt__(self, *args)",
"def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)",
"def greaterThan(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.GreaterThan)\n newq.setValue(value)\n return newq",
"def get_bprop_greater(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop",
"def __gt__(self, other):\n return self.weight() > other.weight()"
] | [
"0.636679",
"0.6333078",
"0.60892975",
"0.60536104",
"0.5972537",
"0.59382397",
"0.5819084",
"0.57593983",
"0.5737228",
"0.56998545",
"0.56720966",
"0.5599701",
"0.55706614",
"0.55538386",
"0.54705316",
"0.53752804",
"0.53341436",
"0.5307528",
"0.52884525",
"0.5286231",
"0.5279791",
"0.5242114",
"0.5229013",
"0.52283543",
"0.5213314",
"0.5173754",
"0.5144344",
"0.51379853",
"0.5126195",
"0.51186657"
] | 0.8361962 | 0 |
Map MXNet's broadcast_equal operator attributes to onnx's Equal operator and return the created node. | def convert_broadcast_equal(node, **kwargs):
return create_basic_op_node('Equal', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::eq\"},\n )",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def _eqz_2PC(self):\n # Create BinarySharedTensors from shares\n x0 = MPCTensor(self.share, src=0, ptype=Ptype.binary)\n x1 = MPCTensor(-self.share, src=1, ptype=Ptype.binary)\n\n # Perform equality testing using binary shares\n x0._tensor = x0._tensor.eq(x1._tensor)\n x0.encoder = self.encoder\n\n # Convert to Arithmetic sharing\n result = x0.to(Ptype.arithmetic, bits=1)\n result.encoder._scale = 1\n\n return result",
"def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)",
"def __eq__(self, other: 'OperatorConfig'):\n operator_name = self.operator_name == other.operator_name\n return (self.params == other.params\n and operator_name)",
"def test_expression_equality(self):\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.id == 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id==1 with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id == 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id==1 with models.Network.id=2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id != 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id!=1 with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id != 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id!=1 with models.Network.id=2\")\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label == \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label==\"network_1\" with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label == \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label==\"network_1\" with models.Network.label=\"network_2\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label!=\"network_1\" with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label!=\"network_1\" with models.Network.label=\"network_2\" \"\"\")\n\n # Checks on a specified attribute with operators \"IS\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label == None)\n value = expression.evaluate(KeyedTuple([{\"label\": None}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label==None with models.Network.label=None \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label == None)\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label==None with models.Network.label=\"network_2\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != None)\n value = expression.evaluate(KeyedTuple([{\"label\": None}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label!=None with models.Network.label=None \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != None)\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label!=None with models.Network.label=\"network_2\" \"\"\")",
"def convert_broadcast_lesser(node, **kwargs):\n return create_basic_op_node('Less', node, kwargs)",
"def __eq__(self, other):\r\n return (type(self) == type(other) and\r\n other.broadcastable == self.broadcastable)",
"def test_less_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::le\"},\n )",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def __eq__(self, other):\n return self.master.phy2abs(pos=other)",
"def __eq__(self, other):\n return self.master.abs2phy(pos=other)",
"def covert_broadcast_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)",
"def node_assignment(edge_index: nb.int64[:,:],\n edge_label: nb.int64[:],\n n: nb.int64) -> nb.int64[:]:\n # Loop over on edges, reset the group IDs of connected node\n on_edges = edge_index[np.where(edge_label)[0]]\n return union_find(on_edges, n)[0]",
"def _equal_to_op(spec):",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def create_equal_displacement_constraint():\n return EqualDisplacementConstraint()",
"def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op",
"def equals(self, *args):\n return _libsbml.XMLNode_equals(self, *args)",
"def conv2d_broadcastto_op(node_A, node_B):\r\n return Conv2d_BroadcastToOp()(node_A, node_B)",
"def test_not_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::ne\"},\n )",
"def __eq__(self, other):\n # check equality of names and attributes as well as that of the incident Node objects\n return \\\n self.weight == other.get_weight() and \\\n self.attributes.__eq__(other.get_attributes()) and \\\n self.get_incident_nodes().__eq__(other.get_incident_nodes())",
"def test_graphid_operator_eq_and_neq():\n\n for xstr, ystr in itertools.product([\"g1\", \"g2\", \"y7\", \"z123\"], repeat=2):\n x = _ir.GraphId(xstr)\n y = _ir.GraphId(ystr)\n\n if xstr == ystr:\n assert x == y\n assert not (x != y)\n else:\n assert not (x == y)\n assert x != y",
"def __eq__(self, other):\n return ZeroaryOperator.__eq__(self, other) and \\\n self.relation_key == other.relation_key"
] | [
"0.593598",
"0.56330514",
"0.5556243",
"0.53851575",
"0.5349817",
"0.52744114",
"0.5272917",
"0.5226989",
"0.5213488",
"0.51595694",
"0.51184994",
"0.50618356",
"0.5043084",
"0.4965128",
"0.4951298",
"0.49477023",
"0.49306282",
"0.49269903",
"0.49220464",
"0.4916875",
"0.49101403",
"0.49043274",
"0.48984888",
"0.48967156",
"0.48924512",
"0.48827237",
"0.48815414",
"0.48555166",
"0.48449326",
"0.4831808"
] | 0.79504997 | 0 |
Map MXNet's broadcast logical or operator attributes to onnx's Or operator and return the created node. | def convert_broadcast_logical_or(node, **kwargs):
return create_basic_op_node('Or', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)",
"def create_net(self, shape1, shape2, ir_version):\n\n #\n # Create ONNX model\n #\n\n from onnx import helper\n from onnx import TensorProto\n\n input1 = helper.make_tensor_value_info('input1', TensorProto.BOOL, shape1)\n input2 = helper.make_tensor_value_info('input2', TensorProto.BOOL, shape2)\n output = helper.make_tensor_value_info('output', TensorProto.BOOL, shape1)\n\n node_def = helper.make_node(\n 'Or',\n inputs=['input1', 'input2'],\n outputs=['output']\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n 'test_model',\n [input1, input2],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n # Create reference IR net\n\n ref_net = None\n if check_ir_version(10, None, ir_version):\n nodes_attributes = {\n 'input1': {'kind': 'op', 'type': 'Parameter'},\n 'input1_data': {'shape': shape1, 'kind': 'data'},\n 'input2': {'kind': 'op', 'type': 'Parameter'},\n 'input2_data': {'shape': shape2, 'kind': 'data'},\n 'node': {'kind': 'op', 'type': 'LogicalOr'},\n 'node_data': {'shape': shape1, 'kind': 'data'},\n 'result': {'kind': 'op', 'type': 'Result'}\n }\n ref_net = build_graph(nodes_attributes,\n [('input1', 'input1_data'),\n ('input2', 'input2_data'),\n ('input1_data', 'node'),\n ('input2_data', 'node'),\n ('node', 'node_data'),\n ('node_data', 'result')])\n\n return onnx_net, ref_net",
"def convert_binary_logical_op(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)",
"def __or__(self, other: Any) -> Operators:\n return self.operate(or_, other)",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def __or__(self, other):\n return self.fam.c_binop('or', self, other)",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def to_OR(self):\n \n # Create valid dummy variable\n dummy = \"d\"\n i = 0\n while dummy in self.items:\n dummy = \"d\" + str(i)\n i += 1\n new_bids = []\n\n # Add dummy variable to each bid\n for items, value in self.bids:\n new_items = list(items)\n new_items.append(dummy)\n new_bids.append((new_items, value))\n\n # Construct new OR bid\n return OR(new_bids)",
"def __or__(self, obj):\n return self._boolean_operation(obj, operator.__or__)",
"def to_orb(self):\n node_id = int(self.idd)\n node_type = GLOB.gmplsTypes.NODETYPE_UNKNOWN\n if type(self.typee) == str:\n node_type = GLOB.gmplsTypes.NODETYPE_NETWORK\n\n node_orb = GLOB.gmplsTypes.nodeIdent(node_id, node_type)\n return node_orb",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def createOr(self):\n return _libsbml.FbcOr_createOr(self)",
"def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)",
"def createOr(self):\n return _libsbml.FbcAnd_createOr(self)",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]",
"def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)",
"def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node",
"def convert_elemwise(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def _create_cast(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n map_dict = {\n tensor.float32: TensorProto.FLOAT, # FLOAT to float32\n tensor.int32: TensorProto.INT32, # INT32 to int32\n }\n node.attribute.extend([\n helper.make_attribute('to', map_dict[op.to]),\n ])\n return node",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def bitwise_or(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_or_op, other)",
"def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node",
"def __or__(self, other):\n return self._operation_or(other)",
"def action(self):\n operator = self.create_operator()\n operator.set_name(self.node_name)\n if not isinstance(operator, zautograd.Variable):\n z_tensor = operator(self.inputs)\n operator.set_weights(self.format_params(self.params))\n else:\n z_tensor = operator\n operator.node.element().set_weights(self.format_params(self.params))\n\n self.all_tensors[self.output] = z_tensor # update the all_tensors\n return z_tensor"
] | [
"0.6557331",
"0.59182656",
"0.58574575",
"0.5785354",
"0.5779746",
"0.5683941",
"0.563225",
"0.5597137",
"0.5566193",
"0.5559112",
"0.55208814",
"0.5506325",
"0.5495919",
"0.5447485",
"0.54401416",
"0.54356146",
"0.54324657",
"0.54322755",
"0.5418937",
"0.54083866",
"0.5404923",
"0.5402701",
"0.5395586",
"0.5386377",
"0.537473",
"0.53640723",
"0.53561425",
"0.5349764",
"0.53352624",
"0.53257203"
] | 0.76808363 | 0 |
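One more illustrative sketch (not dataset content): the Or node that convert_broadcast_logical_or emits operates on boolean tensors in ONNX, so a tiny valid graph around it looks like the following; names and shapes are made up.

import onnx
from onnx import helper, TensorProto

or_node = helper.make_node("Or", ["a", "b"], ["c"], name="or0")
graph = helper.make_graph(
    [or_node],
    "logical_or_sketch",
    [helper.make_tensor_value_info("a", TensorProto.BOOL, [2, 3]),
     helper.make_tensor_value_info("b", TensorProto.BOOL, [2, 3])],
    [helper.make_tensor_value_info("c", TensorProto.BOOL, [2, 3])],
)
onnx.checker.check_model(helper.make_model(graph))  # Or is defined on bool tensors, so this validates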
Map MXNet's broadcast logical xor operator attributes to onnx's Xor operator and return the created node. | def convert_broadcast_logical_xor(node, **kwargs):
return create_basic_op_node('Xor', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_broadcast_logical_or(node, **kwargs):\n return create_basic_op_node('Or', node, kwargs)",
"def xor(self, *args):\n return Xor(self, *args)",
"def xor_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 1, 1],\n [0, 0, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])",
"def __rxor__(self, other):\n return self.runtime.xor(self, other)",
"def Xor(*args, **kwargs):\n return _gdi_.Region_Xor(*args, **kwargs)",
"def convert_binary_logical_op(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)",
"def bitwise_xor(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_xor_op, other)",
"def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)",
"def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)",
"def __xor__(self, obj):\n return self._boolean_operation(obj, operator.__xor__)",
"def __xor__(self, y):\n result = self.clone()\n if isinstance(y, BinarySharedTensor):\n broadcast_tensors = torch.broadcast_tensors(result.share, y.share)\n result.share = broadcast_tensors[0].clone()\n elif is_tensor(y):\n broadcast_tensors = torch.broadcast_tensors(result.share, y)\n result.share = broadcast_tensors[0].clone()\n return result.__ixor__(y)",
"def logical_xor(lhs, rhs):\n return _make.logical_xor(lhs, rhs)",
"def bitwise_or(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] | self.registers[register[1]])\n logger.info(\"Bitwise OR on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))",
"def convert_logical_not(node, **kwargs):\n return create_basic_op_node('Not', node, kwargs)",
"def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)",
"def bitwise_or(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_or_op, other)",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def __ixor__(self, y):\n if is_tensor(y) or isinstance(y, int):\n if self.rank == 0:\n self.share ^= y\n elif isinstance(y, BinarySharedTensor):\n self.share ^= y.share\n else:\n raise TypeError(\"Cannot XOR %s with %s.\" % (type(y), type(self)))\n return self",
"def convert_broadcast_equal(node, **kwargs):\n return create_basic_op_node('Equal', node, kwargs)",
"def __or__(self, other):\n return self.fam.c_binop('or', self, other)",
"def bitwise_xor(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] ^ self.registers[register[1]])\n logger.info(\"Bitwise XOR on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def create_net(self, shape1, shape2, ir_version):\n\n #\n # Create ONNX model\n #\n\n from onnx import helper\n from onnx import TensorProto\n\n input1 = helper.make_tensor_value_info('input1', TensorProto.BOOL, shape1)\n input2 = helper.make_tensor_value_info('input2', TensorProto.BOOL, shape2)\n output = helper.make_tensor_value_info('output', TensorProto.BOOL, shape1)\n\n node_def = helper.make_node(\n 'Or',\n inputs=['input1', 'input2'],\n outputs=['output']\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n 'test_model',\n [input1, input2],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n # Create reference IR net\n\n ref_net = None\n if check_ir_version(10, None, ir_version):\n nodes_attributes = {\n 'input1': {'kind': 'op', 'type': 'Parameter'},\n 'input1_data': {'shape': shape1, 'kind': 'data'},\n 'input2': {'kind': 'op', 'type': 'Parameter'},\n 'input2_data': {'shape': shape2, 'kind': 'data'},\n 'node': {'kind': 'op', 'type': 'LogicalOr'},\n 'node_data': {'shape': shape1, 'kind': 'data'},\n 'result': {'kind': 'op', 'type': 'Result'}\n }\n ref_net = build_graph(nodes_attributes,\n [('input1', 'input1_data'),\n ('input2', 'input2_data'),\n ('input1_data', 'node'),\n ('input2_data', 'node'),\n ('node', 'node_data'),\n ('node_data', 'result')])\n\n return onnx_net, ref_net",
"def test_execute_xor(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, XOR1_ID, I1, I2] = names.lookup(\n [\"Sw1\", \"Sw2\", \"Xor1\", \"I1\", \"I2\"])\n\n # Make devices\n devices.make_device(XOR1_ID, devices.XOR)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, XOR1_ID, I1)\n network.make_connection(SW2_ID, None, XOR1_ID, I2)\n\n network.execute_network()\n assert new_network.get_output_signal(XOR1_ID, None) == devices.LOW\n\n # Set Sw1 to HIGH\n devices.set_switch(SW1_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.HIGH\n\n # Set Sw2 to HIGH\n devices.set_switch(SW2_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.LOW",
"def __or__(self, other: Any) -> Operators:\n return self.operate(or_, other)",
"def __or__(self, other):\n return BitBoard(self.num | other.num)",
"def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def",
"def _create_onehot(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n # axis, indices, depth, values\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n for attr in ['depth', 'values']:\n node.input.append(op.name + \":\" + attr)\n return node",
"def to_operator(self) -> Operator:\n return Operator(self.to_instruction())",
"def __xor__(self, other):\n return Or([self, whitespaces.CURRENT.normalize(other)])"
] | [
"0.738217",
"0.64635605",
"0.6400872",
"0.62084186",
"0.5969626",
"0.59505045",
"0.59500635",
"0.58331794",
"0.58050483",
"0.57880586",
"0.56789726",
"0.5678336",
"0.5673798",
"0.567273",
"0.5661514",
"0.5651046",
"0.5638466",
"0.5637232",
"0.55427027",
"0.5538561",
"0.54883206",
"0.54612195",
"0.5458417",
"0.54280037",
"0.5418826",
"0.5415662",
"0.5407078",
"0.5392876",
"0.5391496",
"0.53828573"
] | 0.79945916 | 0 |
Map MXNet's logical not operator attributes to onnx's Not operator and return the created node. | def convert_logical_not(node, **kwargs):
return create_basic_op_node('Not', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)",
"def logical_not(data):\n return _make.logical_not(data)",
"def bitwise_not(self) -> ColumnOperators:\n\n return self.operate(bitwise_not_op)",
"def NotNet(condition_blob_or_net):\n if isinstance(condition_blob_or_net, core.Net):\n condition_blob = GetConditionBlobFromNet(condition_blob_or_net)\n else:\n condition_blob = condition_blob_or_net\n\n not_net = core.Net('not_net')\n out_blob = not_net.Not(condition_blob)\n not_net.AddExternalOutput(out_blob)\n\n return not_net, out_blob",
"def _logical_not(x):\n x_ = _static_value(x)\n if x_ is None:\n return math_ops.logical_not(x)\n return constant_op.constant(np.logical_not(x_))",
"def RewriteNOT(self, expr):\n return None",
"def cnot(control: QubitInput, target: QubitInput) -> Instruction:\n return Instruction(CNot(), target=[control, target])",
"def logical_not(x, f=None):\n return _cur_framework(x, f=f).logical_not(x)",
"def NOT(expression):\n return {'$not': [expression]}",
"def is_not(self, other: Any) -> ColumnOperators:\n return self.operate(is_not, other)",
"def to_implies_not(formula: Formula) -> Formula:\r\n # Task 3.6c\r\n convert_and_op_1 = to_not_and(formula)\r\n and_formula_1 = Formula('->', Formula('p'), Formula('~', Formula('q')))\r\n and_formula_2 = Formula('->', Formula('~', Formula('p')), Formula('q'))\r\n\r\n map_and = {'&': Formula('~', Formula('->', and_formula_2, and_formula_1))}\r\n return convert_and_op_1.substitute_operators(map_and)",
"def __ne__(self, *args):\n return _ida_hexrays.operand_locator_t___ne__(self, *args)",
"def get_bprop_logical_not(self):\n\n def bprop(x, out, dout):\n return (zeros_like(x),)\n return bprop",
"def _not(self, _not):\n\n self.__not = _not",
"def _not(self, _not):\n\n self.__not = _not",
"def _not(self, _not):\n\n self.__not = _not",
"def _not(self, _not):\n\n self.__not = _not",
"def _not(self, _not):\n\n self.__not = _not",
"def bitwise_not(data):\n return _make.bitwise_not(data)",
"def __invert__(self):\n not_filter = proto.FilterExpression()\n not_filter.filter_not.filter_expression.MergeFrom(self.filter)\n self.filter = not_filter\n return self",
"def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)",
"def invert(self):\n if( self.cond == CT.NOT ):\n return Cond(self.cond.right)\n elif( isLogicalConst(self.cond) ):\n return Cond( invert(self.cond), None, None, cleaned = self.cleaned )\n elif ( isLogicalOp(self.cond) ):\n return Cond( invert(self.cond), self.left.invert(), self.right.invert(), cleaned = self.cleaned )\n else:\n return Cond( invert(self.cond), self.left, self.right, cleaned = self.cleaned )",
"def NotLTL(element: LTL) -> LTL:\n vars = element.variables\n formula = Not(element.formula)\n return LTL(formula, vars)",
"def negated(self):\n ops = {Eq: Ne, Ge: Lt, Gt: Le, Le: Gt, Lt: Ge, Ne: Eq}\n # If there ever will be new Relational subclasses, the following line\n # will work until it is properly sorted out\n # return ops.get(self.func, lambda a, b, evaluate=False: ~(self.func(a,\n # b, evaluate=evaluate)))(*self.args, evaluate=False)\n return Relational.__new__(ops.get(self.func), *self.args)",
"def _create_dropout(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('ratio', op.ratio),\n ])\n return node",
"def to_nand(formula: Formula) -> Formula:\r\n # Task 3.6b\r\n not_in_nand = Formula('-&', Formula('p'), Formula('p'))\r\n and_in_nand_1 = Formula('-&', Formula('p'), Formula('q'))\r\n and_in_nand_2 = Formula('-&', and_in_nand_1, and_in_nand_1)\r\n map_not_and = {'~': not_in_nand, '&': and_in_nand_2}\r\n formula_not_and = to_not_and(formula)\r\n return formula_not_and.substitute_operators(map_not_and)",
"def CNOT(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.CNOT, qubit_expr)",
"def __ne__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(ne, other)",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def ne(self, other):\n\n return self._get(\"ne\", other, Bool)"
] | [
"0.75782496",
"0.6934414",
"0.6626711",
"0.65900564",
"0.64096093",
"0.6354542",
"0.6351578",
"0.6329878",
"0.62601715",
"0.6213624",
"0.6126839",
"0.60965776",
"0.60675186",
"0.60501796",
"0.60501796",
"0.60501796",
"0.60501796",
"0.60501796",
"0.60476726",
"0.60350335",
"0.6015526",
"0.60046864",
"0.5978689",
"0.59666157",
"0.5870838",
"0.5859557",
"0.5853307",
"0.58446395",
"0.58192027",
"0.5788396"
] | 0.8060194 | 0 |
Map MXNet's size_array operator attributes to onnx's Size operator and return the created node. | def convert_size(node, **kwargs):
return create_basic_op_node('Size', node, kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_size(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n out = _op.ndarray_size(input_x, dtype=\"int64\")\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)",
"def arraySize( cls, value, typeCode = None ):\n return value.size",
"def _to_node_size(self, data):\n return NodeSize(\n id=data[\"id\"],\n name=data[\"name\"],\n ram=data[\"ram\"],\n disk=data[\"disk\"],\n bandwidth=data[\"bandwidth\"],\n price=data[\"price\"],\n driver=self.connection.driver,\n extra={\"max_data_disks\": data[\"max_data_disks\"], \"cores\": data[\"cores\"]},\n )",
"def size(self, obj):\n return np.array([self.width(obj), self.height(obj)])",
"def size(self):\n return self.getattr('size')",
"def get_size(self, shape_info):\r\n if shape_info:\r\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\r\n else: # a scalar\r\n return numpy.dtype(self.dtype).itemsize",
"def size(x, reduce_instance_dims=True, name=None):\n with tf.name_scope(name, 'size'):\n # Note: Calling `sum` defined in this module, not the builtin.\n return sum(tf.ones_like(x), reduce_instance_dims)",
"def size(map):\n return map['size']",
"def size(*args):",
"def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]",
"def size(self) -> str:\n return self._search_in_description(REGEXP_ATTR_SIZE)",
"def get_size(self, valueid):",
"def sizes(self) -> dict:\n raise NotImplementedError",
"def sized_ngrams_with_ele(self, target, size):\n return self.root.sized_ngrams_with_ele(target, size)",
"def size(name):",
"def eamap_size(*args):\n return _ida_hexrays.eamap_size(*args)",
"def size(self):\n return reduce(mul, self.shape, 1)",
"def _assign_sizes(self):",
"def _get_node_size(self, index):\n\n pass",
"def __len__(self):\n return self.n_node.shape[0]",
"def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a",
"def size(self):\n return self._N",
"def size(self):\n res = {\n f.name: ta.Column([v[idx] for v, _ in self._groups.items()], f.dtype)\n for idx, f in enumerate(self._key_fields)\n }\n\n res[\"size\"] = ta.Column([len(c) for _, c in self._groups.items()], dt.int64)\n\n return self._parent._fromdata(res, None)",
"def size(self):\n return _libsbml.ListOf_size(self)",
"def size(self):",
"def size(self):\n\t\treturn self.dims",
"def sizes(self) -> np.ndarray:\n\n return self.shape.prod(axis=0)",
"def udcall_map_size(*args):\n return _ida_hexrays.udcall_map_size(*args)",
"def ndarray_size(self) -> int:\n pass",
"def size(self) -> tf.Tensor:"
] | [
"0.6672948",
"0.59488493",
"0.5767115",
"0.5739616",
"0.5733506",
"0.57241845",
"0.5683179",
"0.56279117",
"0.5627782",
"0.55931187",
"0.5568864",
"0.5536573",
"0.55343175",
"0.5522237",
"0.55192995",
"0.5505195",
"0.55028814",
"0.5481147",
"0.54725236",
"0.54569185",
"0.5453374",
"0.54033214",
"0.5396406",
"0.5395145",
"0.53912526",
"0.5390945",
"0.53898054",
"0.53889674",
"0.533759",
"0.5332087"
] | 0.6832319 | 0 |
Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator and return the created node. | def convert_logsoftmax(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to int
axis = int(attrs.get("axis", -1))
temp = attrs.get("temperature", 'None')
if temp != 'None':
raise AttributeError("LogSoftMax: ONNX supports only temperature=None")
node = onnx.helper.make_node(
'LogSoftmax',
input_nodes,
[name],
axis=axis,
name=name
)
return [node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_logsoftmax(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n ndim = len(infer_shape(x))\n if axis < 0:\n axis += ndim\n m = _op.max(x, [axis], keepdims=True)\n e = _op.exp(x - m)\n s = _op.sum(e, [axis], keepdims=True)\n out = x - m - _op.log(s)\n g.add_node(op.output(\"Out\")[0], out)",
"def add_logsoftmax(self, input_name, name=None):\n return self._build_op('LogSoftmax', [input_name], name=name)",
"def _create_softmax(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def log_softmax(input, dim, inplace=False):\n return FunctionLib.apply(\n 'LogSoftmax', input.device, [input],\n outputs=[input if inplace else None], axis=dim)",
"def convert_softmax(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n axis = int(attrs.get(\"axis\", -1))\n\n c_softmax_node = []\n axis = -1\n\n transpose_node1 = onnx.helper.make_node(\n \"Transpose\",\n inputs=input_nodes,\n perm=(0, 2, 3, 1), # NCHW--NHWC--(NHW,C)\n name=name + '_tr1',\n outputs=[name + '_tr1']\n )\n\n softmax_node = onnx.helper.make_node(\n \"Softmax\",\n inputs=[name + '_tr1'],\n axis=axis,\n name=name + '',\n outputs=[name + '']\n )\n\n transpose_node2 = onnx.helper.make_node(\n \"Transpose\",\n inputs=[name + ''],\n perm=(0, 3, 1, 2), # NHWC--NCHW\n name=name + '_tr2',\n outputs=[name + '_tr2']\n )\n\n c_softmax_node.append(transpose_node1)\n c_softmax_node.append(softmax_node)\n c_softmax_node.append(transpose_node2)\n\n return c_softmax_node",
"def convert_softmax(g, op, block):\n\n axis = op.attr(\"axis\")\n input_shape = block.var(op.input(\"X\")[0]).shape\n if axis < 0:\n axis = len(input_shape) + axis\n x = g.get_node(op.input(\"X\")[0])\n m = _op.max(x, axis, keepdims=True)\n e = _op.exp(x - m)\n out = e / _op.sum(e, axis, keepdims=True)\n g.add_node(op.output(\"Out\")[0], out)",
"def new_softmax(labels, logits):\n target = tf.reshape(labels, [-1])\n f_logits = tf.exp(logits)\n\n # this is the negative part of the objf\n row_sums = tf.reduce_sum(f_logits, 1)\n\n t2 = tf.expand_dims(target, 1)\n range = tf.cast(tf.expand_dims(tf.range(tf.shape(target)[0]), 1), dtype=tf.int64)\n ind = tf.concat([range, t2], 1)\n res = tf.gather_nd(logits, ind)\n\n return -res + row_sums - 1",
"def log_softmax(x: jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:\n return x - jnp.expand_dims(jnp.log(jnp.sum(jnp.exp(x), axis=axis)), axis)",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())",
"def log_softmax(logits, axis=None, name=None, dim=None):\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n axis = -1\n return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)",
"def convert_log(node, **kwargs):\n return create_basic_op_node('Log', node, kwargs)",
"def log_softmax_v2(logits, axis=None, name=None):\n if axis is None:\n axis = -1\n return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def forward(self, x):\n return F.log_softmax(self.proj(x), dim=-1)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)",
"def __init__(self, dim, inplace=False):\n super(LogSoftmax, self).__init__()\n self.dim = dim\n self.inplace = inplace",
"def convert_logsigmoid(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.log(_op.tensor.sigmoid(x))\n g.add_node(op.output(\"Out\")[0], out)",
"def log_softmax_nd(logits, axes=(-1,)):\n logits -= tf.reduce_max(logits, axis=axes, keepdims=True)\n return logits - tf.reduce_logsumexp(logits, axis=axes, keepdims=True)",
"def assign_log(self, value):\n if not self._log:\n raise StructureError(\"Trying to assign log values to non-log weights.\")\n\n value = tf.where(tf.is_nan(value), tf.log(tf.ones_like(value) * 0.01), value)\n if self._mask and not all(self._mask):\n # Only perform masking if mask is given and mask contains any 'False'\n value += tf.log(tf.cast(tf.reshape(self._mask, value.shape), dtype=conf.dtype))\n normalized_value = value - tf.reduce_logsumexp(value, axis=-1, keepdims=True)\n return tf.assign(self._variable, normalized_value)",
"def softmax(x, name):\n with tf.name_scope(name):\n outputs = tf.nn.softmax (x)\n # Return layer's output\n return outputs",
"def convert_softmax_with_cross_entropy(g, op, block):\n\n logits = g.get_node(op.input(\"Logits\")[0])\n labels = g.get_node(op.input(\"Label\")[0])\n ignore_index = op.attr(\"ignore_index\")\n axis = op.attr(\"axis\")\n if axis < 0:\n axis = len(infer_shape(logits)) + axis\n\n softmax = _op.nn.softmax(logits, axis=axis)\n\n g.add_node(op.output(\"Softmax\")[0], softmax)\n\n softmax = _op.log(softmax)\n soft_label = op.attr(\"soft_label\")\n if soft_label:\n loss = _op.sum(-labels * softmax, axis=axis)\n else:\n labels_one = _op.one_hot(\n labels,\n on_value=_expr.const(1.0, dtype=\"float32\"),\n off_value=_expr.const(0.0, dtype=\"float32\"),\n depth=infer_shape(logits)[axis],\n axis=axis + 1,\n dtype=\"float32\",\n )\n labels_one = _op.squeeze(labels_one, axis=axis)\n loss = _op.sum(-labels_one * softmax, axis=axis)\n loss = _op.expand_dims(loss, axis=axis)\n if ignore_index != -100: # noly when soft_label is False\n assert not soft_label, \"soft_label and ignore_index cannot be set at the same time.\"\n ignore_mask = _op.not_equal(labels, _expr.const(ignore_index, dtype=\"int64\"))\n ignore_mask = _op.cast(ignore_mask, \"float32\")\n loss = _op.multiply(loss, ignore_mask)\n\n g.add_node(op.output(\"Loss\")[0], loss)",
"def add_output_ops(self, graph, output):\n with graph.as_default():\n softmax = []\n with tf.name_scope('inference'):\n for i, logits in enumerate(output):\n softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))\n output = softmax\n return output",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def masked_log_softmax(vector, mask):\n if mask is not None:\n mask = mask.float()\n vector = vector + mask.log()\n return torch.nn.functional.log_softmax(vector)",
"def st_gumbel_softmax(self,logits, temperature=1.0, mask=None):\n\t def convert_to_one_hot(indices, num_classes):\n\t \tbatch_size = indices.size(0)\n\t \tindices = indices.unsqueeze(1)\n\t \tone_hot = Variable(indices.data.new(batch_size, num_classes).zero_().scatter_(1, indices.data, 1))\n\t \treturn one_hot\n\n\t eps = 1e-20\n\t u = logits.data.new(*logits.size()).uniform_()\n\t gumbel_noise = Variable(-torch.log(-torch.log(u + eps) + eps))\n\t y = logits + gumbel_noise\n\t y = self.masked_softmax(logits=y / temperature, mask=mask)\n\t y_argmax = y.max(1)[1]\n\t # pdb.set_trace()\n\t y_hard = convert_to_one_hot(\n\t indices=y_argmax,\n\t num_classes=y.size(1)).float()\n\t y = (y_hard - y).detach() + y\n\t return y",
"def convert_argmax(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"axis\"))\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n node = onnx.helper.make_node(\n 'ArgMax',\n inputs=input_nodes,\n axis=axis,\n keepdims=keepdims,\n outputs=[name],\n name=name\n )\n return [node]",
"def get_output(self, X):\n return softmax(X)"
] | [
"0.72433263",
"0.6945794",
"0.6428142",
"0.63807946",
"0.6325919",
"0.62042975",
"0.6159842",
"0.60255045",
"0.5992672",
"0.58925354",
"0.5820617",
"0.5788801",
"0.5781208",
"0.55802464",
"0.55784833",
"0.5528118",
"0.55134857",
"0.55087835",
"0.5452463",
"0.5428512",
"0.54056734",
"0.5396093",
"0.53504765",
"0.5332223",
"0.5253514",
"0.5253514",
"0.5250401",
"0.52391875",
"0.52148765",
"0.5179763"
] | 0.75760037 | 0 |
Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators and return the created node. | def convert_norm(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
ord = int(attrs.get("ord", 2))
onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2"
if axes:
reduce_node = onnx.helper.make_node(
onnx_op_name,
input_nodes,
[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [reduce_node]
else:
reduce_node = onnx.helper.make_node(
onnx_op_name,
input_nodes,
[name],
keepdims=keepdims,
name=name
)
return [reduce_node] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_l2normalization(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mode = attrs.get(\"mode\", \"instance\")\n\n if mode != \"channel\":\n raise AttributeError(\"L2Normalization: ONNX currently supports channel mode only\")\n\n l2norm_node = onnx.helper.make_node(\n \"LpNormalization\",\n input_nodes,\n [name],\n axis=1, # channel only\n name=name\n )\n return [l2norm_node]",
"def convert_instance_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n gamma = g.get_node(op.input(\"Scale\")[0])\n beta = g.get_node(op.input(\"Bias\")[0])\n epsilon = op.attr(\"epsilon\")\n\n scale = center = True\n out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)\n g.add_node(op.output(\"Y\")[0], out)",
"def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node",
"def _create_batchnorm(cls, op, op_t):\n # first, we init batchnorm node\n epsilon = 1e-5 # the epsilon value used in singa\n bn_node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n bn_node.attribute.extend([\n helper.make_attribute('momentum', op.handle.factor),\n helper.make_attribute('epsilon', epsilon),\n ])\n # then we add nodes of scal, bias, mean, var\n nodes = []\n running_values = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, running_value in running_values.items():\n node_name = op.name + \":\" + tmp_name\n bn_node.input.append(node_name)\n\n nodes.append(bn_node)\n return nodes",
"def _create_elu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n ])\n return node",
"def get_norm_layer():\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n return norm_layer",
"def _create_reduceOp(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axes)),\n helper.make_attribute('keepdims', op.keepdims),\n ])\n return node",
"def norm2(self):\n return getattr(self, self.norm2_name)",
"def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(\n op_ctor(op0, op1, precision=precision)\n )\n # assigning attributes to the resulting node\n result = local_list[0]\n result.set_attributes(**kw)\n return result",
"def norm(self):",
"def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]",
"def convert_relu(node, **kwargs):\n return create_basic_op_node('Relu', node, kwargs)",
"def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def convert_instancenorm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n eps = float(attrs.get(\"eps\", 0.001))\n\n node = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=input_nodes,\n outputs=[name],\n name=name,\n epsilon=eps)\n\n return [node]",
"def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n \r\n if node.const_attr!=None:\r\n #print(\"hahah\")\r\n shape = tuple(input_vals[1])\r\n oldshape = list(input_vals[0].shape)\r\n for i in node.const_attr:\r\n oldshape.insert(i%(len(oldshape)+1),1)\r\n #print(oldshape)\r\n #print(shape)\r\n return np.array(np.broadcast_to(input_vals[0].reshape(tuple(oldshape)),shape))\r\n #return np.broadcast_to(input_vals[0], node.const_attr)\r\n else:\r\n return np.broadcast_to(input_vals[0], tuple(input_vals[1]))",
"def operator_one_norm(W):\n return torch.max(torch.sum(torch.abs(W), dim=(0, 2, 3)))",
"def _create_flatten(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)",
"def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)",
"def convert_broadcast_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)",
"def compute(self, node, input_vals):\n assert len(input_vals) == 1\n return input_vals[0] + node.const_attr",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n return input_vals[0] + node.const_attr",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n #assert len(input_vals[1].shape) ==1\r\n return input_vals[0].reshape(tuple(node.const_attr))",
"def _create_elu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n shape = tuple(input_vals[1])\r\n divby = 1\r\n if node.const_attr!=None:\r\n oldshape = list(input_vals[0].shape)\r\n #print(\"hahah\")\r\n for i in node.const_attr:\r\n oldshape.insert(i%(len(oldshape)+1),1)\r\n divby *= shape[i]\r\n #print(oldshape)\r\n #print(shape)\r\n return np.array(np.broadcast_to(input_vals[0].reshape(tuple(oldshape)),shape))/divby\r\n #return np.broadcast_to(input_vals[0], node.const_attr)\r\n else:\r\n for i in shape:\r\n divby *= i\r\n return np.broadcast_to(input_vals[0], shape)/divby",
"def convert_broadcast_to(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n shape_list = convert_string_to_list(attrs[\"shape\"])\n\n initializer = kwargs[\"initializer\"]\n output_shape_np = np.array(shape_list, dtype='int64')\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]\n dims = np.shape(output_shape_np)\n\n output_shape_name = \"expand_attr_tensor\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=output_shape_name,\n data_type=data_type,\n dims=dims,\n vals=shape_list,\n raw=False,\n )\n )\n\n input_nodes.append(output_shape_name)\n expand_node = onnx.helper.make_node(\n \"Expand\",\n input_nodes,\n [name],\n name=name\n )\n\n return [tensor_node, expand_node]"
] | [
"0.5756237",
"0.56087494",
"0.5600301",
"0.55728614",
"0.55290496",
"0.54875606",
"0.5486537",
"0.5459905",
"0.53227746",
"0.52392274",
"0.5229864",
"0.5215761",
"0.52136004",
"0.5174905",
"0.5169228",
"0.5152919",
"0.5142199",
"0.51399326",
"0.51165926",
"0.5102471",
"0.5090136",
"0.5086156",
"0.5080589",
"0.507389",
"0.5069882",
"0.50624853",
"0.50203216",
"0.5011571",
"0.5006988",
"0.4995831"
] | 0.7403831 | 0 |