| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
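Each row below pairs a natural-language query (a setter docstring from `JsonJdbcIngestionProperties`) with a positive code document, a `metadata` block naming the training objective, 30 hard-negative snippets, and their scores. As a minimal sketch of how one row might be read, assuming the dump has been exported as JSON Lines keyed by the column names above (the file path is hypothetical):

```python
import json

# Hypothetical export path; assumes one JSON object per line,
# keyed by the column names in the header table above.
with open("jdbc_ingestion_rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        query = row["query"]          # e.g. "Sets the system of this JsonJdbcIngestionProperties."
        document = row["document"]    # the positive code snippet
        negatives = row["negatives"]  # 30 hard-negative snippets
        neg_scores = [float(s) for s in row["negative_scores"]]
        assert len(negatives) == len(neg_scores) == 30
        # document_score / document_rank describe the positive's own score and rank.
        print(query, float(row["document_score"]), int(row["document_rank"]))
```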
Sets the last_modified_on of this JsonJdbcIngestionProperties. | def last_modified_on(self, last_modified_on):
self._last_modified_on = last_modified_on | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setLastModified(when):",
"def last_modified(self, last_modified):\n\n self._last_modified = last_modified",
"def last_modified_dts(self, last_modified_dts):\n\n self._last_modified_dts = last_modified_dts",
"def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by",
"def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by",
"def last_modification(self, last_modification):\n\n self._last_modification = last_modification",
"def last_updated(self, last_updated: str):\n\n self._last_updated = last_updated",
"def last_updated(self, last_updated):\n\n self._last_updated = last_updated",
"def last_updated(self, last_updated):\n\n self._last_updated = last_updated",
"def last_updated(self, last_updated):\n\n self._last_updated = last_updated",
"def last_updated(self, last_updated):\n\n self._last_updated = last_updated",
"def set_LastUpdatedAfter(self, value):\n super(ListOrdersInputSet, self)._set_input('LastUpdatedAfter', value)",
"def modified_at(self, modified_at):\n\n self._modified_at = modified_at",
"def modified_at(self, modified_at):\n\n self._modified_at = modified_at",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_by(self, last_modified_by):\n if last_modified_by is not None and len(last_modified_by) > 100:\n raise ValueError(\"Invalid value for `last_modified_by`, length must be less than or equal to `100`\")\n\n self._last_modified_by = last_modified_by",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_update_timestamp(self, last_update_timestamp):\n\n self._last_update_timestamp = last_update_timestamp",
"def date_modified(self, date_modified):\n \n self._date_modified = date_modified",
"def last_modified_at(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_at\")",
"def file_last_updated(self, file_last_updated):\n\n self._file_last_updated = file_last_updated",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified"
]
| [
"0.6223496",
"0.62083447",
"0.5805215",
"0.5642214",
"0.5642214",
"0.5547025",
"0.55205786",
"0.5488712",
"0.5488712",
"0.5488712",
"0.5488712",
"0.5435409",
"0.54256123",
"0.54256123",
"0.54190284",
"0.54190284",
"0.53778666",
"0.53735816",
"0.53735816",
"0.53735816",
"0.53735816",
"0.53735816",
"0.53735816",
"0.5334284",
"0.5284289",
"0.52762",
"0.52744406",
"0.5218128",
"0.5218128",
"0.5218128"
]
| 0.7030811 | 0 |
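The `metadata.objective.triplet` entry marks each row as a (query, document, negatives) triplet, and the `negative_scores` appear to be precomputed query–negative similarities listed in descending order, consistent with retriever-mined hard negatives. As a minimal sketch of the kind of triplet objective such a row could feed (the `embed` callable, the margin value, and the use of cosine similarity are assumptions, not taken from the dataset):

```python
import torch
import torch.nn.functional as F

def triplet_margin_loss(embed, query, document, negatives, margin=0.2):
    """Pull the query toward its positive document and push it away from
    each hard negative. `embed` is any callable mapping a string to a
    1-D tensor (hypothetical placeholder for a real encoder)."""
    q = F.normalize(embed(query), dim=-1)
    d = F.normalize(embed(document), dim=-1)
    negs = torch.stack([F.normalize(embed(n), dim=-1) for n in negatives])
    pos_sim = q @ d          # cosine similarity to the positive document
    neg_sims = negs @ q      # cosine similarity to each of the 30 negatives
    # Hinge: every negative should trail the positive by at least `margin`.
    return F.relu(margin - pos_sim + neg_sims).mean()
```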
Sets the system of this JsonJdbcIngestionProperties. | def system(self, system):
self._system = system | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def set_system_name(self, system_name):\n\n\t\tif system_name is not None and not isinstance(system_name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: system_name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__system_name = system_name\n\t\tself.__key_modified['system_name'] = 1",
"def system_id(self, system_id):\n\n self._system_id = system_id",
"def system_id(self, system_id):\n\n self._system_id = system_id",
"def __add_system_property_args(self, other_sys_props_dict):\n for key, value in other_sys_props_dict.iteritems():\n self.__sys_props[key] = value",
"def _update_system(self, system):\n for p in self.required_properties:\n if p not in self.results:\n raise MDCalculatorError('Requested property {:s} not in '\n 'results'.format(p))\n elif p == self.force_handle:\n self._set_system_forces(system)\n else:\n dim = self.results[p].shape\n system.properties[p] = self.results[p].view(\n system.n_replicas, system.n_molecules, *dim[1:]) * \\\n self.property_conversion[p]",
"def __init__(__self__, *,\n system_service_type: pulumi.Input[Union[str, 'SystemServiceType']]):\n pulumi.set(__self__, \"system_service_type\", system_service_type)",
"def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}",
"def sys_prefix(self, sys_prefix):\n self._path_sys_prefix = sys_prefix",
"def set_property(self, prop_name, prop_value):\n if prop_name == \"database_name\":\n if (prop_value and isinstance(prop_value, str)\n and prop_value[-3:] == \".db\"):\n self.config[\"config\"][\"database_name\"] = prop_value\n self.database_name = prop_value\n elif prop_name == \"volume_limit\":\n if isinstance(prop_value, int) and prop_value > 0:\n self.config[\"config\"][\"volume_limit\"] = str(prop_value)\n self.volume_limit = prop_value\n elif prop_name == \"series_per_page\":\n if isinstance(prop_value, int) and prop_value >= 0:\n self.config[\"config\"][\"series_per_page\"] = str(prop_value)\n self.series_per_page = prop_value\n elif prop_name == \"compact_list\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"compact_list\"] = str(prop_value)\n self.compact_list = prop_value\n elif prop_name == \"show_empty_series\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"show_empty_series\"] = str(prop_value)\n self.show_empty_series = prop_value\n elif prop_name == \"default_to_gui\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"default_to_gui\"] = str(prop_value)\n self.default_to_gui = prop_value\n with open(self.filename, 'w') as config_ini:\n self.config.write(config_ini)",
"def system_amount(self, system_amount):\n\n self._system_amount = system_amount",
"def set_system_definition(\n self,\n system_id,\n allocation\n ): # pylint:disable=arguments-differ\n system = self.get_exec_system(system_id)\n\n if not system.available:\n system.enable()\n\n storage_settings = {}\n exec_settings = {}\n for host, val in settings.PORTAL_EXEC_SYSTEMS.items():\n if host in system.storage.host:\n storage_settings = val\n if host in system.login.host:\n exec_settings = val\n\n system.site = settings.PORTAL_DOMAIN\n system.name = \"Execution system for user {}\".format(self.user.username)\n system.storage.home_dir = storage_settings['home_dir'].format(\n self.user_systems_mgr.get_private_directory()) if 'home_dir' in storage_settings else ''\n system.storage.port = system.login.port\n system.storage.root_dir = '/'\n system.storage.auth.username = self.user.username\n system.storage.auth.type = system.AUTH_TYPES.SSHKEYS\n system.login.auth.username = self.user.username\n system.login.auth.type = system.AUTH_TYPES.SSHKEYS\n system.work_dir = '/work/{}'.format(self.user_systems_mgr.get_private_directory())\n system.scratch_dir = exec_settings['scratch_dir'].format(\n self.user_systems_mgr.get_private_directory()) if 'scratch_dir' in exec_settings else ''\n\n if system.scheduler == 'SLURM':\n for queue in system.queues.all():\n if queue.custom_directives:\n queue.custom_directives = '-A {}'.format(allocation)\n return system",
"def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))",
"def set(self, properties):\n raise NotImplementedError",
"def set_pyenv_cfg(self): # noqa: D205\n super().set_pyenv_cfg()\n self.pyenv_cfg[\"base-prefix\"] = self.interpreter.system_prefix\n self.pyenv_cfg[\"base-exec-prefix\"] = self.interpreter.system_exec_prefix\n self.pyenv_cfg[\"base-executable\"] = self.interpreter.system_executable",
"def system_time(self, system_time):\n\n self._system_time = system_time",
"def system_wide(self, system_wide):\n\n self._system_wide = system_wide",
"def __init__(__self__, *,\n management_servers: Sequence['outputs.CSIPowerMaxRevProxySpecConfigStandAloneConfigManagementServers'],\n storage_arrays: Sequence['outputs.CSIPowerMaxRevProxySpecConfigStandAloneConfigStorageArrays']):\n pulumi.set(__self__, \"management_servers\", management_servers)\n pulumi.set(__self__, \"storage_arrays\", storage_arrays)",
"def remote_setSysinfo(self, request, value):\r\n raise NotImplementedError",
"def __init__(self, env, system=None):\n self._env = env\n self._system = system if system is not None else {}",
"def system_to_user(self, system_to_user):\n\n self._system_to_user = system_to_user",
"def system_status(self, system_status):\n\n self._system_status = system_status",
"def _editSysconfig():\n dbUrl = \"jdbc:postgresql://\" + getDbHostName() + \":\" + getDbPort() + \"/\" + basedefs.DB_NAME\n if \"DB_SECURE_CONNECTION\" in controller.CONF.keys() and controller.CONF[\"DB_SECURE_CONNECTION\"] == \"yes\":\n dbUrl = dbUrl + \"?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\"\n\n proxyEnabled = utils.compareStrIgnoreCase(controller.CONF[\"OVERRIDE_HTTPD_CONFIG\"], \"yes\")\n utils.editEngineSysconfig(proxyEnabled=proxyEnabled,\n dbUrl=dbUrl,\n dbUser=utils.getDbUser(),\n fqdn=controller.CONF[\"HOST_FQDN\"],\n http=controller.CONF[\"HTTP_PORT\"],\n https=controller.CONF[\"HTTPS_PORT\"],\n javaHome=controller.CONF[\"JAVA_HOME\"])",
"def __setattr__(self, name, value):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n object.__setattr__(self, name, value)\n else:\n setattr(self.env, name, value)",
"def set_system_defined(self, system_defined):\n\n\t\tif system_defined is not None and not isinstance(system_defined, bool):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: system_defined EXPECTED TYPE: bool', None, None)\n\t\t\n\t\tself.__system_defined = system_defined\n\t\tself.__key_modified['system_defined'] = 1",
"def setSystemGrid(self):\n fromSystem = self.myGalaxy.systems[self.fromSystem]\n toSystem = self.myGalaxy.systems[self.toSystem]\n self.systemGrid = funcs.getMapQuadrant(toSystem, self, fromSystem.x, fromSystem.y,\n toSystem.x, toSystem.y)",
"def system(self):\n return self['system']",
"def set_json(config):\n global CAX_CONFIGURE\n CAX_CONFIGURE = config",
"def system_properties(self):\r\n return dict(self._get_system_properties(self.java))",
"def __init__(self):\n '''Lets find ot the system we run on'''\n self.syst = platform.system()\n '''And where we are'''\n self.module_abs_path = os.path.abspath(os.path.dirname(__file__))\n if self.syst == 'Windows':\n self.sonata_suite_config_json = self.vm_logsrv_cnf_location = os.path.join(self.module_abs_path,\n \"..\\\\configs_sonata\\sonata_conf.json\")\n elif self.syst == 'Linux':\n self.sonata_suite_config_json = self.vm_logsrv_cnf_location = os.path.join(self.module_abs_path,\n \"../configs_sonata/sonata_conf.json\")\n '''get some tools ready'''\n self.__utils__=var_utils.Varutils()\n '''MAP OF CONFIG PARAMS FROM JSON'''\n self.sonata_suite_config = self.__utils__.read_json_to_map(data_location=self.sonata_suite_config_json)"
]
| [
"0.5509424",
"0.52224773",
"0.5148992",
"0.5148992",
"0.49638796",
"0.49095058",
"0.48922232",
"0.48535156",
"0.48432377",
"0.48115724",
"0.4799595",
"0.47686774",
"0.471078",
"0.46972197",
"0.46735558",
"0.46624678",
"0.46403193",
"0.4637089",
"0.46324444",
"0.4600796",
"0.45877287",
"0.45833337",
"0.45614034",
"0.4557304",
"0.45438993",
"0.45401058",
"0.45087463",
"0.44997135",
"0.44978717",
"0.44738308"
]
| 0.57980996 | 1 |
Sets the jdbc_properties of this JsonJdbcIngestionProperties. | def jdbc_properties(self, jdbc_properties):
self._jdbc_properties = jdbc_properties | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jdbc_driver(self, jdbc_driver):\n\n self._jdbc_driver = jdbc_driver",
"def featurestore_jdbc_connector_connections(self, featurestore_jdbc_connector_connections):\n\n self._featurestore_jdbc_connector_connections = featurestore_jdbc_connector_connections",
"def set(self, properties):\n raise NotImplementedError",
"def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))",
"def with_property(self, schema):\n self.properties = self.properties if self.properties != None else []\n self.properties.append(schema)\n return self",
"def set_properties(self, dict_properties):\n for name, value in dict_properties.items():\n if name.startswith(\"_\") and hasattr(self, name[1:]):\n setattr(self, name[1:], value)\n elif name.startswith(\"_\") and not hasattr(self, name[1:]):\n continue\n else:\n self.properties[name] = TempAdProperty(name, value)",
"def properties(self, properties):\n\n self._properties = properties",
"def properties(self, properties):\n\n self._properties = properties",
"def properties(self, properties):\n\n self._properties = properties",
"def __from_json__(self, properties: dict):\r\n # Look for units first so the temperatures are set correctly.\r\n value = properties.pop(\"temperature_scale\", None)\r\n if value is not None:\r\n eval(f\"self.set_temperature_scale('{value}')\")\r\n\r\n # Let superclass handle the rest\r\n super().__from_json__(properties)",
"def properties_set(self, properties):\n self._put('properties', properties)",
"def set_properties(self, property_dict):\n self.properties.update(property_dict)",
"def set_JobProperties(self,data):\n tp=type(data)\n if tp.__name__=='dict':\n list_context=list(JobProperty._nInstancesContextDict.keys())\n for i in data.keys():\n for j in data[i].keys():\n if list_context.count(i+'.'+j)==1:\n jp=JobProperty._nInstancesContextDict[i+'.'+j]\n jp.set_Value(data[i][j])\n self._log.info(\"The JobProperty %s has been set to %s\",\n i+'.'+j,data[i][j])\n else:\n self._log.warning(\"The JobProperty %s does not exist\",\n i+'.'+j)\n else:\n raise ValueError('The received data is has not the expected'\n 'type/format')",
"def set_datajoint_config(jwt_payload: dict):\n dj.config['database.host'] = jwt_payload['databaseAddress']\n dj.config['database.user'] = jwt_payload['username']\n dj.config['database.password'] = jwt_payload['password']\n\n dj.conn(reset=True)",
"def parse_json_profile(self, **kwargs):\n \n print(\"[%] Setting attributes from JSON Profile\")\n # This snippet takes the keys ignoring the first key which is task and then shows\n # what should be set in the kwargs parsing. \n print(f\"[-] The following keys are needed for this task : {[x for x in list(kwargs.keys())[1:]]}\")\n\n try:\n self.database_location = kwargs[\"database_location\"]\n self.masterpassword = kwargs[\"masterpassword\"]\n \n print(f\"[*] Setting the command attribute : {self.database_location}\")\n print(f\"[*] Setting the command attribute : {self.masterpassword}\")\n\n \n except:\n print(self.cl.red(\"[!] Error Setting JSON Profile attributes, check matching key values in the profile\"))\n\n # once these have all been set in here, then self.create_autoIT_block() gets called which pushes the task on the stack\n self.create_autoIT_block()",
"def set(self, property_dict):\r\n self.metadata = self.db.update(self.path, property_dict).json()",
"def instance_properties(self, instance_properties):\n\n self._instance_properties = instance_properties",
"def set_properties(props):\n return impl.set_properties(**locals())",
"def __eq__(self, other):\n if not isinstance(other, JsonJdbcIngestionProperties):\n return False\n\n return self.__dict__ == other.__dict__",
"def __init__(self, properties_dict):\n for k, v in properties_dict.items():\n self.__setattr__(k,v)",
"def set(self, properties):\n self._column.attrs = properties",
"def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}",
"def copyJdbcConnectionToDestination(self, jdbcConnection, destination, deployMode=False):\n\t\tlocalSession = self.configDBSession()\n\n\t\tif self.copyDestinations == None:\t\n\t\t\tif deployMode == False:\n\t\t\t\tlogging.warning(\"There are no destination for this table to receive a copy\")\n\t\t\telse:\n\t\t\t\tlogging.warning(\"There are no destination for this deployment\")\n\t\t\treturn\n\n\t\tremoteSession = self.remoteInstanceConfigDBSession()\n\n\t\tjdbcConnections = aliased(configSchema.jdbcConnections)\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\t# Check if if we are going to sync the credentials for this destination\n\t\tresult = (localSession.query(\n\t\t\t\tdbimportInstances.sync_credentials\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == destination)\n\t\t\t.one())\n\n\t\tif result[0] == 1:\n\t\t\tsyncCredentials = True\n\t\telse:\n\t\t\tsyncCredentials = False\n\n\t\t# Check if the jdbcConnection exists on the remote DBImport instance\n\t\tresult = (remoteSession.query(\n\t\t\t\tjdbcConnections\n\t\t\t)\n\t\t\t.filter(jdbcConnections.dbalias == jdbcConnection)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tnewJdbcConnection = configSchema.jdbcConnections(\n\t\t\t\tdbalias = jdbcConnection,\n\t\t\t\tjdbc_url = '')\n\t\t\tremoteSession.add(newJdbcConnection)\n\t\t\tremoteSession.commit()\n\n\t\t# Read the entire import_table row from the source database\n\t\tsourceJdbcConnection = pd.DataFrame(localSession.query(configSchema.jdbcConnections.__table__)\n\t\t\t.filter(configSchema.jdbcConnections.dbalias == jdbcConnection)\n\t\t\t)\n\n\t\t# Table to update with values from import_table source\n\t\tremoteJdbcConnection = (remoteSession.query(configSchema.jdbcConnections.__table__)\n\t\t\t.filter(configSchema.jdbcConnections.dbalias == jdbcConnection)\n\t\t\t.one()\n\t\t\t)\n\n\t\t# Create dictonary to be used to update the values in import_table on the remote Instance\n\t\tupdateDict = {}\n\t\tfor name, values in sourceJdbcConnection.iteritems():\n\t\t\tif name == \"dbalias\":\n\t\t\t\tcontinue\n\n\t\t\tif syncCredentials == False and name in (\"credentials\", \"private_key_path\", \"public_key_path\"):\n\t\t\t\tcontinue\n\n\t\t\tvalue = str(values[0])\n\t\t\tif value == \"None\":\n\t\t\t\tvalue = None\n\n\t\t\tupdateDict[\"%s\"%(name)] = value \n\n\n\t\t# Update the values in jdbc_connections on the remote instance\n\t\t(remoteSession.query(configSchema.jdbcConnections)\n\t\t\t.filter(configSchema.jdbcConnections.dbalias == jdbcConnection)\n\t\t\t.update(updateDict))\n\t\tremoteSession.commit()\n\n\t\tlocalSession.close()\n\t\tremoteSession.close()",
"def setup (cls, **kwargs):\n\n cherrypy.log (\"Using PostgresSession\",\n context = 'SESSION', severity = logging.INFO)\n\n for k, v in kwargs.items ():\n setattr (cls, k, v)",
"def set_property(self, prop_name, prop_value):\n if prop_name == \"database_name\":\n if (prop_value and isinstance(prop_value, str)\n and prop_value[-3:] == \".db\"):\n self.config[\"config\"][\"database_name\"] = prop_value\n self.database_name = prop_value\n elif prop_name == \"volume_limit\":\n if isinstance(prop_value, int) and prop_value > 0:\n self.config[\"config\"][\"volume_limit\"] = str(prop_value)\n self.volume_limit = prop_value\n elif prop_name == \"series_per_page\":\n if isinstance(prop_value, int) and prop_value >= 0:\n self.config[\"config\"][\"series_per_page\"] = str(prop_value)\n self.series_per_page = prop_value\n elif prop_name == \"compact_list\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"compact_list\"] = str(prop_value)\n self.compact_list = prop_value\n elif prop_name == \"show_empty_series\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"show_empty_series\"] = str(prop_value)\n self.show_empty_series = prop_value\n elif prop_name == \"default_to_gui\":\n if ((isinstance(prop_value, int) and prop_value in [0, 1])\n or isinstance(prop_value, bool)):\n self.config[\"config\"][\"default_to_gui\"] = str(prop_value)\n self.default_to_gui = prop_value\n with open(self.filename, 'w') as config_ini:\n self.config.write(config_ini)",
"def connection(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def set_compression(self, compression):\n converter = geowave_pkg.datastore.redis.config.RedisOptions.CompressionConverter()\n self._java_ref.setCompression(converter.convert(compression))",
"def setup_schemaProperties(self):\n\n propname = 'xml_schema'\n curr = getattr(self, propname, '')\n try:\n self._delProperty(propname)\n except ValueError:\n pass\n try:\n delattr(self, propname)\n except:\n pass\n setattr(self, propname, curr)\n\n properties = list(self._properties)\n properties.append({'id': propname,\n 'type': 'selection',\n 'select_variable': 'get_schemaCandidates',\n 'mode': 'w'})\n\n self._properties = tuple(properties)",
"def setProperties(self, *args):\n return _libsbml.SBMLConverter_setProperties(self, *args)",
"def _setup_origin_table(self):\n if self._create_table_if_not_exists(self.dataset):\n return\n\n directory, pipeline_builder = self._directory_origin(MAX_CONCURRENCY)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=self.dataset)\n\n directory >> jdbc_producer\n\n pipeline = pipeline_builder.build().configure_for_environment(self.environments['database'])\n self.sdc_executor.add_pipeline(pipeline)\n self.sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(self.record_count, timeout_sec=LOAD_TIMEOUT)\n self.sdc_executor.stop_pipeline(pipeline)\n self.sdc_executor.remove_pipeline(pipeline)"
]
| [
"0.5649309",
"0.50222313",
"0.47574311",
"0.46739405",
"0.46268708",
"0.45842722",
"0.45746607",
"0.45746607",
"0.45746607",
"0.45319366",
"0.44661254",
"0.44294205",
"0.4381224",
"0.43291444",
"0.42857668",
"0.42779317",
"0.42766798",
"0.42484736",
"0.42474186",
"0.4227801",
"0.42005703",
"0.41525275",
"0.4138241",
"0.4136169",
"0.41349927",
"0.40938473",
"0.40856332",
"0.40833157",
"0.403837",
"0.40270293"
]
| 0.80829346 | 0 |
Sets the jdbc_driver of this JsonJdbcIngestionProperties. | def jdbc_driver(self, jdbc_driver):
self._jdbc_driver = jdbc_driver | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def set_driver(self, driver):\n self.driver = driver",
"def featurestore_jdbc_connector_connections(self, featurestore_jdbc_connector_connections):\n\n self._featurestore_jdbc_connector_connections = featurestore_jdbc_connector_connections",
"def driver(self, driver):\n\n self._driver = driver",
"def driver(self, driver):\n\n self._driver = driver",
"def set_backend(self, backend):\n self.backend = backend",
"def driver_name(self, driver_name):\n\n self._driver_name = driver_name",
"def set_datajoint_config(jwt_payload: dict):\n dj.config['database.host'] = jwt_payload['databaseAddress']\n dj.config['database.user'] = jwt_payload['username']\n dj.config['database.password'] = jwt_payload['password']\n\n dj.conn(reset=True)",
"def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))",
"def set_connection(scheme):\n sqlhub.processConnection = connectionForURI(scheme)",
"def remove_jdbc_driver(self, feed_id, server_id, driver_name):\n payload = {\"resourcePath\": \"/t;{}/f;{}/r;{}~%2Fsubsystem%3Ddatasources%2Fjdbc-driver%3D{}\"\n .format(self.tenant_id, feed_id, server_id, driver_name)}\n return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name=\"RemoveJdbcDriver\",\n payload=payload)",
"def set_connection(self, **kwargs):\n if self.schema is None:\n self.conn = psycopg2.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n dbname=self.db_name,\n **self.kwargs)\n else:\n self.conn = psycopg2.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n dbname=self.db_name,\n options=f'--search_path={self.schema}',\n **self.kwargs)",
"def set_backend(self, backend):\n if backend not in AVAILABLE_BACKENDS:\n raise StorageError(f'Unrecognized backend {backend}; use one of {AVAILABLE_BACKENDS}')\n if backend == 'tinydb':\n LOGGER.debug(\"Using TinyDB database as requested for %s\", self.name)\n self._backend = DB_TINYDB\n elif backend == 'sqlite':\n LOGGER.debug(\"Using SQLite database as requested for %s\", self.name)\n self._backend = DB_SQLITE\n elif backend == 'auto':\n if self._sqlite_storage.database_exists():\n LOGGER.debug(\"Using SQLite database in AUTO mode because one already exists for %s\", self.name)\n self._backend = DB_SQLITE\n else:\n LOGGER.debug(\"Using TinyDB (default) in AUTO because no database already exists for %s\", self.name)\n self._backend = DB_TINYDB",
"def get_usas_jdbc_url():\n if not CONFIG.DATABASE_URL:\n raise ValueError(\"DATABASE_URL config val must provided\")\n\n return get_jdbc_url_from_pg_uri(CONFIG.DATABASE_URL)",
"def __init__(__self__, *,\n driver: Optional[pulumi.Input[str]] = None):\n if driver is not None:\n pulumi.set(__self__, \"driver\", driver)",
"def driver_username(self, driver_username):\n\n self._driver_username = driver_username",
"def driver(self) -> GraphDatabase.driver:\n raise NotImplementedError\n # if not self._driver:\n # self._driver = GraphDatabase.driver(\n # self.url,\n # auth=(self.username, self.password),\n # )\n #\n # return self._driver",
"def set_backend(*backend):\n global _BACKEND\n if not backend:\n raise ValueError('Need at least one backend.')\n _BACKEND = backend",
"def add_jdbc_driver(self, feed_id, server_id, driver_name, module_name,\n driver_class, driver_jar_name=None, binary_content=None,\n binary_file_location=None):\n if driver_jar_name and not binary_content and not binary_file_location:\n raise KeyError(\"If 'driver_jar_name' field is set the jar file must be passed\"\n \" as binary or file location\")\n resource_path = \"/t;{}/f;{}/r;{}~~\".format(self.tenant_id, feed_id, server_id)\n payload = {\"resourcePath\": resource_path, \"driverJarName\": driver_jar_name,\n \"driverName\": driver_name, \"moduleName\": module_name,\n \"driverClass\": driver_class}\n return self.cmd_gw_ws_api.hwk_invoke_operation(operation_name=\"AddJdbcDriver\",\n payload=payload,\n binary_file_location=binary_file_location,\n binary_content=binary_content)",
"def __init__(__self__, *,\n driver: pulumi.Input[str]):\n pulumi.set(__self__, \"driver\", driver)",
"def set_compression(self, compression):\n converter = geowave_pkg.datastore.redis.config.RedisOptions.CompressionConverter()\n self._java_ref.setCompression(converter.convert(compression))",
"def configure_driver(self, config):\n raise NotImplementedError",
"def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']",
"def set_schema_class(self, schema):\n self.schema_class = schema",
"def set_schema(self, schema):\r\n self.__schema = schema",
"def set_sampling(self, sampling: str = 'training'):\n self._sampling_accessor = sampling\n with open(self._inputPath + '/../sampling.json', encoding='utf-8') as data_file:\n self._sampling = json.load(data_file)[sampling]\n return self",
"def set_connection(self, connection, **connect_kwargs):\n if connection and connect_kwargs:\n raise DBSchemaError(\n 'Either connection or connect_kwargs is allowed, not both.')\n elif connection:\n self._conn = connection\n self._conn_kwargs = None\n else:\n self._conn = None\n self._conn_kwargs = connect_kwargs",
"def defaultDriver(self):\n return Enums.SQLite3",
"def SetSerializersProfiler(self, serializers_profiler):\n self._serializers_profiler = serializers_profiler\n if self._storage_file:\n self._storage_file.SetSerializersProfiler(serializers_profiler)",
"def driver(self, cores=1, memory=None, extraClassPath=None):\n if cores:\n self.sparkProperties[SparkProperties.SPARK_DRIVER_CORES] = cores\n if memory:\n self.sparkProperties[SparkProperties.SPARK_DRIVER_MEMORY] = memory\n if extraClassPath:\n self.sparkProperties[SparkProperties.SPARK_DRIVER_EXTRACLASSPATH] = extraClassPath\n return self"
]
| [
"0.59748155",
"0.5136365",
"0.4919816",
"0.4734082",
"0.4734082",
"0.44693297",
"0.44617736",
"0.44019923",
"0.43831828",
"0.43056816",
"0.43017954",
"0.43000233",
"0.42807668",
"0.42476252",
"0.41980818",
"0.4153368",
"0.41307756",
"0.4118861",
"0.40781423",
"0.40676874",
"0.40599054",
"0.40424943",
"0.39716944",
"0.39572835",
"0.39501914",
"0.39067236",
"0.3883478",
"0.38702768",
"0.38283923",
"0.38035783"
]
| 0.73837024 | 0 |
Sets the cron_expression of this JsonJdbcIngestionProperties. | def cron_expression(self, cron_expression):
self._cron_expression = cron_expression | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cron_time_zone(self, cron_time_zone):\n\n self._cron_time_zone = cron_time_zone",
"def cron(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cron\")",
"def cron(self):\n return",
"def __init__(__self__, *,\n cron_schedule: Optional[pulumi.Input[str]] = None,\n paused: Optional[pulumi.Input[bool]] = None):\n if cron_schedule is not None:\n pulumi.set(__self__, \"cron_schedule\", cron_schedule)\n if paused is not None:\n pulumi.set(__self__, \"paused\", paused)",
"def cron(self):\n hour, day = (self.pk // 30) % 24, self.pk % 30 + 1\n return f\"0 {hour} {day} * *\"",
"def expression(self, expr):\n self.set(expression=expr)",
"def __setRewriteTimestamp(self, expr):\n self.rewriteTimestamp = expr",
"def expression(self, expression):\n\n self._expression = expression",
"async def _start_cron_task(self):\n pass",
"def schedule_expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schedule_expression\")",
"def schedule_expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schedule_expression\")",
"def cronjobs():\n cj.update_cronjob_db()",
"def _configure_sphinx_cron(self):\n logger.info(\"Configuring sphinx index-building cronjobs\")\n\n # Ensure that the cron jobs for re-indexing are configured\n script_path = '/var/lib/sphinxsearch/policystat_indexer.sh'\n cron_location = '/etc/cron.d/policystat_sphinx'\n\n context = {\n 'indexer_script': script_path,\n 'log_dir': '/var/log/pstat',\n }\n with hide(*fab_output_hides):\n upload_template(\n \"../config/tpl/sphinx/cron.d/policystat_sphinx\",\n cron_location,\n context,\n use_sudo=True)\n sudo('chown root:root %s' % cron_location)",
"def modify_minute_job_schedule(self):\n job_schedule_modify_minute = netapp_utils.zapi\\\n .NaElement.create_node_with_children(\n 'job-schedule-cron-modify',\n **{'job-schedule-name': self.name})\n job_schedule_modify_minute.add_node_with_children(\n 'job-schedule-cron-minute',\n **{'cron-minute': str(self.job_minutes)})\n try:\n self.server.invoke_successfully(job_schedule_modify_minute,\n enable_tunneling=True)\n except netapp_utils.zapi.NaApiError as error:\n self.module.fail_json(msg='Error modifying job schedule %s: %s'\n % (self.name, to_native(error)),\n exception=traceback.format_exc())",
"def schedule_expression(self) -> Optional[str]:\n return pulumi.get(self, \"schedule_expression\")",
"def schedule_expression(self) -> Optional[str]:\n return pulumi.get(self, \"schedule_expression\")",
"def enableCron():\n console_write(\"[IRQ] CRON IRQ SETUP: {} SEQ: {}\".format(cfgget(cfgget('cron')), cfgget(\"cronseq\")))\n console_write(\"|- [IRQ] CRON CBF:{}\".format(cfgget('crontasks')))\n if cfgget(\"cron\") and cfgget('crontasks').lower() != 'n/a':\n from machine import Timer\n # INIT TIMER 1 IRQ with callback function wrapper\n timer = Timer(1)\n timer.init(period=int(cfgget(\"cronseq\")), mode=Timer.PERIODIC,\n callback=lambda timer: timirq_cbf_sched(cfgget('crontasks'), int(cfgget(\"cronseq\")/1000)))",
"def add_cron_job(nondated_url, curr_date):\n user = os.environ.get('USER', '')\n if user == '':\n usage(\"no USER env var set.\")\n api_key = os.environ.get('MTA_API_KEY', '')\n if api_key == '':\n usage(\"no MTA_API_KEY env var set. Please add `export MTA_API_KEY=<KEY>` to .bashrc/.zshrc and try again.\")\n if platform.system().startswith('Windows'):\n cron = CronTab(tab=\"\"\"50 23 * * * pyenv activate mta && python {}/cron.py {} {} {} {} {} {}\"\"\".format(os.getcwd(), api_key, curr_date, BASE_DIR, REALTIME_COLOR_TO_FEEDID[LINE], LINE, STATS_FILENAME))\n cron.write()\n else:\n cron = CronTab(user=\"{}\".format(user))\n job = cron.new(command=\"pyenv activate mta && python {}/cron.py {} {} {} {} {} {}\".format(os.getcwd(), api_key, curr_date, BASE_DIR, REALTIME_COLOR_TO_FEEDID[LINE], LINE, STATS_FILENAME), comment=\"mta_downloader-{}\".format(FILENAME_TS))\n job.setall('50 23 * * *')\n cron.write()",
"def set_eval(self):\n self.eval()\n self.volatile = True\n self.scheduled_sampling = False",
"def __call__(self, expression):\n self.set_expression(expression)",
"def cron_validator(value: str) -> str:\n try:\n croniter(value)\n return value\n except ValueError as e:\n raise ValueError(f\"Invalid cron expression: {e}\")",
"def setScheduleRate(self, rate, unit='hz'):\n DPxSetDinSchedRate(rate, unit)",
"def set_irrigation_schedule(schedule):\n global irrigation_schedule\n\n # Read and parse the properties file.\n f = open(FILE_PROPERTIES)\n try:\n data = json.loads(f.read())\n except JSONDecodeError:\n data = {}\n finally:\n f.close()\n\n # Parse the given schedule.\n try:\n sch_json = json.loads(schedule)\n except JSONDecodeError:\n sch_json = {}\n\n data[PROP_SCHEDULE] = sch_json[PROP_SCHEDULE]\n\n # Write the file with the new schedule.\n f = open(FILE_PROPERTIES, \"w\")\n f.write(json.dumps(data))\n f.close()\n\n irrigation_schedule = data[PROP_SCHEDULE]\n\n print_log(\"Changed the irrigation schedule: {}\".format(irrigation_schedule))",
"def __init_auto_update_for_yum_cron(self):\n self.os_patch_configuration_settings_file_path = self.yum_cron_configuration_settings_file_path\n self.download_updates_identifier_text = self.yum_cron_download_updates_identifier_text\n self.apply_updates_identifier_text = self.yum_cron_apply_updates_identifier_text\n self.enable_on_reboot_identifier_text = self.yum_cron_enable_on_reboot_identifier_text\n self.installation_state_identifier_text = self.yum_cron_installation_state_identifier_text\n self.auto_update_config_pattern_match_text = self.yum_cron_config_pattern_match_text\n self.enable_on_reboot_check_cmd = self.yum_cron_enable_on_reboot_check_cmd\n self.install_check_cmd = self.yum_cron_install_check_cmd\n self.current_auto_os_update_service = Constants.YumAutoOSUpdateServices.YUM_CRON",
"def regexp(self, regexp):\n\n self._regexp = regexp",
"def _schedule(self):\n return self._event['schedule_expression']",
"def cron_process(self):\n raise CronException(\"Base {} method called. Must be overriden.\".format(self.__class__))",
"def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:\n if self.is_cron:\n # Scheduler always executes at the exact minute to check for cron triggering\n scheduler.every().minute.at(\":00\").do(callback)\n else:\n # Only activate when an interval is specified\n # If not the only way is to trigger the poll by the api `trigger` endpoint\n if self._poll_interval:\n # Scheduler executes every interval seconds to execute the poll\n scheduler.every(self._poll_interval).seconds.do(callback)",
"def set_expression(self, expression):\n\n self.raw_expression = expression\n\n # Check if expression contains initial mode\n splited = self.raw_expression.split(\":\")\n if len(splited) == 1:\n # CHECKING new expression validity\n self.check_syntax(isInitialSet=False)\n # string contains initial mode\n self.initial_mode = None\n # no initial mode is provided(or maybe it's unsafe set, doesn't need a initial mode)\n self.aMatrix, self.bMatrix, self.eqMatrix = self.raw_to_matrix()\n else:\n # CHECKING new expression validity\n self.check_syntax(isInitialSet=True)\n # string contains initial mode\n self.initial_mode = splited[0].strip()\n self.raw_expression = splited[1].strip()\n # convert the expression to Matrix\n self.aMatrix, self.bMatrix, self.eqMatrix = self.raw_to_matrix()\n \n if self.initial_mode is not None:\n # if expression has mode information, then it must be initial set, so boundedness has to be checked\n self.check_boundedness()",
"def get_crons(self) -> dict:\n uri = f\"{self.uri}/crons\"\n\n response = self.request(uri=uri)\n return response.json()"
]
| [
"0.6386084",
"0.5834923",
"0.5414254",
"0.53603417",
"0.52862346",
"0.519446",
"0.5114584",
"0.511273",
"0.5026007",
"0.48927107",
"0.48927107",
"0.4794293",
"0.47415823",
"0.46956754",
"0.46733984",
"0.46733984",
"0.46656173",
"0.45883653",
"0.45881572",
"0.45879054",
"0.45830813",
"0.4577824",
"0.4572033",
"0.45565972",
"0.45348313",
"0.45114157",
"0.44611976",
"0.44608295",
"0.44069928",
"0.43988636"
]
| 0.7999758 | 0 |
Sets the cron_time_zone of this JsonJdbcIngestionProperties. | def cron_time_zone(self, cron_time_zone):
self._cron_time_zone = cron_time_zone | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cron_expression(self, cron_expression):\n\n self._cron_expression = cron_expression",
"def time_zone(self, time_zone):\n\n self._time_zone = time_zone",
"def set_timezone(self, to_tz):\n self.startdate = to_tz.localize(self.startdate.replace(tzinfo=None))\n self.enddate = to_tz.localize(self.enddate.replace(tzinfo=None))\n self.timezone = to_tz",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def timezone(self, timezone):\n\n self._timezone = timezone",
"def set_timezone(conn, timezone):\n with Tx(conn) as c:\n c.execute('SET timezone = %s', (timezone,))",
"def __init__(__self__, *,\n cron_schedule: Optional[pulumi.Input[str]] = None,\n paused: Optional[pulumi.Input[bool]] = None):\n if cron_schedule is not None:\n pulumi.set(__self__, \"cron_schedule\", cron_schedule)\n if paused is not None:\n pulumi.set(__self__, \"paused\", paused)",
"def sync_timezone(self, sync_timezone):\n\n self._sync_timezone = sync_timezone",
"def _timezone_observer(self, timezone):\n if timezone:\n self.timezone = timezone\n for assignment in self.assignments:\n assignment.timezone = timezone",
"def time_zone(self, time_zone):\n # type: (string_types) -> None\n\n if time_zone is not None:\n if not isinstance(time_zone, string_types):\n raise TypeError(\"Invalid type for `time_zone`, type has to be `string_types`\")\n\n self._time_zone = time_zone",
"def set(self, tzone):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\tself.TimeZone.SetTimezone(\n\t\t\t'(sb)',\n\t\t\ttzone,\n\t\t\tTrue # User interaction\n\t\t)",
"def set_time_zone(self, tz_str): # TODO Figure out how to implement this as a nonfeature property\n if tz_str in pytz.all_timezones:\n self._data['timezone'] = tz_str\n else:\n raise InvalidTimezone()",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', False)\n self._column_type = Time(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)",
"async def svc_set_zone_schedule(self, schedule: str, **kwargs) -> None:\n await self._device.set_schedule(json.loads(schedule))",
"def set_timezone(tz=None, deploy=False):\n\n if not tz:\n raise CommandExecutionError(\"Timezone name option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone\",\n \"element\": \"<timezone>{}</timezone>\".format(tz),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', True)\n self._column_type = CoreDateTime(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def cron(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cron\")",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', True)\n self._column_type = CoreTimeStamp(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def cron(self):\n return",
"def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data",
"def set_time_by_timezone(df):\n df = set_city_time_by_timezone(df, 1078, 3)\n df = set_city_time_by_timezone(df, 22390, 4)\n df = set_city_time_by_timezone(df, 22430, 4)\n df = set_city_time_by_timezone(df, 22438, 5)\n return df",
"async def _start_cron_task(self):\n pass",
"def query_timezone(self, query_timezone):\n\n self._query_timezone = query_timezone",
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def set_irrigation_schedule(schedule):\n global irrigation_schedule\n\n # Read and parse the properties file.\n f = open(FILE_PROPERTIES)\n try:\n data = json.loads(f.read())\n except JSONDecodeError:\n data = {}\n finally:\n f.close()\n\n # Parse the given schedule.\n try:\n sch_json = json.loads(schedule)\n except JSONDecodeError:\n sch_json = {}\n\n data[PROP_SCHEDULE] = sch_json[PROP_SCHEDULE]\n\n # Write the file with the new schedule.\n f = open(FILE_PROPERTIES, \"w\")\n f.write(json.dumps(data))\n f.close()\n\n irrigation_schedule = data[PROP_SCHEDULE]\n\n print_log(\"Changed the irrigation schedule: {}\".format(irrigation_schedule))"
]
| [
"0.5902269",
"0.5746598",
"0.5613646",
"0.5612734",
"0.5612734",
"0.5612734",
"0.5612734",
"0.5612734",
"0.55559283",
"0.53948164",
"0.52916694",
"0.5215786",
"0.51940787",
"0.51104444",
"0.50565326",
"0.5027158",
"0.50026774",
"0.4984067",
"0.4952581",
"0.48982117",
"0.4888295",
"0.48741975",
"0.48697048",
"0.4841611",
"0.46813244",
"0.46518233",
"0.4527649",
"0.4496219",
"0.4496219",
"0.44840866"
]
| 0.83452237 | 0 |
Sets the execute_profiling of this JsonJdbcIngestionProperties. | def execute_profiling(self, execute_profiling):
self._execute_profiling = execute_profiling | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def setProfileJobs(self,profile=False):\n self.__profileJobs = profile",
"def execution_profile(self):\n raise NotImplementedError(\n method_name=\"execution_profile\",\n class_name=type(self))",
"def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])",
"def execute_info(self, execute_info):\n self._execute_info = execute_info",
"def set_statistics(self, statistics, asset=None):\n if statistics is not None:\n statistics = [s.to_dict() for s in statistics]\n self._set_property('pc:statistics', statistics, asset)",
"def set_profiler_result(\n self,\n profiler_result: 'profile.ProfilerResult',\n i_par: int,\n profile_list: int = None) -> None:\n if profile_list is None:\n profile_list = -1 # last\n self.list[profile_list][i_par] = copy.deepcopy(profiler_result)",
"def SetSerializersProfiler(self, serializers_profiler):\n self._serializers_profiler = serializers_profiler\n if self._storage_file:\n self._storage_file.SetSerializersProfiler(serializers_profiler)",
"def SetStorageProfiler(self, storage_profiler):\n self._storage_profiler = storage_profiler\n if self._storage_file:\n self._storage_file.SetStorageProfiler(storage_profiler)",
"def _enable_profiling():\n import cProfile\n import atexit\n global _profiler\n _profiler = cProfile.Profile()\n _profiler.enable()\n atexit.register(_profile_atexit)",
"def watch_profile(self):\r\n profile_parser = ProfileParser()\r\n databases = self._get_requested_databases()\r\n connection = pymongo.MongoClient(self._db_uri,\r\n document_class=OrderedDict,\r\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)\r\n enabled_profile = False\r\n\r\n if databases == []:\r\n try:\r\n databases = connection.database_names()\r\n except:\r\n message = \"Error: Could not list databases on server. Please \" \\\r\n + \"check the auth components of your URI.\\n\"\r\n sys.stderr.write(message)\r\n databases = []\r\n\r\n for ignore_db in IGNORE_DBS:\r\n if ignore_db in databases:\r\n databases.remove(ignore_db)\r\n\r\n if len(databases) != 1:\r\n message = \"Error: Please use namespaces (-n) to specify a single \" \\\r\n + \"database for profile watching.\\n\"\r\n sys.stderr.write(message)\r\n return 1\r\n\r\n database = databases[0]\r\n db = connection[database]\r\n\r\n initial_profile_level = db.profiling_level()\r\n\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Profile level currently 0. Dex is setting profile \" \\\r\n + \"level 1. To run --watch at profile level 2, \" \\\r\n + \"enable profile level 2 before running Dex.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(DEFAULT_PROFILE_LEVEL)\r\n\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n try:\r\n for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):\r\n self._process_query(profile_entry,\r\n profile_parser)\r\n if time.time() >= output_time:\r\n self._output_aggregated_report(sys.stderr)\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n except KeyboardInterrupt:\r\n sys.stderr.write(\"Interrupt received\\n\")\r\n finally:\r\n self._output_aggregated_report(sys.stdout)\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Dex is resetting profile level to initial value \" \\\r\n + \"of 0. You may wish to drop the system.profile \" \\\r\n + \"collection.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(initial_profile_level)\r\n\r\n return 0",
"def parse_json_profile(self, **kwargs):\n \n print(\"[%] Setting attributes from JSON Profile\")\n # This snippet takes the keys ignoring the first key which is task and then shows\n # what should be set in the kwargs parsing. \n print(f\"[-] The following keys are needed for this task : {[x for x in list(kwargs.keys())[1:]]}\")\n\n try:\n self.database_location = kwargs[\"database_location\"]\n self.masterpassword = kwargs[\"masterpassword\"]\n \n print(f\"[*] Setting the command attribute : {self.database_location}\")\n print(f\"[*] Setting the command attribute : {self.masterpassword}\")\n\n \n except:\n print(self.cl.red(\"[!] Error Setting JSON Profile attributes, check matching key values in the profile\"))\n\n # once these have all been set in here, then self.create_autoIT_block() gets called which pushes the task on the stack\n self.create_autoIT_block()",
"def SetPerfProfilingMode(self):\n self._ForceAllCpusOnline(True)\n self._SetScalingGovernorInternal('performance')\n if not self._AllCpusAreOnline():\n if not self._device.old_interface.IsRootEnabled():\n raise RuntimeError('Need root to force CPUs online.')\n raise RuntimeError('Failed to force CPUs online.')",
"def create_ipu_config(profiling=False,\n enable_ipu_events=False,\n use_poplar_text_report=False,\n use_poplar_cbor_report=False,\n profile_execution=False,\n report_every_nth_execution=0,\n max_report_size=0x10000000,\n report_directory=\"\",\n always_rearrange_copies_on_the_host=False,\n merge_infeed_io_copies=False,\n disable_graph_convolution_caching=False,\n retain_control_dependencies=False,\n max_cross_replica_sum_buffer_size=0):\n if profiling and enable_ipu_events:\n raise Exception(\n \"`profiling` and `enable_ipu_events` are mutually exclusive\")\n\n if profile_execution and not profiling:\n raise Exception(\"`profiling` is required when `profile_execution` is set\")\n\n opts = IpuOptions()\n opts.ipu_model_config.enable_ipu_model = True\n opts.ipu_model_config.compile_ipu_code = True\n\n opts.profiling.enable_ipu_trace_events = profiling or enable_ipu_events\n opts.profiling.enable_compilation_trace = profiling\n opts.profiling.enable_io_trace = profiling\n opts.profiling.enable_execution_trace = profiling and profile_execution\n opts.profiling.enable_poplar_reports_text = use_poplar_text_report\n opts.profiling.enable_poplar_reports_cbor = use_poplar_cbor_report\n opts.profiling.report_every_nth_execution = report_every_nth_execution\n opts.profiling.max_report_size = max_report_size\n opts.profiling.report_directory = report_directory\n\n opts.speed_size_config.always_rearrange_copies_on_the_host = always_rearrange_copies_on_the_host\n opts.speed_size_config.merge_infeed_io_copies = merge_infeed_io_copies\n opts.speed_size_config.disable_graph_convolution_caching = disable_graph_convolution_caching\n\n opts.retain_control_dependencies = retain_control_dependencies\n opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size\n\n return opts",
"def set_profile(self, profile: str):\n self._profile = profile",
"def update_profile(self, df_series, sample_size=None,\n min_true_samples=None, sample_ids=None,\n pool=None):\n if not sample_size:\n sample_size = len(df_series)\n if not sample_size:\n sample_size = self._get_sample_size(df_series)\n if not min_true_samples:\n min_true_samples = self._min_true_samples\n\n clean_sampled_df, base_stats = \\\n self.clean_data_and_get_base_stats(\n df_series=df_series, sample_size=sample_size,\n null_values=self._null_values,\n min_true_samples=min_true_samples, sample_ids=sample_ids)\n\n self._update_base_stats(base_stats)\n self.update_column_profilers(clean_sampled_df, pool)",
"def set_authentication_profile(profile=None, deploy=False):\n\n if not profile:\n raise CommandExecutionError(\"Profile name option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/\"\n \"authentication-profile\"\n ),\n \"element\": \"<authentication-profile>{}</authentication-profile>\".format(\n profile\n ),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret",
"def update_profile(self, data, sample_size=None, min_true_samples=None):\n encoding = None\n file_type = None\n\n if min_true_samples is not None \\\n and not isinstance(min_true_samples, int):\n raise ValueError('`min_true_samples` must be an integer or `None`.')\n\n if isinstance(data, data_readers.base_data.BaseData):\n encoding = data.file_encoding\n file_type = data.data_type\n data = data.data\n elif isinstance(data, self._allowed_external_data_types):\n file_type = str(data.__class__)\n else:\n raise TypeError(\n f\"Data must either be imported using the data_readers or using \"\n f\"one of the following: {self._allowed_external_data_types}\"\n )\n\n if not len(data):\n warnings.warn(\"The passed dataset was empty, hence no data was \"\n \"profiled.\")\n return\n\n # set sampling properties\n if not min_true_samples:\n min_true_samples = self._min_true_samples\n if not sample_size:\n sample_size = self._get_sample_size(data)\n\n self._update_profile_from_chunk(data, sample_size, min_true_samples)\n\n # set file properties since data will be processed\n if encoding is not None:\n self.encoding = encoding\n if file_type is not None:\n self.file_type = file_type",
"def set_bindpoint(self, bindpoint):\n self.options['bindpoint'] = bindpoint",
"def magic_profile(self, parameter_s=''):\n if self.rc.profile:\n printpl('Current IPython profile: $self.rc.profile.')\n else:\n print 'No profile active.'",
"def execution_type(self, execution_type):\n self._execution_type = execution_type",
"def enable_execute_command(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_execute_command\")",
"def plogi_settings(self, plogi_settings):\n\n self._plogi_settings = plogi_settings",
"def set_execution_mode(frame, mode, recursive=False):\n if isinstance(frame, (pd.Series, pd.DataFrame)):\n frame = frame._query_compiler._modin_frame\n frame._force_execution_mode = mode\n if recursive and hasattr(frame._op, \"input\"):\n for child in frame._op.input:\n set_execution_mode(child, mode, True)",
"def set_print_statements(self, print_flag: bool) -> None:\n if print_flag:\n self._print_statements_enabled = print_flag\n else:\n self._print_statements_enabled = print_flag",
"def jdbc_driver(self, jdbc_driver):\n\n self._jdbc_driver = jdbc_driver",
"def __setattr__(self, name, value):\n if name == 'profiles':\n self._coveragepy_data = None\n super(MergedProfiles, self).__setattr__(name, value)",
"def set_visualization(visualization):\n Visualization.__current_visualization = visualization",
"def add_job_property(jobname, jproperty, value, dumpruninfo):\n if \"jobs\" not in dumpruninfo:\n dumpruninfo[\"jobs\"] = {}\n if jobname not in dumpruninfo[\"jobs\"]:\n dumpruninfo[\"jobs\"][jobname] = {}\n dumpruninfo[\"jobs\"][jobname][jproperty] = value",
"def save_profiles(self, fout, save_hybrid_meta=True):\n\n self._init_h5_out(fout, save_hybrid_meta=save_hybrid_meta)\n self._write_h5_out(fout, save_hybrid_meta=save_hybrid_meta)"
]
| [
"0.4877867",
"0.47615573",
"0.46750104",
"0.46304145",
"0.4542346",
"0.44202763",
"0.4358259",
"0.41864684",
"0.41732705",
"0.41306156",
"0.41144234",
"0.41056332",
"0.40532285",
"0.4051517",
"0.40302408",
"0.40086052",
"0.40018988",
"0.39283058",
"0.3907618",
"0.38800254",
"0.38791493",
"0.387477",
"0.38524592",
"0.38328",
"0.3818116",
"0.3806578",
"0.3790663",
"0.37720403",
"0.37652117",
"0.3761921"
]
| 0.7147761 | 0 |
Sets the extract_data_sample of this JsonJdbcIngestionProperties. | def extract_data_sample(self, extract_data_sample):
self._extract_data_sample = extract_data_sample | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_sample_data_extracted(self, is_sample_data_extracted):\n self._is_sample_data_extracted = is_sample_data_extracted",
"def set_sampling(self, sampling: str = 'training'):\n self._sampling_accessor = sampling\n with open(self._inputPath + '/../sampling.json', encoding='utf-8') as data_file:\n self._sampling = json.load(data_file)[sampling]\n return self",
"def sample_url(self, sample_url: str):\n\n self._sample_url = sample_url",
"def is_sample_data_extracted(self):\n return self._is_sample_data_extracted",
"def SetSampleParameters(self, data):\n self._SetParameters(data, 'SetSampleParameters')",
"def set_additionaldata_extractor(self, extractor):\r\n if not extractor:\r\n raise ValueError(\"extractor must not be null!\")\r\n self.additional_data_extractor = extractor",
"def sample_file(self, sample_file: str):\n\n self._sample_file = sample_file",
"def process_extract_samples(samples_to_extract):\r\n prefs = {}\r\n\r\n if samples_to_extract:\r\n samples = samples_to_extract.strip().strip(\"'\").split(',')\r\n\r\n for j, col in enumerate(samples):\r\n key = str(j)\r\n prefs[key] = {}\r\n prefs[key] = col\r\n\r\n return prefs",
"def process_extract_samples(samples_to_extract):\n prefs = {}\n\n if samples_to_extract:\n samples = samples_to_extract.strip().strip(\"'\").split(',')\n\n for j, col in enumerate(samples):\n key = str(j)\n prefs[key] = {}\n prefs[key] = col\n\n return prefs",
"def setSampleTime(self, sample_time):\n self.sample_time = sample_time",
"def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'",
"def setSampleTime(self, sample_time):\r\n self.sample_time = sample_time",
"def setSampleTime(self, sample_time):\n\t\tself.sample_time = sample_time",
"def post_process_sample(self, sample: DataSample) -> DataSample:\n return sample",
"def setSampleId(self, value):\n return self.getDbRecord().setColumnValue(SAMPLE_ID_COLUMN, value)",
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def sample_data(self) -> Dict[str, Any]:\n return self._sample_data",
"def data_aggregation_setting(self, data_aggregation_setting):\n\n self._data_aggregation_setting = data_aggregation_setting",
"def set_sample(self, sample):\n self.reset()\n\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = (sample.substrate, index)",
"def sample_file_content(self, sample_file_content: DownloadableDataFileContentInterface):\n\n self._sample_file_content = sample_file_content",
"def sample_train(self, sample_frac):\n df_tr = self.get_dataset_type_df('train')\n original_col_cnt = len(df_tr)\n # Set the test records aside\n df_te = self.get_dataset_type_df('test')\n df_tr = df_tr.sample(frac=sample_frac)\n self.df = pd.concat([df_tr, df_te])\n logging.info(\"Sampled training set from {} to {} rows, fraction={:0.1%}\".format(original_col_cnt, len(df_tr), len(df_tr)/original_col_cnt))",
"def set_sample_action(self, sample_action):\n\n self.sample_action = sample_action",
"def sample_type(self, sample_type: str):\n if sample_type is None:\n raise ValueError(\"Invalid value for `sample_type`, must not be `None`\")\n\n self._sample_type = sample_type",
"def _set_sample(self, sample, PB_X, t):\n for sensor in PB_X.keys():\n sample.set(sensor, np.array(PB_X[sensor]), t=t+1)",
"def set_sample_count(profileDict, sampleCount):\n\n profileDict[\"samples\"][\"count\"]= str(sampleCount)",
"def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')",
"def samples(self, samples):\n\n self._samples = samples",
"def update_profile(self, data, sample_size=None, min_true_samples=None):\n encoding = None\n file_type = None\n\n if min_true_samples is not None \\\n and not isinstance(min_true_samples, int):\n raise ValueError('`min_true_samples` must be an integer or `None`.')\n\n if isinstance(data, data_readers.base_data.BaseData):\n encoding = data.file_encoding\n file_type = data.data_type\n data = data.data\n elif isinstance(data, self._allowed_external_data_types):\n file_type = str(data.__class__)\n else:\n raise TypeError(\n f\"Data must either be imported using the data_readers or using \"\n f\"one of the following: {self._allowed_external_data_types}\"\n )\n\n if not len(data):\n warnings.warn(\"The passed dataset was empty, hence no data was \"\n \"profiled.\")\n return\n\n # set sampling properties\n if not min_true_samples:\n min_true_samples = self._min_true_samples\n if not sample_size:\n sample_size = self._get_sample_size(data)\n\n self._update_profile_from_chunk(data, sample_size, min_true_samples)\n\n # set file properties since data will be processed\n if encoding is not None:\n self.encoding = encoding\n if file_type is not None:\n self.file_type = file_type",
"def extracts(self, extracts):\n\n self._extracts = extracts",
"def setDataset(self,dataset):\n self.__dataSet = dataset"
]
| [
"0.6318039",
"0.56455183",
"0.53558093",
"0.5122929",
"0.5103026",
"0.49853206",
"0.49181038",
"0.4829947",
"0.48101833",
"0.48060548",
"0.47768047",
"0.4770165",
"0.47481054",
"0.47255185",
"0.47225806",
"0.4699318",
"0.46696988",
"0.46533418",
"0.4651343",
"0.4646242",
"0.4608632",
"0.45903865",
"0.4589196",
"0.45346186",
"0.45190775",
"0.45004892",
"0.44476196",
"0.44379738",
"0.44333544",
"0.4417536"
]
| 0.7396614 | 0 |
Sets the detect_advanced_data_types of this JsonJdbcIngestionProperties. | def detect_advanced_data_types(self, detect_advanced_data_types):
self._detect_advanced_data_types = detect_advanced_data_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_dtype(self, dtype):\n _d = dtype.lower()\n if \"phot\" in _d:\n self.dtype = \"photon\"\n elif \"ener\" in _d:\n self.dtype = \"energy\"\n else:\n raise ValueError('Unknown detector type {0}'.format(dtype))",
"def advanced_features(self, advanced_features):\n\n self._advanced_features = advanced_features",
"def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes",
"def advanced_properties(self, advanced_properties):\n\n self._advanced_properties = advanced_properties",
"def set_datatype(self, datatype):\n if(datatype == 0):\n self.datatype = \"eeg\"\n elif(datatype == 1):\n self.datatype = \"motion\"\n else:\n raise NotImplementedError(\"EEG and Motion-Data supported only\")",
"def data_types(self):",
"def setDataType(self, dataType):\n\n self._dataType = dataType\n\n return True",
"def wasm_reference_types(self, enable):\n\n if not isinstance(enable, bool):\n raise TypeError('expected a bool')\n dll.wasmtime_config_wasm_reference_types_set(self.__ptr__, enable)",
"def __use_remote_datatypes_conf( lwr_client ):\n use_remote_datatypes = string_as_bool_or_none( lwr_client.destination_params.get( \"use_remote_datatypes\", False ) )\n return use_remote_datatypes",
"def set_dtype(self, dtype):\n self.mean_.set_dtype(dtype)\n for filter_k in self.samples_:\n filter_k.set_dtype(dtype)\n self.dtype = self.mean_.dtype",
"def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)",
"def _impose_types(self, columns, types):\n for c,t in zip(columns, types):\n if c in self.keys():\n self.table[c] = self.table[c].astype(t)",
"def enable_numeric_tower(self):\n # Enable numeric tower int <: float <: complex.\n # https://peps.python.org/pep-0484/#the-numeric-tower\n bool_info = self.to_type_info(bool)\n int_info = self.to_type_info(int)\n float_info = self.to_type_info(float)\n complex_info = self.to_type_info(complex)\n self.add_subclass_edge(super_class=int_info, sub_class=bool_info)\n self.add_subclass_edge(super_class=float_info, sub_class=int_info)\n self.add_subclass_edge(super_class=complex_info, sub_class=float_info)",
"def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}",
"def data_type_string(self, data_type_string):\n\n self._data_type_string = data_type_string",
"def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")",
"def variable_types(self, data_key, only_type=None):\r\n if self[data_key].meta['columns'] is None:\r\n return 'No meta attached to data_key: %s' %(data_key)\r\n else:\r\n types = {\r\n 'int': [],\r\n 'float': [],\r\n 'single': [],\r\n 'delimited set': [],\r\n 'string': [],\r\n 'date': [],\r\n 'time': [],\r\n 'array': []\r\n }\r\n not_found = []\r\n for col in self[data_key].data.columns:\r\n if not col in ['@1', 'id_L1', 'id_L1.1']: \r\n try:\r\n types[\r\n self[data_key].meta['columns'][col]['type']\r\n ].append(col)\r\n except:\r\n not_found.append(col) \r\n for mask in self[data_key].meta['masks'].keys():\r\n types[self[data_key].meta['masks'][mask]['type']].append(mask)\r\n if not_found:\r\n print '%s not found in meta file. Ignored.' %(not_found)\r\n if only_type:\r\n return types[only_type]\r\n else:\r\n return types",
"def _set_data_types(self):\n temp_df = self.raw_data\n cols = temp_df.drop('room_location', axis=1).columns\n temp_df[cols] = temp_df[cols].apply(pd.to_numeric)\n temp_df['room_location'] = temp_df['room_location'].astype(str)\n self.raw_data = temp_df",
"def data_types(self):\n return self['data_types']",
"def set_data_type(data_type):\n data_type_type = DataTypeUtil.getDtypeFromContext(data_type)\n DataTypeUtil.setDTypeForContext(data_type_type)",
"def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data",
"def test_ingest_with_numeric_boolean():\n schema = pa.schema([\n pa.field(\"foo\", pa.bool_())\n ])\n\n data = [{\"foo\": 0}, {\"foo\": 1}]\n\n converted_data = client.ingest_data(data, schema)\n assert converted_data.to_pydict() == {'foo': [False, True]}",
"def types(self, types):\n\n self._types = types",
"def allowed_attachment_types(self, allowed_attachment_types: ConfigNodePropertyArray):\n\n self._allowed_attachment_types = allowed_attachment_types",
"def convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n dtype_backend: DtypeBackend = \"numpy_nullable\",\n ):\n return DataFrameDefault.register(pandas.DataFrame.convert_dtypes)(\n self,\n infer_objects=infer_objects,\n convert_string=convert_string,\n convert_integer=convert_integer,\n convert_boolean=convert_boolean,\n convert_floating=convert_floating,\n dtype_backend=dtype_backend,\n )",
"def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res",
"def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})",
"def _set_field_feature_dtype(self, field_path, field_feature_dtype):\n feature_dtype_str = json.dumps(field_feature_dtype.descr)\n # check if the field_feature_dtype is already set\n if field_path in self.field_feature_dtypes:\n # check that the dtype was previously saved as \"None\" as we\n # won't overwrite anything else\n if self.field_feature_dtypes[field_path] is None:\n full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR, field_path)\n # we have to delete the old data and set new data\n del self.h5[full_path]\n self.h5.create_dataset(full_path, data=feature_dtype_str)\n else:\n raise AttributeError(\n \"Cannot overwrite feature dtype for {} with {} because it is {} not \".format(\n field_path, field_feature_dtype, self.field_feature_dtypes[field_path],\n NONE_STR))\n # it was not previously set so we must create then save it\n else:\n self._add_field_feature_dtype(field_path, field_feature_dtype)",
"def set_type_smart(self):\n self.update(type=\"smart\")",
"def _set_lsp_config_type_dynamic(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-type-dynamic\", rest_name=\"lsp-config-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_type_dynamic must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-type-dynamic\", rest_name=\"lsp-config-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_type_dynamic = t\n if hasattr(self, '_set'):\n self._set()"
]
| [
"0.50965697",
"0.49736077",
"0.49001017",
"0.4881866",
"0.47757646",
"0.476277",
"0.4752075",
"0.4641316",
"0.45932186",
"0.45875108",
"0.4532499",
"0.4515847",
"0.44989982",
"0.44780585",
"0.4475511",
"0.44473377",
"0.44451487",
"0.4419167",
"0.44160962",
"0.44096804",
"0.4384088",
"0.43786818",
"0.43586648",
"0.43510476",
"0.43183726",
"0.43082142",
"0.43019202",
"0.42808503",
"0.42724168",
"0.42596257"
]
| 0.7839279 | 0 |
Sets the tables_to_skip of this JsonJdbcIngestionProperties. | def tables_to_skip(self, tables_to_skip):
self._tables_to_skip = tables_to_skip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fkey_nav_stops_on_skip(dumper, db):\n dumper.reader.load_db(\n db.create_sample(\n 5,\n fkeys=[\n (\"table1\", \"t2id\", \"table2\", \"id\"),\n (\"table2\", \"t3id\", \"table3\", \"id\"),\n (\"table3\", \"t4id\", \"table4\", \"id\"),\n ],\n )\n )\n dumper.add_config(\n {\n \"db_objects\": [\n {\"name\": \"table1\"},\n {\"name\": \"table3\", \"action\": \"skip\"},\n ]\n }\n )\n dumper.perform_dump()\n objs = [obj for obj, match in dumper.writer.dumped if isinstance(obj, Table)]\n assert len(objs) == 2",
"def table(self, *tables):\n assert hasattr(self, \"spark\"), \"it should have 'spark' attribute, having a spark session.\"\n\n try:\n yield\n finally:\n for t in tables:\n self.spark.sql(\"DROP TABLE IF EXISTS %s\" % t)",
"def handle_tables(\n client: bigquery.Client,\n dataset_obj: bigquery.Dataset,\n days: int,\n table_pattern: str = None,\n skip_tables: str = None,\n dry_run: bool = False,\n) -> None:\n\n for table_item in client.list_tables(dataset_obj): # type: bigquery.table.TableListItem\n table: bigquery.Table = client.get_table(table_item) # Fetch the full table object\n\n # Skip tables that match the skip_tables regex pattern if provided\n if skip_tables and re.match(skip_tables, table.table_id):\n continue\n\n # If a table_pattern is provided, skip tables that don't match the regex pattern\n if table_pattern and not re.match(table_pattern, table.table_id):\n continue\n\n # Call set_expiration function to handle expiration for the table\n set_expiration(client, table, days, dry_run, table_name=table.table_id)",
"def migrateTables(self):\n tables = self.client_from.tables.list(['columns'])\n if len(tables) > 0:\n for table in tables:\n self.client_to.tables.update(table['tableId'], json.dumps(table))\n else:\n print(\"No tables to migrate!\")\n return\n print(len(tables) + \" Tables migrated!\")",
"def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")",
"def set_skip(self, val):\n self.skip = val\n return self",
"def load_all_traj_skip(skip=200):\n pdb='/bpti/bpti-prot/bpti-prot.pdb'\n dcd = lambda x: '/bpti/bpti-prot/bpti-prot-%02d.dcd' % x\n tr = []\n for i in range(42):\n print ('loading ', i)\n tr.append(md.load(DCD_PROT(i), top=PDB_PROT, stride=skip))\n return tr",
"def _load_tables(neo4j_session: neo4j.Session, tables: List[Dict], update_tag: int) -> None:\n ingest_tables = \"\"\"\n UNWIND $tables_list as table\n MERGE (t:AzureStorageTable{id: table.id})\n ON CREATE SET t.firstseen = timestamp(), t.type = table.type\n SET t.name = table.name,\n t.tablename = table.table_name,\n t.lastupdated = $azure_update_tag\n WITH t, table\n MATCH (ts:AzureStorageTableService{id: table.service_id})\n MERGE (ts)-[r:CONTAINS]->(t)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_tables,\n tables_list=tables,\n azure_update_tag=update_tag,\n )",
"def collect_table_names(self):\n try:\n for migrate_table in self.migration_tables:\n tabel_name = migrate_table.migrationTable.DestinationTable.name\n self.table_list.add(tabel_name)\n self.tables.update(self.table_list)\n except Exception as err:\n logger.error(\"collect_table_names [error] -> %s\" % err)",
"def skip(self, skip):\n\n self._skip = skip",
"def create_tables(self):\n if self.mock:\n mock_dynamodb2(self._create_tables())\n else:\n self._create_tables()",
"def skip_tables(include=('*',), exclude=()):\n check = Filter()\n check.include(list(include))\n check.exclude(list(exclude))\n def _skip_handler(dispatcher, node):\n \"\"\"Process a node and skip based on table filter\"\"\"\n try:\n check('%s.%s' % (dispatcher.database, dispatcher.table))\n except FilteredItem:\n raise SkipNode()\n return node\n return _skip_handler",
"def load_db_tables(self):\r\n\r\n self.list_of_tables = pd.read_json(os.path.join(self.config_path, self.db_config_file),\r\n orient='records')[self.report_type]['table']\r\n\r\n # Loading tables from database\r\n for aux_index, table in enumerate(self.list_of_tables):\r\n \"\"\"\r\n table_indexes: position in which each table is located in the list variable 'list_of_tables'.\r\n list_of_tables: list data structure used to allocate the database tables in DataFrame format.\r\n \"\"\"\r\n self.table_indexes.append(table['name'])\r\n self.list_of_tables[aux_index] = pd.read_sql_table(table_name=table['name'],\r\n con=self.mariadb_engine,\r\n columns=table['columns'],\r\n parse_dates=table['parse_dates'])\r\n\r\n if table['master_table']:\r\n self.master_table.append(table['name'])",
"def parse_migration_tables(self, tabels_schema: MigrationTablesSchema):\n try:\n self.source_table = tabels_schema.migrationTable.SourceTable.dict()\n self.destination_table = tabels_schema.migrationTable.DestinationTable.dict()\n self.columns = tabels_schema.migrationTable.MigrationColumns\n except Exception as err:\n logger.error(\"parse_migration_tables [error] -> %s\" % err)",
"def create_tables(self, tables=None):\n LOG.debug(f\"Creating table subset {tables}\")\n Base.metadata.create_all(self.engine, tables, checkfirst=False)",
"def skip(self, skip):\n self.query = self.query.skip(skip)\n self._has_skip = True\n return self",
"def delete_tables(self):\n if self.mock:\n mock_dynamodb2(self._delete_tables())\n else:\n self._delete_tables()",
"def create_all_tables(self):\n pass",
"def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])",
"def verify_skip(self, d_stmt, table): \n pass",
"def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))",
"def _delete_tables(self):\n print(\"\\n ** Deleting DynamoDB Tables\")\n # Delete Story Table\n for table_config in self.table_list:\n with open(os.path.join(self.config_dir, table_config), \"rt\") as handle:\n config_data = json.load(handle)\n story_table = DynamoDB(DynamoTable.STACK_NAME, config_data[self.stack_name])\n story_table.delete()",
"def skip(self, skip):\n\n self._skip = skip\n return self",
"def _create_tables(self):\n\n print(\"\\n ** Creating DynamoDB Tables\")\n\n # Create Tables\n for table_config in self.table_list:\n with open(os.path.join(self.config_dir, table_config), \"rt\") as handle:\n config_data = json.load(handle)\n story_table = DynamoDB(DynamoTable.STACK_NAME, config_data[self.stack_name])\n story_table.create()",
"def load_table_from_dict(d, tableWidget, skip=[], disable_first=True):\n N_rows = len(d)-len(skip)\n tableWidget.setRowCount(N_rows)\n i = 0\n for key, val in d.items():\n if key in skip:\n continue\n item1, item2 = QTableWidgetItem(key), QTableWidgetItem(str(val))\n if disable_first:\n item1.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n tableWidget.setItem(i, 0, item1)\n tableWidget.setItem(i, 1, item2)\n i += 1\n \n #tableWidget.resizeColumnsToContents()\n tableWidget.resizeRowsToContents()",
"def load_tables(self, tables=None):\n return {\n table_name: self.load_table(table_name)\n for table_name in tables or self.get_tables()\n }",
"def reset_tables(database_url, _metadata):\n\n # use reflected MetaData to avoid errors due to ORM classes\n # being inconsistent with existing tables\n with isolated_nullpool_engine(database_url) as engine:\n seperate_metadata = MetaData()\n seperate_metadata.reflect(bind=engine)\n seperate_metadata.drop_all(bind=engine)\n ENUM(name='dpds_operation_types').drop(engine)\n\n # use ORM clases to define tables to create\n init_tables(database_url, _metadata)",
"def skip(self, skip):\n self._evaluated = False\n self._offset = skip\n return self",
"def set_tablename(self, name):\n self.ds_table = name",
"def setUp(self):\n table = self.get_local_dynamo_cli().Table(constants.get_configuration_table())\n response = table.scan()\n\n timeseries = [item['timeserie'] for item in response['Items']]\n\n for ts in timeseries:\n table.delete_item(Key={'timeserie': ts})"
]
| [
"0.47862852",
"0.47083324",
"0.47003055",
"0.4666161",
"0.46463743",
"0.4631545",
"0.4589141",
"0.45797166",
"0.45434368",
"0.45173484",
"0.44476372",
"0.4442546",
"0.44404238",
"0.44373155",
"0.44281414",
"0.43903542",
"0.43850672",
"0.43013865",
"0.42779592",
"0.4262847",
"0.4231296",
"0.4203173",
"0.42019337",
"0.4200862",
"0.41950476",
"0.41701272",
"0.41528624",
"0.41405702",
"0.40915707",
"0.40816742"
]
| 0.8193819 | 0 |
Decode the response headers and create appropriate metrics based on the header values. The response_headers are passed in as a list of tuples. [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)] | def process_response_headers(self, response_headers):
settings = self.settings
if not settings:
return
if not settings.cross_application_tracer.enabled:
return
appdata = None
try:
for k, v in response_headers:
if k.upper() == self.cat_appdata_key.upper():
appdata = json_decode(deobfuscate(v,
settings.encoding_key))
break
if appdata:
self.params['cross_process_id'] = appdata[0]
self.params['external_txn_name'] = appdata[1]
self.params['transaction_guid'] = appdata[5]
except Exception:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_response(response):\n data = []\n \n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n \n row_count = 0 \n for row in rows:\n #print '\\n\\n', 'ROW_COUNT: ', row_count, '\\n'\n data.append({}) \n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n #print header + ': ' + dimension\n data[row_count][header[3:]] = dimension\n \n for i, values in enumerate(dateRangeValues):\n #print 'Date range (' + str(i) + ')'\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n #print metricHeader.get('name') + ': ' + value\n data[row_count][metricHeader.get('name')[3:]] = value\n \n row_count += 1 \n \n return data",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):",
"def _ParseHTTPHeaders(self, http_headers_data, offset, display_name):\n header_string = http_headers_data.decode('ascii', errors='replace')\n\n try:\n http_header_start = header_string.index('request-method')\n except ValueError:\n logger.debug('No request method in header: \"{0:s}\"'.format(header_string))\n return None, None\n\n # HTTP request and response headers.\n http_headers = header_string[http_header_start::]\n\n header_parts = http_headers.split('\\x00')\n\n # TODO: check len(header_parts).\n request_method = header_parts[1]\n\n if request_method not in self._REQUEST_METHODS:\n logger.debug((\n '[{0:s}] {1:s}:{2:d}: Unknown HTTP method \\'{3:s}\\'. Response '\n 'headers: \\'{4:s}\\'').format(\n self.NAME, display_name, offset, request_method, header_string))\n\n try:\n response_head_start = http_headers.index('response-head')\n except ValueError:\n logger.debug('No response head in header: \"{0:s}\"'.format(header_string))\n return request_method, None\n\n # HTTP response headers.\n response_head = http_headers[response_head_start::]\n\n response_head_parts = response_head.split('\\x00')\n\n # Response code, followed by other response header key-value pairs,\n # separated by newline.\n # TODO: check len(response_head_parts).\n response_head_text = response_head_parts[1]\n response_head_text_parts = response_head_text.split('\\r\\n')\n\n # The first line contains response code.\n # TODO: check len(response_head_text_parts).\n response_code = response_head_text_parts[0]\n\n if not response_code.startswith('HTTP'):\n logger.debug((\n '[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. '\n 'Response headers: \\'{3:s}\\'.').format(\n self.NAME, display_name, offset, header_string))\n\n return request_method, response_code",
"def _parse_rate_limit_headers(headers):\n limit = int(headers[\"X-RateLimit-Limit\"])\n remaining = int(headers[\"X-RateLimit-Remaining\"])\n reset_at_utc = int(headers[\"X-RateLimit-Reset\"])\n return {\n \"limit\": limit,\n \"used\": limit - remaining,\n \"remaining\": remaining,\n \"reset_at_utc\": reset_at_utc,\n \"reset_in_sec\": reset_at_utc - round(time.time()),\n \"last_update\": round(time.time())\n }",
"def from_headers(self, headers):\n try:\n # First IP address is the one of the client\n ip = headers['X_FORWARDED_FOR'].split(',')[0].strip()\n except KeyError:\n ip = headers.get('REMOTE_ADDR')\n\n if ip:\n # Double-check if the address has a valid format\n if re.match(r'^[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}$',\n ip, re.I):\n ip = None\n\n # Exclude private IP address ranges\n if re.match(r'^(?:127\\.0\\.0\\.1|10\\.|192\\.168\\.|172\\.(?:1[6-9]|2[0-9]|3[0-1])\\.)', ip):\n ip = None\n\n self.ip_address = ip\n\n self.user_agent = headers.get('HTTP_USER_AGENT')\n\n if 'HTTP_ACCEPT_LANGUAGE' in headers:\n parsed_locales = []\n res = re.findall(\n r'(^|\\s*,\\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\\s*(;\\s*q\\s*=\\s*(1(\\.0{0,3})?|0(\\.[0-9]{0,3})))?', \n headers['HTTP_ACCEPT_LANGUAGE'], re.I)\n for r in res:\n name = r[1].replace('-', '_')\n value = 1 if not r[4] else r[4]\n parsed_locales += [(name, value)]\n\n self.locale = sorted(parsed_locales, key=lambda x: x[1],\n reverse=True)[0][0]\n\n return self",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def response_headers():\n # Pending swaggerUI update\n # https://github.com/swagger-api/swagger-ui/issues/3850\n headers = MultiDict(request.args.items(multi=True))\n response = jsonify(list(headers.lists()))\n\n while True:\n original_data = response.data\n d = {}\n for key in response.headers.keys():\n value = response.headers.get_all(key)\n if len(value) == 1:\n value = value[0]\n d[key] = value\n response = jsonify(d)\n for key, value in headers.items(multi=True):\n response.headers.add(key, value)\n response_has_changed = response.data != original_data\n if not response_has_changed:\n break\n return response",
"def parse_cookies( headers ):",
"def header_info(msg_ids, accumulator):\n headers = []\n for ms_id in msg_ids:\n if ms_id in accumulator.headers_map.keys():\n headers.append(accumulator.headers_map[ms_id])\n return headers",
"def __parseHeaders(headers):\n global __all_headers\n if headers and len(headers) > 0:\n for header in headers:\n name = header.getElementsByTagName(\"name\")[0].childNodes[0].data\n value = header.getElementsByTagName(\"value\")[0].childNodes[0].data\n __addHeader(name, value)\n #print(__all_headers)",
"def _make_headers_df(headers_response):\n\n headers_df = util.make_dataframe(headers_response)\n headers_df = headers_df[\n [\"text\", \"column_index_begin\", \"column_index_end\", \"row_index_begin\", \"row_index_end\", \"cell_id\",\n \"text_normalized\"]]\n return headers_df",
"def parse_response(self, response, case):\n request = response.request\n parsed = {\n 'request': {\n 'method': request.method,\n 'url': request.url,\n 'body': request.body,\n },\n 'response': {\n 'headers': OrderedDict(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n }\n }\n\n # Re-assemble request line\n url_parts = urlparse(request.url)\n parsed['request']['request_line'] = '%s %s%s%s HTTP/1.1' % (\n request.method, url_parts.path, '?' if url_parts.query else '',\n url_parts.query)\n\n # Process request headers\n if self.mode == 'display':\n hostname = url_parts.hostname\n else:\n hostname = self.doc_hostname\n parsed['request']['headers'] = OrderedDict((('Host', hostname),))\n for header in sorted([h.title() for h in request.headers]):\n raw_value = request.headers[header]\n value = self.parse_header(header, raw_value, 'request')\n if value:\n parsed['request']['headers'][header.title()] = value\n\n # Re-assemble response line\n parsed['response']['response_line'] = 'HTTP/1.1 %s %s' % (\n response.status_code, response.reason)\n\n # Process response headers\n for header in sorted([h.title() for h in response.headers]):\n raw_value = response.headers[header]\n value = self.parse_header(header, raw_value, 'response')\n if value:\n fixed_header = header.title().replace('Www', 'WWW')\n parsed['response']['headers'][fixed_header] = value\n\n # Process response body\n response.encoding = 'utf-8'\n body = response.text\n if self.standardize:\n body = body.replace(api, self.doc_base_url)\n for key, value in case.get('standardize', {}).items():\n assert key in ('created', 'modified', 'date')\n pattern = r\"\"\"(?x)(?s) # Be verbose, . include newlines\n \"%s\":\\s\" # Key and quote\n \\d{4}-\\d{2}-\\d{2} # Date\n T\\d{2}:\\d{2}:\\d{2} # Time\n \\.\\d{0,6}Z # Microseconds and UTC timezone\n \", # End quote and comma\n \"\"\" % key\n replace = '\"%s\": \"%s\",' % (key, value)\n body = re.sub(pattern, replace, body)\n parsed['response']['body'] = body\n\n return parsed",
"def response_helper(self, response, **kwargs):\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n return response",
"def convert_headers(self, tickers):\n\n result = _makehash()\n for pair_name, fetched_values_dict in list(tickers.items()):\n for header, value in list(fetched_values_dict.items()):\n result[pair_name][self.config['headers'][header]] = value\n return result",
"def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new",
"def parse_response(response):\n # a result should always have a status\n status = response['status']\n\n # a result _may_ have a results or a reason\n result = response.get('results', [])\n reason = response.get('reason', None)\n\n return status, result, reason",
"def fill_headers(self, headers):\n self.headers = {h[0]: h[1] for h in headers}",
"def parse_demultiplex_stats_htm(self, fc_name, **kw):\n metrics = {\"Barcode_lane_statistics\": [],\n \"Sample_information\": []}\n # Use a glob to allow for multiple fastq directories\n htm_file_pattern = os.path.join(self.path, \"Unaligned*\", \"Basecall_Stats_*{}\".format(fc_name[1:]), \"Demultiplex_Stats.htm\")\n for htm_file in glob.glob(htm_file_pattern):\n self.log.debug(\"parsing {}\".format(htm_file))\n if not os.path.exists(htm_file):\n self.log.warn(\"No such file {}\".format(htm_file))\n continue\n with open(htm_file) as fh:\n htm_doc = fh.read()\n soup = BeautifulSoup(htm_doc)\n ##\n ## Find headers\n allrows = soup.findAll(\"tr\")\n column_gen=(row.findAll(\"th\") for row in allrows)\n parse_row = lambda row: row\n headers = [h for h in map(parse_row, column_gen) if h]\n bc_header = [str(x.string) for x in headers[0]]\n smp_header = [str(x.string) for x in headers[1]]\n ## 'Known' headers from a Demultiplex_Stats.htm document\n bc_header_known = ['Lane', 'Sample ID', 'Sample Ref', 'Index', 'Description', 'Control', 'Project', 'Yield (Mbases)', '% PF', '# Reads', '% of raw clusters per lane', '% Perfect Index Reads', '% One Mismatch Reads (Index)', '% of >= Q30 Bases (PF)', 'Mean Quality Score (PF)']\n smp_header_known = ['None', 'Recipe', 'Operator', 'Directory']\n if not bc_header == bc_header_known:\n self.log.warn(\"Barcode lane statistics header information has changed. New format?\\nOld format: {}\\nSaw: {}\".format(\",\".join(([\"'{}'\".format(x) for x in bc_header_known])), \",\".join([\"'{}'\".format(x) for x in bc_header])))\n if not smp_header == smp_header_known:\n self.log.warn(\"Sample header information has changed. New format?\\nOld format: {}\\nSaw: {}\".format(\",\".join(([\"'{}'\".format(x) for x in smp_header_known])), \",\".join([\"'{}'\".format(x) for x in smp_header])))\n ## Fix first header name in smp_header since htm document is mal-formatted: <th>Sample<p></p>ID</th>\n smp_header[0] = \"Sample ID\"\n\n ## Parse Barcode lane statistics\n soup = BeautifulSoup(htm_doc)\n table = soup.findAll(\"table\")[1]\n rows = table.findAll(\"tr\")\n column_gen = (row.findAll(\"td\") for row in rows)\n parse_row = lambda row: {bc_header[i]:str(row[i].string) for i in range(0, len(bc_header)) if row}\n metrics[\"Barcode_lane_statistics\"].extend(map(parse_row, column_gen))\n\n ## Parse Sample information\n soup = BeautifulSoup(htm_doc)\n table = soup.findAll(\"table\")[3]\n rows = table.findAll(\"tr\")\n column_gen = (row.findAll(\"td\") for row in rows)\n parse_row = lambda row: {smp_header[i]:str(row[i].string) for i in range(0, len(smp_header)) if row}\n metrics[\"Sample_information\"].extend(map(parse_row, column_gen))\n\n # Define a function for sorting the values\n def by_lane_sample(data):\n return \"{}-{}-{}\".format(data.get('Lane',''),data.get('Sample ID',''),data.get('Index',''))\n\n # Post-process the metrics data to eliminate duplicates resulting from multiple stats files\n for metric in ['Barcode_lane_statistics', 'Sample_information']:\n dedupped = {}\n for row in metrics[metric]:\n key = \"\\t\".join(row.values())\n if key not in dedupped:\n dedupped[key] = row\n else:\n self.log.debug(\"Duplicates of Demultiplex Stats entries discarded: {}\".format(key[0:min(35,len(key))]))\n metrics[metric] = sorted(dedupped.values(), key=by_lane_sample)\n\n ## Set data\n return metrics",
"def print_response(response):\n #fyi this is not my code, i grabbed it from github\n #forgot to copy the url though\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n\n for row in report.get('data', {}).get('rows', []):\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n print header + ': ' + dimension\n\n for i, values in enumerate(dateRangeValues):\n print 'Date range: ' + str(i)\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n print metricHeader.get('name') + ': ' + value",
"def convert_response_to_df(response):\n\n list = []\n\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n sampled = True if report.get('samplesReadCounts') else False\n\n for row in rows:\n dict = {}\n dict['sampling'] = sampled\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n dict[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n for metric, value in zip(metricHeaders, values.get('values')):\n if ',' in value or '.' in value:\n dict[metric.get('name')] = float(value)\n else:\n dict[metric.get('name')] = int(value)\n list.append(dict)\n\n df = pd.DataFrame(list)\n return df",
"def parse(self, stdout, stderr, returncode):\n\n dw_metrics = {}\n print('Input for test case: ', len(stdout))\n\n for dw_line in stdout:\n if not any(c.isalpha() for c in dw_line): # Skip empty or non-interpretable lines\n continue\n\n if 'URL hit percentages' in dw_line:\n dw_metrics['URL hit percentages'] = {}\n continue\n\n if 'URL hit percentages' in dw_metrics and ':' in dw_line:\n self.parse_dw_key_val(dw_line, dw_metrics)\n\n print('DW metrics: ', dw_metrics)\n return dw_metrics",
"def _headers(self, headers_dict):\n return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))",
"def parse_header(self):",
"def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())",
"def parse_response(response_url, headers, body):\n\n # TODO: Might consider making these exceptions.\n if 'content-type' not in headers:\n log.err('Missing Content-Type header. Skipping.')\n return set(), set()\n content_type = headers['content-type'].lower()\n if 'text/html' not in content_type:\n log.err('Content type \"%s\" not parseable. Skipping.' % content_type)\n return set(), set()\n\n # If this were a production environment, we'd probably want to try to\n # figure out chunked response body parsing. We could end up with some\n # huge body sizes as-is.\n soup = BeautifulSoup(body)\n image_response_url_set = set([])\n links_to_crawl_set = set([])\n\n for tag in soup.find_all(['a', 'img']):\n if tag.name == 'a':\n _record_link(response_url, tag, links_to_crawl_set)\n elif tag.name == 'img':\n _record_image(response_url, tag, image_response_url_set)\n\n return image_response_url_set, links_to_crawl_set",
"def respuesta(response):\n for report in response.get('reports', []):\n\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get(\n 'metricHeader', {}).get('metricHeaderEntries', [])\n\n return_data = []\n\n for row in report.get('data', {}).get('rows', []):\n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n pipeline_insert = {}\n for header, dimension in zip(dimensionHeaders, dimensions):\n pipeline_insert[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n pipeline_insert[metricHeader.get('name')] = value\n return_data.append(pipeline_insert)\n\n return return_data",
"def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict",
"def dispatch(self, request, *args, **kwargs):\n response = super(HeaderMixin, self).dispatch(request, *args, **kwargs)\n for key, value in self.get_headers(request).items():\n if key not in response:\n response[key] = value\n return response",
"def response_headers(self, response_headers):\n\n self._response_headers = response_headers"
]
| [
"0.6024652",
"0.58121204",
"0.5701095",
"0.5608415",
"0.55389136",
"0.54506004",
"0.54506004",
"0.5417471",
"0.5374543",
"0.5340843",
"0.53154534",
"0.5298669",
"0.5272827",
"0.52655214",
"0.5247795",
"0.52409744",
"0.5230199",
"0.5219283",
"0.5217733",
"0.5178149",
"0.51646173",
"0.5163587",
"0.5154996",
"0.5132517",
"0.5132367",
"0.5129278",
"0.5123053",
"0.5097087",
"0.50953066",
"0.5073256"
]
| 0.6154797 | 0 |
It takes in a website link as input and returns all the class names used on the website. | def extract_all_tags(final_link, driver):
#driver = webdriver.Chrome(executable_path="ChromeDriver/chromedriver.exe")
driver.get(str(final_link))
classes = []
tags = ['div', 'td', 'li', 'a']
for tag in tags:
a = driver.find_elements_by_tag_name(str(tag))
b = len(a)
for i in range(b):
try:
if a[i].get_attribute("class") == None or a[i].get_attribute("class") == '' or a[i].get_attribute("class") == ' ' or a[i].get_attribute("class") == ' ':
continue
else:
className = a[i].get_attribute("class").strip().split(" ")
for classN in className:
classes.append(str(tag) + '.' + str(classN))
except:
continue
#driver.quit()
classes = list(dict.fromkeys(classes))
return(classes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes",
"def class_frequencies(url):\n links_list_list = []\n try:\n request = requests.get(url)\n soup = BeautifulSoup(request.content, \"lxml\")\n classes = []\n for element in soup.find_all(class_=True):\n list_class = element.get(\"class\")\n classe = \"\"\n for elt in list_class:\n classe += elt + \" \"\n classe = classe[: -1]\n classes.append(classe)\n # print(\"Class:\", classes, \":\", len(classes))\n dict_frequencies = Counter(classes)\n list_frequencies = list(dict_frequencies.values())\n list_frequencies = list(set(list_frequencies))\n list_frequencies = sorted(list_frequencies, reverse=True)\n # list_frequencies = list_frequencies[: 5]\n # print(\"List frequency:\", list_frequencies)\n for classe in dict_frequencies.keys():\n if dict_frequencies[classe] in list_frequencies and dict_frequencies[classe] > 2:\n # print(\"Classes:\", classe, \"|\", dict_frequencies[classe])\n is_project_class = True\n for classes_removed in list_html_classes_removed:\n if classes_removed in classe:\n is_project_class = False\n links_projects_list = []\n soup2 = soup.find_all(class_=classe)\n for i in soup2:\n linl = i.find('a', href=True)\n links_projects_list.append(linl)\n if linl is None:\n is_project_class = False\n\n if is_project_class:\n for i in range(len(links_projects_list)):\n links_projects_list[i] = links_projects_list[i].get('href')\n # print('Projects Links Found : ', links_projects_list)\n links_list_list += [links_projects_list]\n b_set = set(map(tuple, links_list_list))\n list_unique_lists = list(map(list, b_set))\n domain = url.replace('http://', '')\n domain = domain.replace('https://', '')\n ndx = domain.find('/')\n domain = domain[: ndx]\n # print(\"b:\", list_unique_lists, \"| domain:\", domain)\n count_good = 0\n list_good_list = []\n for list_urls_possibles in list_unique_lists:\n is_a_good_list = True\n for i, url_possible in enumerate(list_urls_possibles):\n if url_possible[: 4] == \"http\":\n if domain not in url_possible[: -2] or \".jpg\" in url_possible or url_possible[: -2] in url:\n is_a_good_list = False\n else:\n new_url_possible = domain + \"/\" + url_possible\n if \".jpg\" in new_url_possible or new_url_possible[: -2] in url:\n is_a_good_list = False\n else:\n list_urls_possibles[i] = new_url_possible\n if is_a_good_list:\n count_good += 1\n list_good_list.append(list_urls_possibles)\n # print(list_urls_possibles)\n # print(count_good)\n if count_good > 0:\n return \"Found by class\", from_lists_to_list(list_good_list)\n else:\n url_test = url + \"/\"\n index_projects = url_test.find(\"projects\")\n index_slash = url_test.find(\"/\", index_projects)\n if len(url) > index_slash + 2:\n return \"Direct project\", [url]\n else:\n return \"List of non clickable projects\", [url]\n except requests.exceptions.ConnectionError:\n print(\"Error requests:\", url)\n return \"Nothing\", []",
"def get_classes(username, password):\n\n def chunk(l, size):\n return [l[i:i+size] for i in xrange(0, len(l), size)]\n\n driver = webdriver.PhantomJS(PHANTOMJS_BIN)\n driver.implicitly_wait(TIMEOUT)\n\n driver.get(TRITONLINK_URL)\n\n # Get redirected to login page\n login_url = driver.current_url\n\n # Send to elements\n e_username = driver.find_element_by_name(USERNAME_NAME)\n e_password = driver.find_element_by_name(PASSWORD_NAME)\n e_username.send_keys(username)\n e_password.send_keys(password)\n e_password.send_keys(Keys.RETURN)\n\n try:\n WebDriverWait(driver, TIMEOUT).until(\n lambda d: d.find_element_by_css_selector(\"#%s, .%s\" %\n (CLASSES_CONTAINER_ID, LOGIN_ERROR_CLASS)\n )\n )\n\n # Check if logged in\n if driver.current_url == login_url:\n raise AuthenticationException\n\n bs_mtl = BeautifulSoup(driver.page_source)\n except TimeoutException:\n raise TritonLinkException(\"Request timed out\")\n finally:\n driver.quit()\n\n # Parse TritonLink\n\n # Get all class elements by weekday\n try:\n bs_classes_container = bs_mtl.find_all(id=CLASSES_CONTAINER_ID)[0]\n except IndexError:\n raise TritonLinkException(\"Classes container not found\")\n\n bs_classes = bs_classes_container.find_all(CLASSES_ELEM)\n by_weekday = zip(*chunk(bs_classes, len(WEEK_DAYS)))\n\n # Process each td\n classes = []\n for class_day, day in zip(WEEK_DAYS, by_weekday):\n for clazz in day:\n try:\n class_info = clazz.find_all(class_=CLASSES_CLASS)[0]\n # If empty, skip\n except IndexError:\n continue\n\n class_time, class_name, class_loc = list(class_info.stripped_strings)\n classes.append({\n 'name': class_name,\n 'day': class_day,\n 'time': class_time,\n 'location': class_loc,\n })\n\n return classes",
"def EnrolledClasses(self,html): \n classes = []\n soup = BeautifulSoup(html)\n for element in soup.find_all(\"input\"):\n if element[\"name\"] == \"TITLE\" and element[\"value\"]:\n classes.append(element.get(\"value\"))\n return classes",
"def find_ahref_by_class(tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n href = str(item.find('a'))\n href = href.split('\"')[1]\n result.append(href)\n return result",
"def classes(attrs):\n return attrs.get('class', '').split()",
"def find_usefull_links(links, classmodel, class_count_vect):\n\n import re\n final_links = []\n seclinks = links\n for link in links:\n fulllink = link\n if link == None:\n continue\n else:\n link = link.replace('://', ' ')\n link = link.replace('@', ' ')\n link = link.replace('#', ' ')\n link = link.replace('/', ' ')\n link = link.replace('-', ' ')\n link = link.replace('.', ' ')\n link = link.replace('https', '')\n link = link.replace('http', '')\n link = link.replace('www', '')\n link = link.replace('&', ' ')\n link = link.replace('=', ' ')\n linkpd = pd.Series(link.strip())\n link_feature = class_count_vect.transform(linkpd)\n result = classmodel.predict(link_feature)\n\n result = result.tolist()\n result = str(result)\n if result == '[1]':\n final_links.append(fulllink)\n final_links = list(dict.fromkeys(final_links))\n \n if len(final_links) == 0 or len(final_links) < 5:\n for linksec in seclinks:\n linkwords = ['cabinet', 'gover', 'goverment', 'composition', 'ministers', 'minister',\n 'president', 'composicao', 'parliament', 'person', 'who', 'mini', 'compo',\n 'governor', 'secretariat', 'secretary']\n for w in linkwords:\n if re.search(w, linksec):\n final_links.append(linksec)\n else:\n continue\n final_links = list(dict.fromkeys(final_links))\n return (final_links)",
"def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def classes(self):\n return self.browser.classes(self)",
"def get_classes(item):\n query = {\"query\": \"\"\"\n SELECT ?classe ?classeLabel WHERE { \n wd:%s wdt:P31/wdt:P279* ?classe . \n\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\". } \n }\n \"\"\" % (item)\n }\n\n url = \"https://query.wikidata.org/sparql\"\n\n r = requests.get(url, params=query)\n\n # print(r.text)\n\n soup = BeautifulSoup(r.text, \"lxml\")\n\n return [x.text.strip( 'http://www.wikidata.org/entity/' ) for x in soup.find_all( \"uri\" )]",
"def scrap_site(link):\n pass # Scrapy or BeautifulSoup",
"def iter_spider_classes(module):\n ...",
"def get_table_classes(self, table):\n return [\n utils.clean(row.find('a').get('title'))\n for row in table.find('tbody').findAll('tr') if row.find('a')\n ]",
"def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines",
"def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)",
"def listingURLs(soup):\n\n #Get URLs\n itemListing = soup.find_all(class_=\"user-ad-row link link--base-color-inherit link--hover-color-none link--no-underline\")\n itemListing += soup.find_all(class_=\"user-ad-row user-ad-row--featured-or-premium link link--base-color-inherit link--hover-color-none link--no-underline\")\n itemListing += soup.find_all(class_=\"user-ad-row user-ad-row--premium user-ad-row--featured-or-premium link link--base-color-inherit link--hover-color-none link--no-underline\")\n #Create list\n urlList = [i['href'] for i in itemListing]\n return urlList",
"def find_link_class(self):\n\n linkclass = ComicSite.CHALLENGE_ACTIVE\n\n # for project hosted on comic, try to find upcoming/active automatically\n\n if self.params[\"hosted on comic\"]:\n linkclass = self.params[\"project type\"]\n\n if self.date > self.to_datetime(datetime.datetime.today()):\n linkclass += \" \"+ self.UPCOMING\n\n else:\n # else use the explicit setting in xls\n\n section = self.params[\"website section\"].lower()\n if section == \"upcoming challenges\":\n linkclass = ComicSite.CHALLENGE_ACTIVE +\" \"+ self.UPCOMING\n elif section == \"active challenges\":\n linkclass = ComicSite.CHALLENGE_ACTIVE\n elif section == \"past challenges\":\n linkclass = ComicSite.CHALLENGE_INACTIVE\n elif section == \"data publication\":\n linkclass = ComicSite.DATA_PUB\n\n return linkclass",
"def parseClasses(file_name):\n\tlines = file(file_name).read().strip().split('\\n')\n\tlines = [x.strip() for x in lines if len(x.strip()) > 0]\n\tclasses = []\n\tfor l in lines:\n\t\tclasses = classes + [clean(x) for x in l.split(',')]\n\treturn classes",
"def GetClassesFromFile(self,file_path):\n classes = []\n try:\n fl = open(file_path,\"r\")\n for line in fl.readlines():\n if \"class\" in line and \":\" in line:\n line = line.strip(\"class \")\n line2 = \"\"\n for i in line:\n if i!=\":\": line2+=i\n\n classes.append(line2)\n if classes:\n return classes\n else:\n return False\n fl.close()\n except:\n return False",
"def get_guide_urls(self):\n # data structures for returns\n urls = []\n link_labels = []\n link_class = []\n # data structures for tracking classes for links\n cur_class = None\n dict_counter = {}\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n # update class for the links if boundary found\n if url in url_to_class:\n dict_count = min(dict_counter.get(url, 0), len(url_to_class[url]) - 1)\n cur_class = url_to_class[url][dict_count]\n dict_counter[url] = dict_counter.get(url, 0) + 1\n # record the data for the link\n if cur_class is not None:\n urls += [url]\n link_labels += [tag.text]\n link_class += [cur_class]\n return urls, link_labels, link_class",
"def get_wiki_taxonomy(url):\n classifications = ['Kingdom:', 'Division:', 'Phylum:', 'Class:', 'Order:', 'Suborder:', 'Family:', 'Genus:',\n 'Species:']\n\n response = requests.get(url)\n soup = BeautifulSoup(response.text,'lxml')\n\n table = soup.find( \"table\", {\"class\":\"infobox biota\"})\n rows = []\n class_dict = {}\n try:\n for row in table.findAll(\"tr\"):\n for td in row.findAll(\"td\"):\n text = td.text.strip()\n if len(text) > 0:\n rows.append(text)\n\n # find matching entries to classification entries and get next row as value\n\n for i in range(len(rows)-1):\n if rows[i] in classifications:\n entry = rows[i+1]\n class_dict[rows[i][:-1]] = fix_extra(fix_comma(entry))\n except Exception:\n pass\n\n # quickfix some problems\n # species can have genus abbreviation, we will remove here\n for key, value in class_dict.items():\n if key == 'Species':\n class_dict['Species'] = get_lowercase(value)\n else:\n class_dict[key] = value.split(' ')[0]\n return class_dict",
"def get_links_from_one_page(driver,site,URL_exclusions):\r\n while True:\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Find all elements with class=\"g\". This includes search results.\r\n break\r\n except:\r\n continue \r\n links = []\r\n for result in results:\r\n link = result.find_element_by_tag_name(\"a\") #Hyperlinks are contained under <a> tags\r\n link = link.get_attribute('href') #Retrive link as a string\r\n if link.find(site) != -1: #Some class=\"g\" elements are not search results. Only store links with urls containing \"site\".\r\n links.append(link)\r\n sig_links = [] #Create list of links for pages not from travel sections\r\n for url in links:\r\n find = np.zeros(len(URL_exclusions))\r\n for i in range(len(URL_exclusions)):\r\n find[i] = bool(url.find(URL_exclusions[i]) == -1)\r\n if all(find) == True: #If none of the exclusion words are in url\r\n sig_links.append(url)\r\n return sig_links",
"def _parse_classification(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return FORUM\n return COMMISSION",
"def process_class_list(self, module, classes):",
"def enumerate_profiles(inhandle, page):\n html = inhandle.read()\n soup = BeautifulSoup(html, 'html.parser')\n \n urls = [ node.find('a')['href'] for node in soup.findAll('h1', {'class':'entry-title'})]\n return urls",
"def countdots(url): \r\n return url.count('.')",
"def extract_classes(soup):\r\n select = soup.find('select', id='dnn_ctr11396_TimeTableView_ClassesList')\r\n return {option['value']: option.text for option in select.findChildren('option')}",
"def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines",
"def get_classes(self):\n\n # Sort them.\n classes = ['Safe','Violence','Gun','Cold_Arms','Smoking','Kissing']\n classes = sorted(classes)\n\n # Return.\n if self.class_limit is not None:\n return classes[:self.class_limit]\n else:\n return classes"
]
| [
"0.6977077",
"0.63743794",
"0.63369435",
"0.6298379",
"0.60849005",
"0.6010769",
"0.595103",
"0.58935386",
"0.58824253",
"0.5866446",
"0.58081573",
"0.57081354",
"0.56383866",
"0.5592302",
"0.559083",
"0.5580339",
"0.5535862",
"0.5511863",
"0.5510536",
"0.55013674",
"0.54908884",
"0.5482741",
"0.54416394",
"0.5421503",
"0.54154116",
"0.5376562",
"0.53518355",
"0.53411573",
"0.53218365",
"0.53164047"
]
| 0.6917513 | 1 |
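The document field in the row above is only the tail of its function; the neighbouring rows call extract_all_tags(final_link, driver) and feed its output to find_elements_by_css_selector, so the snippet most likely belongs to that helper. A minimal reconstruction of the full pattern in the same Selenium-3 style — the function name, the candidate tag list, and the error handling here are assumptions inferred from context, not dataset content — might look like:

def extract_all_tags(website, driver):
    # Visit the page and collect 'tag.classname' CSS selectors for a few container tags.
    driver.get(website)
    classes = []
    for tag in ['div', 'li', 'p', 'tr']:  # assumed candidate tags, not dataset content
        try:
            elements = driver.find_elements_by_tag_name(tag)
            for element in elements:
                class_attr = element.get_attribute("class")
                if class_attr is None or class_attr.strip() == '':
                    continue
                for class_name in class_attr.strip().split(" "):
                    classes.append(str(tag) + '.' + str(class_name))
        except Exception:
            continue
    # De-duplicate while preserving insertion order, as the original tail does.
    return list(dict.fromkeys(classes))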
1. It takes all the useful links from the home website as input. 2. It performs web crawling on those links and extracts all the useful links found. 3. It extracts the useful data from all the useful links extracted. | def deep_link_scraping(final_links, driver):
    import re
    # collect second-level links by crawling every useful first-level link
    second_links = []
    for website2 in final_links:
        links2 = extract_all_links(website2, driver)
        final_links1 = find_usefull_links(links2, classmodel, class_count_vect)
        # keep only links that were not already found on the home website
        final_links2 = list(set(final_links1) - set(final_links))
        second_links += final_links2
    second_links = list(dict.fromkeys(second_links))
    second_links1 = find_usefull_links(second_links, classmodel, class_count_vect)

    # drop any fragment identifier (#...) so the same page is not queued twice
    second_links2 = []
    for link in second_links1:
        match = re.search('#', link)
        if match:
            link = link[:match.start()]
        second_links2.append(link)
    second_links2 = list(dict.fromkeys(second_links2))

    # scrape every second-level page, preferring class-based extraction when useful tags are found
    for final_link in second_links2:
        tags = extract_all_tags(final_link, driver)
        if len(tags) != 0:
            final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)
            if len(final_tags) != 0:
                scrape_data(final_link, final_tags, driver)
            else:
                scrape_data_tag(final_link, driver)
        else:
            scrape_data_tag(final_link, driver)
    return second_links2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)",
"def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines",
"def get_links_from_one_page(driver,site,URL_exclusions):\r\n while True:\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Find all elements with class=\"g\". This includes search results.\r\n break\r\n except:\r\n continue \r\n links = []\r\n for result in results:\r\n link = result.find_element_by_tag_name(\"a\") #Hyperlinks are contained under <a> tags\r\n link = link.get_attribute('href') #Retrive link as a string\r\n if link.find(site) != -1: #Some class=\"g\" elements are not search results. Only store links with urls containing \"site\".\r\n links.append(link)\r\n sig_links = [] #Create list of links for pages not from travel sections\r\n for url in links:\r\n find = np.zeros(len(URL_exclusions))\r\n for i in range(len(URL_exclusions)):\r\n find[i] = bool(url.find(URL_exclusions[i]) == -1)\r\n if all(find) == True: #If none of the exclusion words are in url\r\n sig_links.append(url)\r\n return sig_links",
"def Wikipedia_process_content_URL(URL:str):\r\n anchors_list = []\r\n trong_tag_list = []\r\n\r\n print(f\"\\nFetching the URL: {URL} ------------------\")\r\n browser.get(URL)\r\n\r\n # Wait until the browser loads the URL properly\r\n print(\"\\nSleeping for 10 seconds ------------------\")\r\n time.sleep(10)\r\n \r\n Current_Page_Content = browser.page_source.encode('utf-8').strip()\r\n \r\n # Parsing the html page\r\n soup = BeautifulSoup(Current_Page_Content, 'html.parser')\r\n # print(soup.prettify())\r\n \r\n # Finding all dls and then all the anchors in them and then appending them to anchors_list\r\n # Find all anchors text inside <dl>/ strip is for removing empty spaces\r\n all_dls = soup.find_all('dl')\r\n for dl in all_dls:\r\n Current_dl_list_ofAnchors = dl.find_all('a')\r\n for anchor in Current_dl_list_ofAnchors:\r\n current_anchor_text = anchor.get_text().strip().lower()\r\n \r\n # Cleaning logic of the list: no empty items, no items of 1, 2 or 3 characters\r\n if(current_anchor_text == \"\"):\r\n continue\r\n elif(len(current_anchor_text)<4):\r\n continue\r\n else:\r\n anchors_list.append(current_anchor_text)\r\n \r\n # 1) Removing empty string items from the list\r\n while(\"\" in anchors_list) :\r\n anchors_list.remove(\"\") \r\n\r\n # print(\"\\nAnchors list in this iteration--------\")\r\n # print(anchors_list)\r\n\r\n print(f\"\\nFinished parsing & storing keywords from URL: {URL}--------------\")\r\n return anchors_list",
"def scrap_site(link):\n pass # Scrapy or BeautifulSoup",
"def getMNACGenerator():\n\n # 0 - 89 (something between 80 and 90\n searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?title_1=&title=&field_piece_inventory_number_value=&keys=&field_piece_type_value_i18n[0]=pintura&&&page=%s'\n # 0 - 48, for some reason not all paintings get returned in the main query\n # searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?field_piece_type_value_i18n[0]=pintura&field_piece_info_content_value[p.%%2019th]=p.%%2019th&field_piece_info_content_value[q.%%2020th]=q.%%2020th&&page=%s'\n htmlparser = HTMLParser.HTMLParser()\n\n foundit=True\n\n for i in range(0, 89):\n searchUrl = searchBaseUrl % (i,)\n print searchUrl\n searchPage = urllib2.urlopen(searchUrl)\n searchPageData = searchPage.read()\n\n searchRegex = u'\\<a href\\=\\\"(\\/en\\/colleccio\\/[^\\\"]+)\\\"\\>Read more\\<\\/a\\>'\n itemmatches = re.finditer(searchRegex, searchPageData)\n urllist = []\n #for match in matches:\n # try:\n # # #bla = unicode(match.group(1), u'utf-8')\n # urllist.append(u'http://www.dulwichpicturegallery.org.uk%s' % (match.group(1),))\n # except UnicodeDecodeError:\n # pywikibot.output(u'Found an url I cannot parse: %s' % (unicode(match.group(1), u'utf-8'),))#\n\n #print len(urllist)\n #urlset = set(urllist)\n #print len(urlset)\n\n\n for itemmatch in itemmatches:\n url = u'http://www.museunacional.cat%s' % (itemmatch.group(1),)\n print url\n\n if url==u'http://adsfasdfasdf':\n foundit=True\n if not foundit:\n continue\n metadata = {}\n\n metadata['collectionqid'] = u'Q861252'\n metadata['collectionshort'] = u'MNAC'\n metadata['locationqid'] = u'Q861252'\n metadata['instanceofqid'] = u'Q3305213'\n \n metadata['url'] = url\n\n itemPage = urllib2.urlopen(url)\n itemPageData = unicode(itemPage.read(), u'utf-8')\n \n #print itemPageEnData\n titleRegex = u'<li class=\"ca first\"><a href=\"/ca/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"ca\" title=\"([^\\\"]+)\">Català</a></li>[\\r\\n\\t\\s]*<li class=\"es\"><a href=\"/es/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"es\" title=\"([^\\\"]+)\">Español</a></li>[\\r\\n\\t\\s]*<li class=\"en last active\"><a href=\"/en/colleccio/[^\\\"]+\" class=\"language-link active\" xml:lang=\"en\" title=\"([^\\\"]+)\">English</a></li>'\n #titleEnRegex = u'<main class=\"main narrow\">[\\r\\n\\t\\s]+<h1>[\\r\\n\\t\\s]*([^<]+)[\\r\\n\\t\\s]*</h1>'\n creatorRegex = u'<div class=\"ds-author-piece\">([^<]+)</div>'\n dateRegex = u'Painting<div class=\"ds-feature\"><p>(\\d\\d\\d\\d)</p></div>' #FIXME: Only matches on real years\n invRegex = u'Inventory number: </div><p>([^<]+)</p>'\n\n # Could also get Dimensions, Materials, Acquisition\n \n matchTitle = re.search(titleRegex, itemPageData)\n if not matchTitle:\n pywikibot.output(u'The title data for this painting is BORKED!')\n continue\n\n #FIXME: Check encoding\n\n metadata['title'] = { u'ca' : htmlparser.unescape(matchTitle.group(1)),\n u'es' : htmlparser.unescape(matchTitle.group(2)),\n u'en' : htmlparser.unescape(matchTitle.group(3)),\n }\n \n #pywikibot.output(metadata.get('title'))\n\n creatorMatch = re.search(creatorRegex, itemPageData)\n if not creatorMatch:\n pywikibot.output(u'The creator data for this painting is BORKED!')\n continue\n\n #FIXME: Add some logic for work after and clean up\n\n name = htmlparser.unescape(creatorMatch.group(1))\n # We need to normalize the name\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n metadata['creatorname'] = 
name\n \n metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n u'ca' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n u'es' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n }\n\n\n invMatch = re.search(invRegex, itemPageData)\n\n if not invMatch:\n pywikibot.output(u'No inventory number found! Skipping')\n continue\n \n metadata['id'] = invMatch.group(1)\n metadata['idpid'] = u'P217'\n\n dateMatch = re.search(dateRegex, itemPageData)\n\n if dateMatch:\n metadata['inception'] = dateMatch.group(1)\n\n yield metadata",
"def gather_headlines(urls):\n pass",
"def harvest_urls():\n manifest = []\n category = {}\n subcategory = {}\n directoryfiles = \"%s/directory_listing/\" % config['PREFIX']\n # ^^ the directory containing the HTML from the Technorati site.\n\n #Set up directory for intermediate data: MANIFEST\n #MANIFEST contains: Category, Subcategory, Title and URL.\n #and is a roster of URLs of blogs to autodiscover.\n if not os.path.exists(prefix + \"meta\"):\n os.mkdir(prefix + \"meta\")\n else:\n #TO DO: What if meta exists but MANIFEST got deleted?\n logging.info(\"Blog URLs already harvested. Skipping...\")\n return\n\n #Iterate through each file in the directory and extract blog URLs.\n for infile in glob.glob(os.path.join(directoryfiles, '*.html')):\n logging.info(\"Harvesting blog URLs from %s.\" % infile)\n dirpage = file(infile)\n root = parse(dirpage).getroot()\n #Rather than infer the category from the filename, just extract\n #it from the file. Not the best way to do this, hit is minimal.\n\tpieces = infile.split('/')[-1].split('_')\n\tcat = pieces[1]\n\tsubcat = None\n\tif len(pieces) == 4:\n\t\tsubcat = pieces[2]\n blogs = root.xpath(\"//td[@class='site-details']\")\n #Iterate through all of the blogs listed on the page.\n for blog in blogs:\n url = blog.xpath(\"a[@class='offsite']\")[0].text\n title = blog.xpath('h3/a')[0].text\n OUT = open(prefix + \"meta/MANIFEST\", \"a\")\n #Store the category of the blog.\n category[url] = cat\n if subcat:\n output = [cat, subcat, title.encode('utf-8').replace(' ', ' '), url]\n subcategory[url] = subcat\n print >> OUT, ' '.join(output)\n else:\n output = [cat, \"NA\", title.encode('utf-8').replace(' ', ' '), url]\n print >> OUT, '\\t'.join(output)\n manifest.append(output)\n OUT.close()\n # This is a hack to get around having to use a database.\n # TODO: Reimplement using a database.\n BLOGCATS = open(prefix + \"blogcats.pickle\", \"w\")\n cPickle.dump(category, BLOGCATS)\n BLOGCATS.close()\n return manifest",
"def getArticleURLS(base_url, headers):\n \n url_links = []\n for url in base_url:\n try:\n #retrieve webpage from the url\n page = requests.get(url, headers=headers).text\n\n #use beautifulSoup to scrap the page\n soup = BeautifulSoup(page, 'lxml')\n\n links = []\n #loop through the page to collect anchor tags and retrieve the urls\n for a in soup.find_all(href=True):\n links.append(a['href'])\n # titles.append(a.text.encode('ascii',errors='replace').replace(b'?', b' ').decode('utf8'))\n\n #clean collected urls\n final_links = [link for link in links if '/News/' in link]\n clean_links = [link for link in final_links if not 'News/688334-688334' in link]\n clean_urls = ['https://www.monitor.co.ug' + link for link in clean_links if not 'https://www.monitor.co.ug' in link]\n cleaned_links = list(OrderedDict.fromkeys(clean_urls))\n url_links += cleaned_links\n except requests.exceptions.ConnectionError as error:\n return error\n\n #patterns to filter base urls with headlines only\n patterns = ['/News/688324-','/News/National/688334-','/News/Education/688336-',\n '/News/Insight/688338-','/News/World/688340-','/News/photos/3286528-']\n result_list = [row for row in url_links if not any(p in row for p in patterns)]\n\n return json.dumps(result_list)",
"def scrape_data_tag(final_link, driver):\n\n import time\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(final_link)\n time.sleep(2)\n tags = ['li', 'p', 'tr']\n for tag in tags:\n children = driver.find_elements_by_tag_name(tag)\n for child in children:\n try:\n links = child.find_elements_by_tag_name('a')\n images = child.find_elements_by_tag_name('img')\n if len(child.text) == 0:\n continue\n else:\n infotext = []\n sociallinks = []\n imageslinks = [] \n checklen = len(child.text.split(\"\\n\"))\n if checklen > 0 and checklen < 30:\n infotext = child.text.split(\"\\n\")\n\n for link in links:\n sociallinks.append(link.get_attribute('href'))\n \n for link in imageslinks:\n imageslinks.append(link.get_attribute('href'))\n\n except:\n continue\n\n infolen = len(infotext)\n sociallen = len(sociallinks)\n if sociallen > 0 and sociallen <= 10 and infolen != 0:\n try:\n dump_data(infotext, sociallinks, imageslinks)\n except:\n continue\n elif sociallen == 0 and infolen != 0:\n try:\n sociallinks = ['No Available Social Media Links']\n dump_data(infotext, sociallinks, imageslinks)\n except:\n continue\n \n\n #driver.quit()",
"def crawl(url):\n try:\n # kondisi berhenti\n time_now = time.time() - start_time\n time_now_int = int(time_now)\n if time_now_int >= 900:\n return\n\n # memasukan url kedalam visited_url\n visited_url.append(url)\n\n # crawl page\n print(\"page yang sedang di crawl:\", url)\n page = requests.get(url)\n request = page.content\n soup = bs4.BeautifulSoup(request, 'html.parser')\n\n # extract title\n title = soup.title.string\n\n # check version html\n article_html5 = soup.find('article')\n if article_html5 is None:\n # extract text content from html4\n html5 = \"no\"\n texts = soup.find('body').findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n text = u\" \".join(t.strip() for t in visible_texts)\n text = text.lstrip().rstrip()\n text = text.split(',')\n clean_text = ''\n for sen in text:\n if sen:\n sen = sen.rstrip().lstrip()\n clean_text += sen+','\n complete_text = clean_text\n # print(complete_text)\n else:\n # extract text content from html5\n html5 = \"yes\"\n texts = article_html5.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n text = u\" \".join(t.strip() for t in visible_texts)\n text = text.lstrip().rstrip()\n text = text.split(',')\n clean_text = ''\n for sen in text:\n if sen:\n sen = sen.rstrip().lstrip()\n clean_text += sen+','\n complete_text = clean_text\n # print(complete_text)\n\n # get meta description\n description = soup.find(\"meta\",attrs={\"name\":\"description\"})\n if description is None:\n description = \"-\"\n else:\n description = description.get(\"content\")\n\n # get meta keywords\n keywords = soup.find(\"meta\",attrs={\"name\":\"keywords\"})\n if keywords is None:\n keywords = \"-\"\n else:\n keywords = keywords.get(\"content\")\n\n # isHotURL\n hot_link = \"no\"\n\n # check table if exist at crawldb\n cursor.execute(\n \"SELECT base_url, COUNT(*) FROM page_information WHERE base_url = %s GROUP BY base_url\",\n (url,)\n )\n results = cursor.fetchall()\n # gets the number of rows affected by the command executed\n row_count = cursor.rowcount\n if row_count == 0:\n # Create a new record\n sql = \"INSERT INTO `page_information` (`base_url`, `html5`, `title`, `description`, `keywords`, `content_text`, `hot_url`, `model_crawl`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, html5, title, description, keywords, complete_text, hot_link, \"BFS crawling\"))\n # commit to save our changes\n db.commit()\n else:\n # update database\n sql = \"UPDATE page_information SET hot_url = %s WHERE base_url = %s\"\n # Execute the query\n cursor.execute(sql, (hot_url, url))\n # commit to save our changes\n db.commit()\n\n # extract style\n for style in soup.findAll('style'):\n # Create a new record\n sql = \"INSERT INTO `style_resource` (`base_url`, `style`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, style))\n # commit to save our changes\n db.commit()\n\n # extract script\n for script in soup.findAll('script'):\n # Create a new record\n sql = \"INSERT INTO `script_resource` (`base_url`, `script`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, script))\n # commit to save our changes\n db.commit()\n\n # extract lists\n for lists in soup.findAll('li'):\n # Create a new record\n sql = \"INSERT INTO `list` (`base_url`, `list`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, lists))\n # commit to save our changes\n db.commit()\n\n # extract forms\n for form in soup.findAll('form'):\n # Create a new record\n sql = \"INSERT INTO `forms` 
(`base_url`, `form`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, form))\n # commit to save our changes\n db.commit()\n\n # extract tables\n for table in soup.findAll('table'):\n # Create a new record\n sql = \"INSERT INTO `tables` (`base_url`, `tables`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, table))\n # commit to save our changes\n db.commit()\n\n # extract images\n for image in soup.findAll('img'):\n # Create a new record\n sql = \"INSERT INTO `images` (`base_url`, `image`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, image))\n # commit to save our changes\n db.commit()\n\n # extract outgoing link\n links = soup.findAll(\"a\", href=True)\n\n # memasukan outgoing link kedalam queue\n for i in links:\n flag = 0\n\n # Complete relative URLs and strip trailing slash\n complete_url = urljoin(url, i[\"href\"]).rstrip('/')\n\n # create graph\n # G.add_edges_from([(url, complete_url)])\n\n # create list graph\n branch = []\n # remove https://\n new_url = url.replace('https://', '')\n new_url = new_url.replace('http://', '')\n new_complete = complete_url.replace('https://', '')\n new_complete = new_complete.replace('http://', '')\n branch.append(new_url)\n branch.append(new_complete)\n list_g.append(branch)\n\n # Create a new record\n sql = \"INSERT INTO `linking` (`crawl_id`, `url`, `outgoing_link`) VALUES (%s, %s, %s)\"\n # Execute the query\n cursor.execute(sql, (1, url, complete_url))\n # commit to save our changes\n db.commit()\n\n # Check if the URL already exists in the url_queue\n for j in url_queue:\n if j == complete_url:\n flag = 1\n break\n\n # Check if the URL already exists in the visited_url\n for j in visited_url:\n if (j == complete_url):\n flag = 1\n break\n\n # If not found in queue\n if flag == 0:\n if (visited_url.count(complete_url)) == 0:\n url_queue.append(complete_url)\n\n except (AttributeError, KeyError, requests.exceptions.InvalidSchema, requests.exceptions.ConnectionError):\n title = \"no-title\"\n complete_text = \"no-text\"\n\n # crawl url selanjutnya\n if len(url_queue) == 0:\n return\n current = url_queue.popleft()\n\n # # create list graph\n # branch = []\n # # remove https://\n # new_url = url.replace('https://', '')\n # new_complete = current.replace('https://', '')\n # branch.append(new_url)\n # branch.append(new_complete)\n # list_g.append(branch)\n\n crawl(current)",
"def scrape_data(final_link, tags, driver):\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n errcount = 0\n for tag in tags:\n try:\n children = driver.find_elements_by_css_selector(tag)\n for child in children:\n try:\n links = child.find_elements_by_tag_name('a')\n images = child.find_elements_by_tag_name('img')\n if len(child.text) == 0:\n continue\n else:\n infotext = []\n sociallinks = []\n imageslinks = [] \n checklen = len(child.text.split(\"\\n\"))\n if checklen > 0 and checklen < 30:\n infotext = child.text.split(\"\\n\")\n\n for link in links:\n sociallinks.append(link.get_attribute('href'))\n\n for linki in imageslinks:\n imageslinks.append(linki.get_attribute('href'))\n\n except:\n continue\n \n infolen = len(infotext)\n sociallen = len(sociallinks)\n if sociallen > 0 and sociallen <= 10 and infolen != 0:\n dump_data(infotext, sociallinks, imageslinks)\n \n else:\n if infolen == 0 or sociallen == 0:\n errcount += 1\n \n except:\n continue\n \n if errcount == len(tags):\n scrape_data_tag(final_link, driver)\n \n elif errcount > 0:\n scrape_data_tag(final_link, driver)\n \n #driver.quit()",
"def scrape_links(base_url, data):\n soup = BeautifulSoup(data, from_encoding=\"gbk\")\n\n # Create mechanize links to be used\n # later by mechanize.Browser instance\n #soup = BeautifulSoup(data)\n print 'scrape_links before'\n links = []\n for anchor in soup.find_all('a'):\n url = anchor['href']\n text = anchor.string\n shtml = '.shtml'\n thisYear = '2013'\n isWant = ( anchor.has_attr('href')) \\\n and ( anchor.has_attr('target') ) \\\n and (BASE_URL in url) \\\n and (shtml in url) \\\n and (text != None) \\\n and (thisYear in url)\n if isWant==True:\n unicode_string = (unicode(anchor.string))\n print 'unicode_string:',unicode_string\n print 'type(text): ', type(text)\n print 'type(unicode_string): ', type(unicode_string)\n tag = anchor.name\n\n attrs = []\n for name in anchor.attrs:\n attrs.append(name)\n link = mechanize.Link(base_url, url, text, tag, attrs)\n print link\n links.append(link)\n if len(links) > 10:\n break;\n print 'scrape_links after'\n return links",
"def mk_link_list(self, BS_object, base_url):\n link_list = []\n body = BS_object.find('body')\n for element in body.find_all('a'):\n # for link in BS_object.find_all('a'): # TEST if there are any links in html head\n \n raw_link = element.get('href')\n print \"GETS RAW LINK: %r, type:\" % raw_link, type(raw_link)\n if type(raw_link) is not unicode:\n print \"mk_link_list: FAILED TO EXTRACT USABLE LINK, SKIPPING...\"\n continue\n\n if raw_link.startswith(\"https:/\") or raw_link.startswith(\"http:/\"):\n if not raw_link.endswith(\"/\"): # maintaining constant url format\n raw_link + \"/\"\n print \"mk_link_list: FULL LINK\"\n if raw_link.startswith(base_url): # Internal URL check\n print \"mk_link_list: FULL LINK STARTS WITH BASE URL AND IS GOOD FOR LINK LIST\"\n link_list.append(raw_link)\n else:\n print \"mk_link_list: THIS FULL LINK IS NOT INTERNAL LINK\"\n else:\n # when part link found it will be always internal link\n print \"mk_link_list:FOUND PART LINK\", raw_link\n try:\n raw_link.strip()\n except:\n pass\n print \"mk_link_list: MAKING FULL LINK FROM PART\"\n full_link = urlparse.urljoin(base_url, raw_link)\n print \"mk_link_list: FULL LINK MADE FROM PART LINK\", full_link\n if full_link.startswith(base_url): # Internal URL check\n print \"mk_link_list: FULL LINK STARTS WITH BASE URL AND IS GOOD FOR LINK LIST\"\n link_list.append(full_link)\n else:\n print \"mk_link_list: THIS FROM PART TO FULL LINK IS NOT INTERNAL LINK\"\n\n\n\n dedupli_list = c_m.remove_duplicates(link_list) # \n dedupli_list.sort()\n try:\n dedupli_list.remove(base_url) # we do not need retriving base url html again\n print \"mk_link_list: LINK LIST AFTER BASE URL REMOVAL\", len(dedupli_list)\n except ValueError:\n print \"mk_link_list: NO BASE URL FOUND IN BASE URL(HOMEPAGE)\"\n\n return dedupli_list",
"def fetch_urls(browser, number_publications):\n links = []\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n n_scrolls = scrolls(number_publications)\n\n for i in range(\n n_scrolls\n ): # collecting all the pictures links in order to see which ones contains location data\n print(\n Fore.WHITE +\n \"Scrolling the Instagram target profile, scraping pictures URLs ...\"\n + str(100 * i // n_scrolls) + \"% of the profile scrolled \",\n end=\"\\r\")\n browser.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight)\")\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n time.sleep(\n 1\n ) # dont change this, otherwise some scrolls won't be effective and all the data won't be scrapped\n\n print(Fore.WHITE + \"\\nPictures links collected: \" + Fore.GREEN + \"OK\")\n return list(dict.fromkeys(links)) # remove duplicates",
"def scrape_main() -> None:\n\n logger.info(\"Starting scrape\")\n search_info = construct_scrape_regex_patterns(grab_scrape_info())\n links = run_scrape(\n url=search_info['url'],\n seasons_regex=search_info['seasons'],\n episodes_regex=search_info['episodes']\n )\n if links:\n logger.debug(\"Writing urls to file\")\n with open('urls.txt', 'w') as f:\n for link in links:\n f.write(link + '\\n')\n else:\n logger.warning(\"No links available\")",
"def analyze(self, page_url, html):\n soup = BeautifulSoup(html)\n\n triples = []\n for link_type, element_name, attrs, attribute_name in self.link_types:\n triples.extend(\n [\n (page_url, link_type, self.extract_link(page_url, element, attribute_name))\n for element\n in soup.find_all(element_name, attrs=attrs)\n ]\n )\n\n return list(filter(lambda t: t[2] is not None, triples))",
"def crawl_url_links(input_dict):\n\n extractor_name=input_dict.get('extractor','DefaultExtractor')\n import requests\n label=input_dict['label']\n urls,_, source, source_date=_process_input(input_dict['input'],False)\n\n\n docs=[]\n titles=[]\n for url in urls:\n print(url)\n try:\n r = requests.get(url)\n except ConnectionError:\n continue\n if r.status_code==200:\n html=r.text\n from boilerpipe.extract import Extractor\n extractor = Extractor(extractor=extractor_name, html=html)\n\n titles.append(url)\n text=''\n if label:\n text+='!'+label+'\\t'\n text+=extractor.getText()\n docs.append(text)\n\n\n corpus_date = str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime()))\n documents, labels = _process_adc(docs, False, label, titles)\n features = {\"Source\": source, \"SourceDate\": source_date, \"CorpusCreateDate\": corpus_date,\n \"Labels\": json.dumps([label]) if label else '[]'}\n\n return {\"adc\": DocumentCorpus(documents=documents, features=features)}",
"def _scrape(self):",
"def parseHtmlLinks(page, canBeOffsite=False, landingPage_ignoreUrlREs=[]):\n if 'links' in page:\n return page\n elif 'seleniumDriver' in page:\n return parseLinksSelenium(page)\n else:\n logging.debug('Parsing HTML links')\n htmlString = page['data']\n baseUrl = page['url']\n urlParts = urlparse.urlsplit(baseUrl)\n basePath = urlParts[2]\n baseLoc = urlParts[1]\n logging.log(5, 'Parsing %s with bs3' % page['url'])\n linkStrainer = SoupStrainer(['a',\n 'meta',\n 'iframe',\n 'frame'])\n try:\n fulltextLinks = BeautifulSoup(htmlString, smartQuotesTo=None, convertEntities=BeautifulSoup.ALL_ENTITIES, parseOnlyThese=linkStrainer)\n except ValueError as e:\n raise pubGetError('Exception during bs html parse', 'htmlParseException', e.message)\n\n logging.log(5, 'bs parsing finished')\n linkDict = OrderedDict()\n metaDict = OrderedDict()\n iframeDict = OrderedDict()\n frameDict = OrderedDict()\n for l in fulltextLinks:\n logging.log(5, 'got link %s' % l)\n if l.name == 'iframe':\n src = l.get('src')\n if src == None or 'pdf' not in src:\n continue\n id = l.get('id', 'pdfDocument')\n iframeDict[id] = src\n if l.name == 'frame':\n src = l.get('src')\n if src == None or 'pdf' not in src:\n continue\n id = l.get('id', 'pdfDocument')\n frameDict[id] = src\n elif l.name == 'a':\n text = l.getText()\n text = text.encode('utf8')\n url = l.get('href')\n if url == None:\n logging.log(5, 'url is None')\n continue\n try:\n linkLoc = urlparse.urlsplit(url)[1]\n linkPath = urlparse.urlsplit(url)[2]\n except ValueError:\n raise pubGetError('Value error on url split %s' % url, 'urlSplitError', url)\n\n if canBeOffsite == False and linkLoc != '' and linkLoc != baseLoc:\n logging.log(5, 'skipping link %s, is offsite' % url)\n continue\n fullUrl = urlparse.urljoin(baseUrl, url)\n parts = list(urlparse.urlsplit(fullUrl)[:4])\n if parts[0] == 'javascript':\n logging.log(5, 'skipping link %s, is javascript' % url)\n continue\n parts.append('')\n fullUrlNoFrag = urlparse.urlunsplit(parts)\n if anyMatch(landingPage_ignoreUrlREs, fullUrlNoFrag):\n logging.log(5, 'skipping link %s, because of ignore REs' % url)\n continue\n linkDict[fullUrlNoFrag] = text\n logging.log(5, 'Added link %s for text %s' % (repr(fullUrlNoFrag), repr(text)))\n elif l.name == 'meta':\n name = l.get('name')\n if name != None:\n content = l.get('content')\n metaDict[name] = content\n if str(l.get('http-equiv')).lower() == 'refresh':\n content = l.get('content')\n logging.log('found meta refresh tag: %s' % str(content))\n if content != None:\n url = string.split(content, '=', 1)[1]\n url = urlparse.urljoin(baseUrl, url)\n metaDict['refresh'] = url\n\n logging.log(5, 'Meta tags: %s' % metaDict)\n logging.log(5, 'Links: %s' % linkDict)\n logging.log(5, 'iframes: %s' % iframeDict)\n logging.log(5, 'frames: %s' % frameDict)\n\n page['links'] = linkDict\n page['metas'] = metaDict\n page['iframes'] = iframeDict\n page['frames'] = frameDict\n logging.log(5, 'HTML parsing finished')\n return page",
"async def get_article_links(self):\n urls = []\n for page in range(self._start, self._end+1):\n urls.append(self._searchURL + str(page))\n result_list = await self._connect(urls)\n\n self._urls = []\n hares_links = []\n for result in result_list:\n soup = result[1]\n search_links = soup.find_all(class_='search-title')\n article_links = re.findall(r'url=(.*?)\\\"', str(search_links))\n for l in article_links:\n l = unquote(l)\n if 'hare48.pixnet.net' in l:\n hares_links.append(l)\n else:\n self._urls.append(l)\n self._urls.extend(await self._transform_hares(hares_links))",
"def google_scraper(key_words, date_upper, date_lower, site, check = None): \n try:\n key_words = [word for word in key_words if '&' not in word]\n search_terms = ' '.join(key_words)\n search_terms = search_terms.replace(' ', '%20')\n\n #month of upper date\n mu = date_upper[:2]\n #day of upper\n du = date_upper[3:5]\n #year of upper\n yu = date_upper[6:]\n \n #month of lower date\n ml = date_lower[:2]\n #day of lower\n dl = date_lower[3:5]\n #year of lower\n yl = date_lower[6:]\n \n url = \"https://news.google.com/search?q=\" + search_terms + \" %20source%3A\" + \\\n site + \"%20before%3A\" + yu + \"-\" + mu + \"-\" + du + \"%20after%3A\" + yl + \\\n \"-\" + ml + \"-\" + dl + \"&hl=en-US&gl=US&ceid=US%3Aen\"\n \n #request url\n page = requests.get(url)\n \n #parse html\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #find article tags\n article_urls = soup.find_all('article')\n\n #retrieve initial urls that link through google news\n article_titles = []\n for article_text in article_urls:\n article_title = article_text.find('a')\n article_titles.append(article_title['href'])\n\n \n #initiate actual urls list\n urls = []\n \n #get actual site urls\n for google_url in article_titles:\n try:\n #get link and combine with google news url\n bad_url = google_url[1:]\n combined_url = 'https://news.google.com' + bad_url\n \n #request site, then get actual url from requests\n actual_page = requests.get(combined_url)\n \n #return actual url\n actual_url = actual_page.url\n \n #add actual url to list\n urls.append(actual_url)\n except:\n continue\n \n #only get urls from site of interest\n if check == None:\n urls = [url for url in urls if site in url]\n else:\n urls = [url for url in urls if check in url]\n \n if len(urls) == 0:\n search_terms = ' '.join(key_words)\n search_terms = search_terms.replace(' ', ' ')\n search_terms = search_terms.replace(' ', '%20OR%20')\n \n url = \"https://news.google.com/search?q=\" + search_terms + \" %20source%3A\" + \\\n site + \"%20before%3A\" + yu + \"-\" + mu + \"-\" + du + \"%20after%3A\" + yl + \\\n \"-\" + ml + \"-\" + dl + \"&hl=en-US&gl=US&ceid=US%3Aen\"\n \n #request url\n page = requests.get(url)\n \n #parse html\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #find article tags\n article_urls = soup.find_all('article')\n\n #retrieve initial urls that link through google news\n article_titles = []\n for article_text in article_urls:\n article_title = article_text.find('a')\n article_titles.append(article_title['href'])\n\n \n #initiate actual urls list\n urls = []\n \n #get actual site urls\n for google_url in article_titles:\n try:\n #get link and combine with google news url\n bad_url = google_url[1:]\n combined_url = 'https://news.google.com' + bad_url\n \n #request site, then get actual url from requests\n actual_page = requests.get(combined_url)\n \n #return actual url\n actual_url = actual_page.url\n \n #add actual url to list\n urls.append(actual_url)\n except:\n continue\n \n #only get urls from site of interest\n if check == None:\n urls = [url for url in urls if site in url]\n else:\n urls = [url for url in urls if check in url]\n \n return(urls)\n \n else:\n return(urls)\n \n except:\n return([])",
"def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links",
"def checkSite(url, strings, output):\n\n domain = getDomainName(url)\n otherLinks = list()\n txt = \"\\n=================================\\n\"\n txt += \"Domain: \" + domain + \"\\n\"\n txt += \"=================================\\n\"\n linksViewed = set()\n dStrings = dict()\n imgs = set([])\n links = set([url])\n forbidden = [\".ogg\", \".tex\", \".pdf\", \".mp3\", \"mp4\", \".ods\", \".xls\", \".xlsx\",\\\n \".doc\", \".docx\", \".zip\", \".tar\", \".gz\", \".ggb\", \".cls\", \".sty\", \".avi\", \".flv\"\\\n \".mkv\", \".srt\"]\n #While we have not check all URL\n while links - linksViewed != set([]):\n #Check all links (not already viewed) for this page\n for link in links - linksViewed:\n #If the domains are the same, the extension is not forbidden\n if getDomainName(link) == domain and getExtension(link) not in forbidden:\n print \"Checking on \" + link + \"...\\n\"\n dStringst, linkst, imgst = checkPage(link, strings)\n txt += link + \":\\n\"\n for key, value in dStringst.items():\n txt += \"\\t\" + key + \" -> \" + str(value) + \"\\n\"\n txt += \"\\n\"\n #Clear list of forbidden extentions\n tmp = list()\n for l in linkst:\n if getDomainName(l) == domain and getExtension(l) not in forbidden:\n tmp.append(l)\n #If the extension is allowed but the link is on an other domain\n elif getDomainName(l) != domain and getExtension(l) not in forbidden:\n otherLinks.append(l)\n linkst = set(tmp)\n links = links | linkst\n imgs = imgs | imgst\n #Update link count\n for key, value in dStringst.items():\n dStrings[key] = dStrings[key] + value if key in dStrings.keys() else 0\n\n linksViewed = linksViewed | set([link])\n\n links = set(links)\n imgs = set(imgs)\n\n #Export/print after each website, in order to do not lost all if user\n #stops the program with Ctrl+C\n if len(output) > 0:\n export(output, txt)\n else:\n print txt\n return txt, set(otherLinks), linksViewed",
"def mor_prepare_data():\n prices, locations, areas, links = [], [], [], []\n for i in range(START_PAGE, SEARCHING_DEPTH+1):\n handler = requests.get(main_url, params={\"page\": str(i)})\n soup = bs4.BeautifulSoup(handler.text, 'lxml')\n heads = soup.find_all(\"header\")\n once = True\n for head in heads:\n if head.find(\"meta\", {\"itemprop\": \"category\"}) and once:\n\n raw_price = head.find(\"meta\", {\"itemprop\": \"price\"})\n price = int(float(raw_price[\"content\"]) if raw_price else \"\")\n\n raw_loc_list = head.find(\"h2\",\n {\"class\": \"single-result__title\"}).getText().strip().split(\n \", \")\n found = False\n for loc in raw_loc_list:\n if location_mapper[CITY].get(loc.lower(), 0):\n location = location_mapper[CITY][loc.lower()]\n\n found = True\n break\n if not found:\n location = \"\"\n if DEBUG_MODE:\n print(raw_loc_list)\n\n raw_area = head.find(\"p\", {\n \"class\": \"single-result__price single-result__price--currency\"}).getText().strip().split()\n if price and location:\n square_price = raw_area[0] if len(raw_area) == 2 else \"\".join(\n (raw_area[0], raw_area[1]))\n\n area = int(price / float(square_price.replace(\",\", \".\")))\n link_url = head.find('a')['href']\n\n if location and area and link_url:\n prices.append(price) if price < PRICE_UPPER_LIMIT else prices.append(\n PRICE_UPPER_LIMIT)\n locations.append(location)\n areas.append(area) if area < AREA_UPPER_LIMIT else areas.append(\n AREA_UPPER_LIMIT)\n links.append(link_url)\n\n return prices, locations, areas, links",
"def updateURLs(self, tree):\n urls = set()\n #Remove all links we have already visited\n for link in tree.findall(\".//a\"):\n try:\n url = urllib.parse.urldefrag(link.attrib['href'])[0]\n if (url and url not in self.unvisitedURLs and url\n not in self.visitedURLs):\n urls.add(url)\n except KeyError:\n pass\n\n #Remove all non-http URLs and a dd a sutiable base URL where that is\n #missing\n newUrls = set()\n for url in urls:\n splitURL = list(urllib.parse.urlsplit(url))\n if splitURL[0] != \"http\":\n continue\n if splitURL[1] == \"\":\n splitURL[1] = urllib.parse.urlsplit(self.currentURL)[1]\n newUrls.add(urllib.parse.urlunsplit(splitURL))\n urls = newUrls\n\n responseHeaders = {}\n #Now we want to find the content types of the links we haven't visited\n for url in urls:\n try:\n resp, content = self.http.request(url, \"HEAD\")\n responseHeaders[url] = resp\n except AttributeError as KeyError:\n #Don't know why this happens\n pass\n\n\n #Remove links not of content-type html or pages not found\n #XXX - need to deal with other status codes?\n toVisit = set([url for url in urls if url in responseHeaders and\n \"html\" in responseHeaders[url]['content-type'] and\n responseHeaders[url]['status'] == \"200\"])\n\n #Now check we are allowed to spider the page\n for url in toVisit:\n robotURL = list(urllib.parse.urlsplit(url)[:2])\n robotURL.extend([\"robots.txt\", \"\", \"\"])\n robotURL = urllib.parse.urlunsplit(robotURL)\n self.robotParser.set_url(robotURL)\n if not self.robotParser.can_fetch(\"*\", url):\n toVisit.remove(url)\n\n self.visitedURLs.update(urls)\n self.unvisitedURLs.update(toVisit)",
"def collect_web_sites(self):\n min_distance = None\n max_sequence_match = None\n index_string_match = index_distance = None\n self.collection = collections.OrderedDict()\n for i_web, web_row in self.company_urls_df.iterrows():\n # get the url first from the websites table which list all the urls belonging to\n # one kvk search\n url = web_row[URL_KEY]\n\n # skip all none uls and also the filtered urls\n if url is None or url == \"\":\n logger.debug(\"Skipping url because it is None or empty\")\n continue\n if self.filter_urls and url not in self.filter_urls:\n logger.debug(f\"filter urls is given so skip {url}\")\n continue\n\n # store a list of UrlInfo object with a minimum info the url which was tested\n url_info = UrlInfo(index=i_web, url=url)\n self.collection[url] = url_info\n\n print_banner(f\"Processing {url}\")\n\n # quick check if we can processes this url based on the country code\n suffix = url_info.url_extract.suffix\n if suffix in self.exclude_extensions.index:\n url_info.outside_nl = True\n logger.info(f\"Web site {url} has suffix '.{suffix}' Continue \")\n\n # get the processing time of the last time you did this url from the table\n try:\n processing_time = self.urls_df.loc[url, DATETIME_KEY]\n except KeyError:\n processing_time = None\n\n if self.force_process or self.rescan_missing_urls:\n url_info.needs_update = True\n else:\n url_info.needs_update = check_if_url_needs_update(processing_time=processing_time,\n current_time=self.current_time,\n older_time=self.older_time)\n if url_info.needs_update:\n # if the url needs update, store the current time\n url_info.processing_time = self.current_time\n else:\n url_info.processing_time = processing_time\n\n url_analyse = self.scrape_url_and_store_in_dataframes(url, url_info)\n\n url_info.url_analyse = url_analyse\n\n if url_analyse and not url_analyse.exists:\n self.logger.debug(f\"url '{url}'' does not exist\")\n continue\n\n # based on the company postcodes and kvknummer and web contents, make a ranking how\n # good the web sides matches the company\n match = UrlCompanyRanking(url, self.company_name_small,\n url_extract=url_info.url_extract,\n url_analyse=url_analyse,\n company_kvk_nummer=self.kvk_nr,\n company_postcodes=self.postcodes,\n threshold_string_match=self.threshold_string_match,\n threshold_distance=self.threshold_distance,\n logger=self.logger)\n\n url_info.match = match\n\n # update the min max\n if min_distance is None or match.distance < min_distance:\n index_distance = i_web\n min_distance = match.distance\n\n if max_sequence_match is None or match.string_match > max_sequence_match:\n index_string_match = i_web\n max_sequence_match = match.string_match\n\n self.logger.debug(\" * {} - {} - {}\".format(url, match.ext.domain,\n match.distance))\n\n if min_distance is None:\n self.company_urls_df = None\n elif index_string_match != index_distance:\n self.logger.warning(\n \"Found minimal distance for {}: {}\\nwhich differs from \"\n \"best string match {}: {}\".format(index_distance,\n self.collection[url].url,\n index_string_match,\n self.collection[url].url))",
"def parse(html, url, bases): \n\n soup = BeautifulSoup(html, 'lxml')\n htmlBody = soup.find('body').get_text().strip()\n links = [urljoin(url, l.get('href')) for l in soup.findAll('a')]\n links = [l for l in links if urlparse(l).netloc in bases]\n return url, htmlBody, links",
"def get_all_headlines_from_chrome_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines",
"def find_usefull_links(links, classmodel, class_count_vect):\n\n import re\n final_links = []\n seclinks = links\n for link in links:\n fulllink = link\n if link == None:\n continue\n else:\n link = link.replace('://', ' ')\n link = link.replace('@', ' ')\n link = link.replace('#', ' ')\n link = link.replace('/', ' ')\n link = link.replace('-', ' ')\n link = link.replace('.', ' ')\n link = link.replace('https', '')\n link = link.replace('http', '')\n link = link.replace('www', '')\n link = link.replace('&', ' ')\n link = link.replace('=', ' ')\n linkpd = pd.Series(link.strip())\n link_feature = class_count_vect.transform(linkpd)\n result = classmodel.predict(link_feature)\n\n result = result.tolist()\n result = str(result)\n if result == '[1]':\n final_links.append(fulllink)\n final_links = list(dict.fromkeys(final_links))\n \n if len(final_links) == 0 or len(final_links) < 5:\n for linksec in seclinks:\n linkwords = ['cabinet', 'gover', 'goverment', 'composition', 'ministers', 'minister',\n 'president', 'composicao', 'parliament', 'person', 'who', 'mini', 'compo',\n 'governor', 'secretariat', 'secretary']\n for w in linkwords:\n if re.search(w, linksec):\n final_links.append(linksec)\n else:\n continue\n final_links = list(dict.fromkeys(final_links))\n return (final_links)"
]
| [
"0.7229524",
"0.67814445",
"0.6743636",
"0.66795707",
"0.6603203",
"0.65976393",
"0.6584772",
"0.6559459",
"0.65498966",
"0.6531386",
"0.65170753",
"0.65139127",
"0.6511179",
"0.6486064",
"0.64685076",
"0.64224035",
"0.6405716",
"0.6380941",
"0.6372001",
"0.637091",
"0.6350366",
"0.63366765",
"0.63116443",
"0.6298533",
"0.629593",
"0.6276861",
"0.62751883",
"0.62724394",
"0.62401855",
"0.6234458"
]
| 0.7232413 | 0 |
It takes useful links as an input and calls the function scrape_data or scrape_data_tag on the basis if useful tags are received | def link_scraping(final_links, driver):
for final_link in final_links:
tags = extract_all_tags(final_link, driver)
if len(tags) != 0:
final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)
if len(final_tags) != 0:
print('Extracting(classname): ', final_link)
scrape_data(final_link, final_tags, driver)
else:
print('Extracting(tag): ', final_link)
scrape_data_tag(final_link, driver)
else:
print('Extracting(tag): ', final_link)
scrape_data_tag(final_link, driver) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_data(final_link, tags, driver):\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n errcount = 0\n for tag in tags:\n try:\n children = driver.find_elements_by_css_selector(tag)\n for child in children:\n try:\n links = child.find_elements_by_tag_name('a')\n images = child.find_elements_by_tag_name('img')\n if len(child.text) == 0:\n continue\n else:\n infotext = []\n sociallinks = []\n imageslinks = [] \n checklen = len(child.text.split(\"\\n\"))\n if checklen > 0 and checklen < 30:\n infotext = child.text.split(\"\\n\")\n\n for link in links:\n sociallinks.append(link.get_attribute('href'))\n\n for linki in imageslinks:\n imageslinks.append(linki.get_attribute('href'))\n\n except:\n continue\n \n infolen = len(infotext)\n sociallen = len(sociallinks)\n if sociallen > 0 and sociallen <= 10 and infolen != 0:\n dump_data(infotext, sociallinks, imageslinks)\n \n else:\n if infolen == 0 or sociallen == 0:\n errcount += 1\n \n except:\n continue\n \n if errcount == len(tags):\n scrape_data_tag(final_link, driver)\n \n elif errcount > 0:\n scrape_data_tag(final_link, driver)\n \n #driver.quit()",
"def scrape_data_tag(final_link, driver):\n\n import time\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(final_link)\n time.sleep(2)\n tags = ['li', 'p', 'tr']\n for tag in tags:\n children = driver.find_elements_by_tag_name(tag)\n for child in children:\n try:\n links = child.find_elements_by_tag_name('a')\n images = child.find_elements_by_tag_name('img')\n if len(child.text) == 0:\n continue\n else:\n infotext = []\n sociallinks = []\n imageslinks = [] \n checklen = len(child.text.split(\"\\n\"))\n if checklen > 0 and checklen < 30:\n infotext = child.text.split(\"\\n\")\n\n for link in links:\n sociallinks.append(link.get_attribute('href'))\n \n for link in imageslinks:\n imageslinks.append(link.get_attribute('href'))\n\n except:\n continue\n\n infolen = len(infotext)\n sociallen = len(sociallinks)\n if sociallen > 0 and sociallen <= 10 and infolen != 0:\n try:\n dump_data(infotext, sociallinks, imageslinks)\n except:\n continue\n elif sociallen == 0 and infolen != 0:\n try:\n sociallinks = ['No Available Social Media Links']\n dump_data(infotext, sociallinks, imageslinks)\n except:\n continue\n \n\n #driver.quit()",
"def deep_link_scraping(final_links, driver):\n\n import re\n second_links = [] \n for website2 in final_links:\n links2 = extract_all_links(website2, driver)\n final_links1 = find_usefull_links(links2, classmodel, class_count_vect)\n final_links2 = list(set(final_links1) - set(final_links))\n second_links += final_links2\n\n \n second_links = list(dict.fromkeys(second_links))\n second_links1 = find_usefull_links(second_links, classmodel, class_count_vect)\n second_links2 = []\n for link in second_links1:\n if re.search('#', link):\n x = re.search('#', link)\n link = link[:int(x.span()[0])]\n second_links2.append(link)\n else:\n second_links2.append(link)\n\n second_links2 = list(dict.fromkeys(second_links2))\n for final_link in second_links2:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n scrape_data(final_link, final_tags, driver)\n else:\n scrape_data_tag(final_link, driver)\n else:\n scrape_data_tag(final_link, driver)\n return second_links2",
"def scrap_site(link):\n pass # Scrapy or BeautifulSoup",
"def scrape_links(base_url, data):\n soup = BeautifulSoup(data, from_encoding=\"gbk\")\n\n # Create mechanize links to be used\n # later by mechanize.Browser instance\n #soup = BeautifulSoup(data)\n print 'scrape_links before'\n links = []\n for anchor in soup.find_all('a'):\n url = anchor['href']\n text = anchor.string\n shtml = '.shtml'\n thisYear = '2013'\n isWant = ( anchor.has_attr('href')) \\\n and ( anchor.has_attr('target') ) \\\n and (BASE_URL in url) \\\n and (shtml in url) \\\n and (text != None) \\\n and (thisYear in url)\n if isWant==True:\n unicode_string = (unicode(anchor.string))\n print 'unicode_string:',unicode_string\n print 'type(text): ', type(text)\n print 'type(unicode_string): ', type(unicode_string)\n tag = anchor.name\n\n attrs = []\n for name in anchor.attrs:\n attrs.append(name)\n link = mechanize.Link(base_url, url, text, tag, attrs)\n print link\n links.append(link)\n if len(links) > 10:\n break;\n print 'scrape_links after'\n return links",
"def _scrape(self):",
"def scrape_links(links):\n chrome_options = set_chrome_options()\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)\n # driver = webdriver.Chrome(ChromeDriverManager().install())\n data = []\n click.secho(\"Processing Blocks\", fg=\"green\")\n for link in links:\n flat_data = scrape_link(driver, link)\n data = data + flat_data\n driver.close()\n return data",
"def scrape_main() -> None:\n\n logger.info(\"Starting scrape\")\n search_info = construct_scrape_regex_patterns(grab_scrape_info())\n links = run_scrape(\n url=search_info['url'],\n seasons_regex=search_info['seasons'],\n episodes_regex=search_info['episodes']\n )\n if links:\n logger.debug(\"Writing urls to file\")\n with open('urls.txt', 'w') as f:\n for link in links:\n f.write(link + '\\n')\n else:\n logger.warning(\"No links available\")",
"def test():\n from datas import whatlinks_page\n\n pages = whatlinks_page.whatlinks\n\n for qid in extract_linked_items(pages):\n page = get_itempage(qid)\n try:\n page.get()\n substitute_item_in_dataset(page, get_itempage(\"Q1660508\"), get_itempage(\"Q1622272\") )\n\n except Exception as exc:\n print('wow : <{}> ({}) something is wrong.'.format(exc, type(exc)))",
"def parseHtmlLinks(page, canBeOffsite=False, landingPage_ignoreUrlREs=[]):\n if 'links' in page:\n return page\n elif 'seleniumDriver' in page:\n return parseLinksSelenium(page)\n else:\n logging.debug('Parsing HTML links')\n htmlString = page['data']\n baseUrl = page['url']\n urlParts = urlparse.urlsplit(baseUrl)\n basePath = urlParts[2]\n baseLoc = urlParts[1]\n logging.log(5, 'Parsing %s with bs3' % page['url'])\n linkStrainer = SoupStrainer(['a',\n 'meta',\n 'iframe',\n 'frame'])\n try:\n fulltextLinks = BeautifulSoup(htmlString, smartQuotesTo=None, convertEntities=BeautifulSoup.ALL_ENTITIES, parseOnlyThese=linkStrainer)\n except ValueError as e:\n raise pubGetError('Exception during bs html parse', 'htmlParseException', e.message)\n\n logging.log(5, 'bs parsing finished')\n linkDict = OrderedDict()\n metaDict = OrderedDict()\n iframeDict = OrderedDict()\n frameDict = OrderedDict()\n for l in fulltextLinks:\n logging.log(5, 'got link %s' % l)\n if l.name == 'iframe':\n src = l.get('src')\n if src == None or 'pdf' not in src:\n continue\n id = l.get('id', 'pdfDocument')\n iframeDict[id] = src\n if l.name == 'frame':\n src = l.get('src')\n if src == None or 'pdf' not in src:\n continue\n id = l.get('id', 'pdfDocument')\n frameDict[id] = src\n elif l.name == 'a':\n text = l.getText()\n text = text.encode('utf8')\n url = l.get('href')\n if url == None:\n logging.log(5, 'url is None')\n continue\n try:\n linkLoc = urlparse.urlsplit(url)[1]\n linkPath = urlparse.urlsplit(url)[2]\n except ValueError:\n raise pubGetError('Value error on url split %s' % url, 'urlSplitError', url)\n\n if canBeOffsite == False and linkLoc != '' and linkLoc != baseLoc:\n logging.log(5, 'skipping link %s, is offsite' % url)\n continue\n fullUrl = urlparse.urljoin(baseUrl, url)\n parts = list(urlparse.urlsplit(fullUrl)[:4])\n if parts[0] == 'javascript':\n logging.log(5, 'skipping link %s, is javascript' % url)\n continue\n parts.append('')\n fullUrlNoFrag = urlparse.urlunsplit(parts)\n if anyMatch(landingPage_ignoreUrlREs, fullUrlNoFrag):\n logging.log(5, 'skipping link %s, because of ignore REs' % url)\n continue\n linkDict[fullUrlNoFrag] = text\n logging.log(5, 'Added link %s for text %s' % (repr(fullUrlNoFrag), repr(text)))\n elif l.name == 'meta':\n name = l.get('name')\n if name != None:\n content = l.get('content')\n metaDict[name] = content\n if str(l.get('http-equiv')).lower() == 'refresh':\n content = l.get('content')\n logging.log('found meta refresh tag: %s' % str(content))\n if content != None:\n url = string.split(content, '=', 1)[1]\n url = urlparse.urljoin(baseUrl, url)\n metaDict['refresh'] = url\n\n logging.log(5, 'Meta tags: %s' % metaDict)\n logging.log(5, 'Links: %s' % linkDict)\n logging.log(5, 'iframes: %s' % iframeDict)\n logging.log(5, 'frames: %s' % frameDict)\n\n page['links'] = linkDict\n page['metas'] = metaDict\n page['iframes'] = iframeDict\n page['frames'] = frameDict\n logging.log(5, 'HTML parsing finished')\n return page",
"def page_data():\n return scrape()",
"def get_links(self, soup):\n \"\"\" @param soup: BeautifulSoup object that cointains the targeted links \"\"\"\n \"\"\" @type soup: BeautifulSoup object \"\"\"\n for link in soup.select('a[href^=\"https://\"]'): # All links which have a href element\n href = link.get('href') # The actually href element of the link\n if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):\n print(\"No excel\")\n continue\n if not href in self.url_queue:\n self.url_queue.append(href) # Add the URL to our queue",
"def soup_scraper(url, database, website, key_word, ):\n try:\n response = requests.get(url)\n print(response, \" \", end='')\n\n except:\n print(\"Skipping. Connnection error\")\n return \"\", database\n\n soup = BeautifulSoup(response.content, 'html.parser')\n contents = soup.find_all(class_='ZINbbc xpd O9g5cc uUPGi')\n for content in contents:\n try:\n title = content.find_all(class_='BNeawe vvjwJb AP7Wnd')[0].get_text()\n description = content.find_all(class_='BNeawe s3v9rd AP7Wnd')[0].get_text()\n try:\n description = description.find_all(class_='BNeawe s3v9rd AP7Wnd')[0]\n except:\n description = description\n link = content.find_all(href=re.compile(\"/url\\?q=\"))[0]\n link_text = re.search(r\"\\/url\\?q=(\\S+)&sa\", link.prettify()).group(1)\n link_base = re.search(r\"(https?:\\/\\/\\S+?)\\/\", link_text).group(1)\n except:\n continue\n\n db = {\n \"website\": website,\n \"keyword\": key_word,\n \"timestamp\": datetime.utcnow(),\n \"position\": 0,\n \"actual_score\": 0,\n \"total_score\": 0,\n \"medium\": 0,\n \"weight\": 0,\n \"title\": title,\n \"description\": description,\n \"base_link\": link_base,\n \"full_link\": link_text\n }\n database.append(db)\n\n nexts = soup.find_all(class_='nBDE1b G5eFlf')\n if len(nexts) == 1:\n next = nexts[0].attrs['href']\n else:\n next = nexts[1].attrs['href']\n\n return next, database",
"def process_link(self, inp):\n url = inp\n try:\n request = urllib2.Request(url)\n request.add_header('User-Agent', self.browsers[randint(0, 28)])\n request.add_header('Accept',\n ('text/html,application/xhtml+xml,'\n 'application/xml;q=0.9,*/*;q=0.8'))\n request.add_header('Accept-Language', 'en-us,en;q=0.5')\n soup = BeautifulSoup(urllib2.urlopen(request).read())\n content_div = soup.findAll(id=\"content\")[0]\n raw_text = clean_html(str(content_div))\n f = open('wiki_text2.txt', 'w')\n f.write(raw_text)\n f.close()\n return self.process_text(raw_text)\n except:\n traceback.print_exc()\n raise \"cant process link :traceback:%s\" % traceback.format_exc()",
"def retrieving_data():\n for x in range(1):\n page_number=random.randint(1,500)\n page_num=str(page_number)\n url = 'http://www.tastespotting.com/browse/'+page_num\n req = http.request('GET', url)\n data = BeautifulSoup(req.data,'html.parser')\n for each_div in data.find_all(\"div\", { \"class\": \"trendspotted-item\"}):\n for each_recipe in each_div.find_all('a', href=True):\n \"\"\"links starting with /clicks are the links of recipe to their original sites, so just retrieve those links\"\"\"\n if each_recipe['href'].startswith('/click'):\n retrieving_data.recipe_link=each_recipe['href'][16:-12]\n for each_img in each_recipe.find_all('img', alt=True):\n retrieving_data.recipe_image=each_img['src']\n for each_caption in each_div.find(\"p\", { \"class\": \"photo_caption\"}):\n retrieving_data.recipe_title=each_caption",
"def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError",
"def scrape(self):\n pass",
"def parse_links(self, links, pause=True, batch_size=50, time_=120):\n data = {}\n id, batch = 1, 1\n taglist = set()\n progbar = SimpleProgressBar(len(links))\n for link in links:\n response = self.__br.open(link)\n content = response.read().decode('utf-8')\n doc = document_fromstring(content)\n titlediv = doc.cssselect('title')\n title = titlediv[0].text_content().replace('Picture on VisualizeUs', '').strip() if titlediv else None\n imgs = doc.cssselect('div.media-content img')\n img = imgs[0].get('src') if imgs else None\n if not img:\n continue\n links = doc.cssselect('div.quote a')\n link = links[0].get('href') if links else None\n tags = []\n for a in doc.cssselect('ul.tags-links li a'):\n tg = a.text_content().strip()\n tags.append(tg)\n taglist.add(tg)\n data[id] = {'title': title, 'image_url': img, 'link': link, 'tags': tags}\n progbar.update(id)\n if pause and batch_size > 0 and batch == batch_size:\n if not time_ is None and time_ > 0:\n progbar.pause(time_)\n batch = 0\n id += 1\n batch += 1\n progbar.finish()\n return data, taglist",
"def google_scraper(key_words, date_upper, date_lower, site, check = None): \n try:\n key_words = [word for word in key_words if '&' not in word]\n search_terms = ' '.join(key_words)\n search_terms = search_terms.replace(' ', '%20')\n\n #month of upper date\n mu = date_upper[:2]\n #day of upper\n du = date_upper[3:5]\n #year of upper\n yu = date_upper[6:]\n \n #month of lower date\n ml = date_lower[:2]\n #day of lower\n dl = date_lower[3:5]\n #year of lower\n yl = date_lower[6:]\n \n url = \"https://news.google.com/search?q=\" + search_terms + \" %20source%3A\" + \\\n site + \"%20before%3A\" + yu + \"-\" + mu + \"-\" + du + \"%20after%3A\" + yl + \\\n \"-\" + ml + \"-\" + dl + \"&hl=en-US&gl=US&ceid=US%3Aen\"\n \n #request url\n page = requests.get(url)\n \n #parse html\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #find article tags\n article_urls = soup.find_all('article')\n\n #retrieve initial urls that link through google news\n article_titles = []\n for article_text in article_urls:\n article_title = article_text.find('a')\n article_titles.append(article_title['href'])\n\n \n #initiate actual urls list\n urls = []\n \n #get actual site urls\n for google_url in article_titles:\n try:\n #get link and combine with google news url\n bad_url = google_url[1:]\n combined_url = 'https://news.google.com' + bad_url\n \n #request site, then get actual url from requests\n actual_page = requests.get(combined_url)\n \n #return actual url\n actual_url = actual_page.url\n \n #add actual url to list\n urls.append(actual_url)\n except:\n continue\n \n #only get urls from site of interest\n if check == None:\n urls = [url for url in urls if site in url]\n else:\n urls = [url for url in urls if check in url]\n \n if len(urls) == 0:\n search_terms = ' '.join(key_words)\n search_terms = search_terms.replace(' ', ' ')\n search_terms = search_terms.replace(' ', '%20OR%20')\n \n url = \"https://news.google.com/search?q=\" + search_terms + \" %20source%3A\" + \\\n site + \"%20before%3A\" + yu + \"-\" + mu + \"-\" + du + \"%20after%3A\" + yl + \\\n \"-\" + ml + \"-\" + dl + \"&hl=en-US&gl=US&ceid=US%3Aen\"\n \n #request url\n page = requests.get(url)\n \n #parse html\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #find article tags\n article_urls = soup.find_all('article')\n\n #retrieve initial urls that link through google news\n article_titles = []\n for article_text in article_urls:\n article_title = article_text.find('a')\n article_titles.append(article_title['href'])\n\n \n #initiate actual urls list\n urls = []\n \n #get actual site urls\n for google_url in article_titles:\n try:\n #get link and combine with google news url\n bad_url = google_url[1:]\n combined_url = 'https://news.google.com' + bad_url\n \n #request site, then get actual url from requests\n actual_page = requests.get(combined_url)\n \n #return actual url\n actual_url = actual_page.url\n \n #add actual url to list\n urls.append(actual_url)\n except:\n continue\n \n #only get urls from site of interest\n if check == None:\n urls = [url for url in urls if site in url]\n else:\n urls = [url for url in urls if check in url]\n \n return(urls)\n \n else:\n return(urls)\n \n except:\n return([])",
"def test_extract_links(crawler, test_description, text, links):\n extracted_links = crawler.extract_links(text)\n assert links == extracted_links, test_description",
"def parse(self, response):\n content_type = self.get_content_type(response.headers)\n\n sitescan = response.meta.get('sitescan')\n\n if 'text/html' not in self.get_content_type(response.headers):\n\n # For linked content, find the urlscan it linked from\n urlscan = model.URLScan.objects.get(\n site_scan=sitescan,\n page_url_hash=sha256(response.meta['referrer']).hexdigest())\n else:\n # Only create urlscans for text/html\n urlscan, us_created = model.URLScan.objects.get_or_create(\n\n site_scan=sitescan,\n page_url_hash=sha256(response.url).hexdigest(),\n defaults={'page_url': response.url,\n 'timestamp': self.get_now_time()})\n\n # Continue crawling\n # Parse stylesheet links, scripts, and hyperlinks\n hxs = HtmlXPathSelector(response)\n\n # Extract other target links\n try:\n css_links = hxs.select('//link/@href').extract()\n except TypeError:\n css_links = []\n\n try:\n js_links = hxs.select('//script/@src').extract()\n except TypeError:\n js_links = []\n\n try:\n hyperlinks = hxs.select('//a/@href').extract()\n except TypeError:\n hyperlinks = []\n\n # Using a set removes duplicate links.\n all_links = set(hyperlinks + js_links + css_links)\n\n # Examine links, yield requests if they are valid\n for url in all_links:\n\n if not url.startswith('http://'):\n # ensure that links are to real sites\n if url.startswith('javascript:'):\n continue\n else:\n url = urljoin(response.url, url)\n\n ua = response.meta['user_agent']\n\n request = Request(url)\n request.headers.setdefault('User-Agent', ua.ua_string)\n request.meta['referrer'] = response.url\n request.meta['sitescan'] = sitescan\n request.meta['user_agent'] = ua\n request.meta['content_type'] = None\n\n yield request\n\n # The response contains a user agent, we should yield an item\n item = MarkupItem()\n item['content_type'] = self.get_content_type(response.headers)\n item['filename'] = os.path.basename(urlparse(response.url).path)\n item['headers'] = unicode(response.headers)\n item['meta'] = response.meta\n item['raw_content'] = response.body\n item['sitescan'] = sitescan\n item['urlscan'] = urlscan\n item['url'] = response.url\n item['user_agent'] = response.meta.get('user_agent')\n item['redirected_from'] = response.meta.get('redirected_from',\n u'')\n yield item",
"def scan(link):\n try:\n r = requests.get(link)\n if r.status_code == 200:\n soup = BeautifulSoup(r.content, \"html.parser\")\n return soup.find_all(\"a\")\n except ConnectionError as e:\n print(\"Connection error occurred while trying to reach the page\")\n print(e)\n return []",
"def maincall(self, usernamelist, output_choice, tag_bool, com_bool):\n \n dict1 = self.userpage_scraper(usernamelist)\n dict2 = self.user_images_url(dict1)\n\n self.crawling_images_url(dict2, output_choice, com_bool, tag_bool)",
"def posts_info(self,soup,Urls_list,Likes,URLS,Date):\n \n while 1:\n time.sleep(0.2)\n post=soup.find_all('div',class_=\"by\") \n for i in post:\n l=i.find('span',id=re.compile(\"like_\"))\n Hr=i.find('a',href=re.compile(\"#footer_action_list\"))\n if Hr==None:\n Hr=i.find('a',href=re.compile(\"/story.php\"))\n \n \n d=i.find('abbr')\n \n if Hr!=None:\n Href=Hr['href']\n Href=Href.replace('https://m.facebook.com','')\n Href=Href.replace('https://mbasic.facebook.com','') \n Urls_list.append(Href)\n if d !=None:\n date=d.get_text()\n Date.append(date)\n else:\n Date.append('None')\n \n if l!=None: \n if l.get_text()!=None:\n likes=l.get_text()\n if likes==\"Like · React\":\n likes='0'\n else:\n likes=likes.replace('· Like · React','') \n likes=likes.replace(\"· Like\",'')\n likes=likes.replace(\"· Love\",'')\n likes=likes.replace(\"· Haha\",'')\n likes=likes.replace(\"· Care\",'')\n likes=likes.replace(\"· Wow\",'')\n likes=likes.replace(\"· Angry\",'')\n Likes.append(likes)\n else:\n Likes.append(\"0\")\n else:\n Likes.append(\"0\")\n \n \n more=self.more_page(soup)\n if more !=None:\n soup=self.get_page(more,session)\n \n else:\n break\n \n Urls_list,URLS=self.clean_url(Urls_list,URLS) \n \n return Urls_list,URLS,Likes,Date",
"def try_url(data, cache = None, base_url = None):\n if data is None:\n return data\n return ScrapeURL(data, cache = cache, base_url = base_url)",
"def search_thru_comments(urls, listOfKWs):\n browser = webdriver.Chrome('/Users/sophie/documents/chromedriverCurrent')\n\n listKWs = []\n for KW in listOfKWs:\n listKWs.append([KW])\n # ex: listKWs=[['poverty'], ['inequality'], ['aids'], ['hiv']]\n # list where list[something]=name of KW. append after that the urls.\n global listKWsDate\n listKWsDate = []\n for KW in listOfKWs:\n listKWsDate.append([KW])\n print(listKWs == listKWsDate)\n\n for link in urls:\n browser.get(link)\n\n source = browser.page_source\n data = bs(source, 'html.parser')\n body = data.find('body')\n script = body.find('script',\n text=lambda t: t.startswith('window._sharedData'))\n #print(script)\n scriptStr = str(script)\n scriptStr.replace(\"'\",\"\")\n #scriptSplit=script.split('shortcode')\n #print(scriptSplit)\n\n #pass to searchForEach which will check the indiv posts for all KWs\n # and will then add them to the appropriate spread sheet\n for KW in listOfKWs:\n searchForEachKW(KW, scriptStr, listKWs, listKWsDate)\n\n #need to change so that calls search for each KW here. so that\n # searching each link for all the hashtags, and then add link to\n # appropriatre kw spreadsheet\n\n return listKWs",
"def get_data(self, link_queue):\n while not link_queue.empty():\n url = link_queue.get(block=False)\n try:\n with urlopen(url) as url_open:\n beauty_url = BeautifulSoup(url_open.read(), 'html.parser')\n res = beauty_url.select('a[href*=\"tvn24\"]:not([href^=\"mailto\"])')\n res = {re.get('href') for re in res if not re.get('href').endswith('/')}\n except: # pylint: disable=bare-except\n pass\n else:\n link_queue.task_done()\n self.res_queue.put(res)",
"def crawl_url_links(input_dict):\n\n extractor_name=input_dict.get('extractor','DefaultExtractor')\n import requests\n label=input_dict['label']\n urls,_, source, source_date=_process_input(input_dict['input'],False)\n\n\n docs=[]\n titles=[]\n for url in urls:\n print(url)\n try:\n r = requests.get(url)\n except ConnectionError:\n continue\n if r.status_code==200:\n html=r.text\n from boilerpipe.extract import Extractor\n extractor = Extractor(extractor=extractor_name, html=html)\n\n titles.append(url)\n text=''\n if label:\n text+='!'+label+'\\t'\n text+=extractor.getText()\n docs.append(text)\n\n\n corpus_date = str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime()))\n documents, labels = _process_adc(docs, False, label, titles)\n features = {\"Source\": source, \"SourceDate\": source_date, \"CorpusCreateDate\": corpus_date,\n \"Labels\": json.dumps([label]) if label else '[]'}\n\n return {\"adc\": DocumentCorpus(documents=documents, features=features)}",
"def get(soup, data, dictionary):\n\n # Steg 1: Ting som alltid er sant:\n dictionary['fb_like'] = None #0\n dictionary['others_share'] = None #0\n dictionary['fb_share'] = len(soup.select(\".share-facebook\"))\n dictionary['googleplus_share'] = len(soup.select(\".share-googleplus\"))\n dictionary['twitter_share'] = len(soup.select(\".share-twitter\"))\n dictionary['email_share'] = len(soup.select(\".share-mail\"))\n\n # tror ikke disse har noen aside...\n dictionary['related_stories_box_les'] = len(soup.select(\"aside.articlewidgets article\"))\n\n # related thematic (found in footer part of page)\n dictionary['related_stories_box_thematic'] = 0\n # grab that footer part with data-relation-limit attr\n related_thematic = soup.find_all(has_data_relation_limit)\n # loop\n for el in related_thematic:\n #check divs\n for div in el.select(\"div\"):\n if has_data_id(div):\n dictionary['related_stories_box_thematic'] +=1\n\n # re related stories is the combined previous two\n dictionary['related_stories'] = dictionary['related_stories_box_les'] + dictionary['related_stories_box_thematic']\n \n\n # antall js dokumenter\n dictionary['js'] = count_js(soup, data, dictionary) # = len(re.findall(\"<iframe src=\", data)) # .js\n # remove javascript.\n [s.decompose() for s in soup.body.article('script')]\n # I believe this is what creates the somewhat awkward line-breaks in the soup\n\n # Find author(s)\n byline = soup.find('div', 'byline')\n authors = []\n try:\n for address, li in izip(byline.find_all('address'), byline.find_all('li', 'icon-email')):\n authorName = address.strong.text #address.find(class_='fn').string.encode('utf-8')\n # NRK is still trying to hide the email address from spammers. #href = li.a['href']\n authorMail = None # 'abandon this? too hard?'#unquote(href[21:-1])[7:] # Antakelsen er at epost vil holde seg til ASCII. \n authorRole = address.span.text #address.find(class_='role').string.strip().encode('utf-8')\n author = [authorName, authorMail, authorRole]\n authors.append(author)\n # and remove author image (so not to count it later..) \n address.figure.decompose()\n except AttributeError:\n # Finner ingen forfatter(e)\n new_logger.warn(\"Ingen forfattere \\\"{0}\\\". Oppgir \\\"<UKJENT>\\\" som forfatter\".format(dictionary['url']))\n #print \n authors.append([None, None, None])\n dictionary['authors'] = authors\n \n # Find published datetime\n try:\n dictionary['published'] = strptime(soup.time['datetime'][0:19], \"%Y-%m-%dT%H:%M:%S\")\n except TypeError:\n new_logger.info(\"finner ikke publiseringsdato\")\n dictionary['published'] = None\n\n new_logger.debug(\"published: %s\", type(dictionary['published']))\n # Find update datetime\n try:\n updated = soup.find('span', 'update-date')\n dictionary['updated'] = datetime.strptime(updated.time['datetime'][0:19], \"%Y-%m-%dT%H:%M:%S\")\n except:\n new_logger.info(\"finner ikke oppdateringsdato\")\n dictionary['updated'] = None\n\n # Find headline\n try:\n dictionary['headline'] = soup.body.article.find('h1').text.strip()\n #dictionary['headline'] = soup.header.find('div', 'articletitle').h1.text # .text gived unicode, .string gives 'bs4.element.NavigableString'\n except AttributeError:\n new_logger.debug( \"NB: bruker doc-title...\" )\n dictionary['headline'] = soup.title.text\n\n # Find fact-boxes :\n # Should be removes from body, but includes in LIX. 
Right?\n faktabokser = []\n #for boks in soup.find_all(\"section\", class_=\"articlewidget cf facts lp_faktaboks\"):\n for boks in soup.find_all(\"section\", class_=\"facts\"):\n faktaboks_text = boks.text.strip()\n lix = Lix(faktaboks_text)\n faktaboks_analysis = lix.analyzeText(faktaboks_text)\n faktabokser.append({\"text\":faktaboks_text, \"links\":boks.find_all(\"a\"), \"wordcount\":faktaboks_analysis['wordCount']})\n # and remove from soup\n #boks.decompose() #ikke fjern boks'n fra suppa.\n # NB, this also removes pictures if any in the fact-box\n dictionary['factbox'] = faktabokser\n\n new_logger.debug(\"faktabokser: %s\", len(dictionary['factbox']))\n\n # Find full text \n # article MINUS .universes OR is it .lp_related ?\n # remove the related section\n # try:\n # soup.body.article.find('section', 'lp_related').decompose()\n # except:\n # pass\n # # remove div.published (the top-bar)\n # soup.body.article.find('div', 'published').decompose()\n # # remove div.shareurl (the sharebar)\n # soup.body.article.find('div', 'sharing').decompose()\n\n # Find self declared url # get this before decomposing the header this is found in..\n dictionary['url_self_link'] = soup.select(\"time > a\")[0]['href']\n\n # remove header with sharing links and date\n soup.select(\".bulletin-header\")[0].decompose()\n # store body text\n dictionary['body'] = soup.body.article.text.strip() \n # .stripped_strings option?\n # soup.get_text(\"|\", strip=True) perhaps?\n\n # Find char count, line count, word count and Lix\n lix = Lix(dictionary['body']) \n\n analyse = lix.analyzeText(dictionary['body'])\n try:\n dictionary['char_count'] = len(dictionary['body'])\n dictionary['word_count'] = analyse['wordCount']\n dictionary['line_count'] = analyse['sentenceCount']\n dictionary['lesbahet'] = analyse['lixScore']\n except TypeError:\n new_logger.error(\"Kunne ikke kjøre lix\", dictionary['body']) \n dictionary['line_count'] = None\n dictionary['word_count'] = None\n dictionary['char_count'] = None\n dictionary['lesbahet'] = -1.0\n\n # look through the last part of the body text to find news bureau\n # add more in settings.py\n dictionary['news_bureau'] = matches_pattern(dictionary['body'].strip()[-200:], syndicators)\n\n\n # Find language. Defaults can be tampered with in settings.py\n (language, certainty) = langid.classify(soup.body.article.text)\n new_logger.debug( \"(language, certainty) (%s, %s)\" % (language, certainty))\n language_code = uncertain_language_string\n if (certainty > language_identification_threshold):\n language_code = language\n\n dictionary['language'] = language_code\n\n\n get_video(soup.body.article, data, dictionary)\n\n # flash (untested)\n dictionary['flash_file'] = get_flash(soup.body.article, data, dictionary)\n\n # Tell opp iframe. \n dictionary['iframe'] = count_iframes(soup, data, dictionary)\n \n # Tell opp css (karakterer)\n dictionary['css'] = count_css(soup, data, dictionary)\n\n\n # Finnes det en form for kommentarer her? I de nyere NRK sidene er det tydeligvis kun det på Ytring.\n # Men vi søker generelt nå, og håper på det beste. I verste fall vil et interessant krasj fortelle meg at dette ikke er tilfellet. 
–Haakon\n dictionary['comment_fields'] = 0\n dictionary['comment_number'] = 0\n if len(re.findall('<div id=\"disqus_thread\"', data)) != 0:\n dictionary['comment_fields'] = 1\n dictionary['comment_number'] = None # -9999#num_comments(dictionary)\n \n # tar seg av lenker i siden\n count_links(soup, data, dictionary)\n\n # antall bilder.\n # Beautiful Soup teller feil her og. Noe er galt.\n # Regex matching gir riktig resultat så vi får gå for det.\n #result = soup.article.find_all('figure', 'image')\n #print len(result)\n #new_logger.debug( \"antall bilder: %s\", len(re.findall(\"<img src=\\\"http:\", data)) )\n \n dictionary['images'] = count_images(soup.body.article, data, dictionary)\n \n # bildesamlinger\n dictionary['image_collection'] = len(soup.select(\".slideshow\")) # er dette nok?\n # Som diskutert med Eirik, dette henter ut bildetekstene og deler dem med pipe symboler.\n\n imgtagger = re.findall(u\"<img src=\\\"http.*\\n.*\", str(soup.body.article) )\n bildetekst = \"\"\n for imgtag in imgtagger:\n funn = re.findall(\"alt=\\\".*\\\"\", imgtag)\n if len(funn) > 0:\n bildetekst += ((funn[0])[5:-1] + \" | \")\n bildetekst = bildetekst[:-3] # Fjerner siste pipen\n dictionary['image_captions'] = bildetekst\n\n\n\n dictionary['map'] = count_map(soup.body.article, data, dictionary)\n dictionary['poll'] = None # -9999\n dictionary['game'] = None # -9999\n \n dictionary['interactive_elements'] = count_interactive( \\\n dictionary['comment_fields'] , dictionary['image_collection'] , \\\n dictionary['video_files'] , dictionary['video_files_nrk'] , \\\n dictionary['fb_like'] , dictionary['fb_share'] , \\\n dictionary['googleplus_share'] , dictionary['twitter_share'] , \\\n dictionary['others_share'] , dictionary['email_share'] , \\\n dictionary['map'] , dictionary['poll'] , dictionary['game'])\n \n\n\n\n\n return dictionary",
"def test_scrape(self):\n self.assertEqual(self.scraped.title, 'Heading!')\n self.assertEqual(self.scraped.link_text, 'Go to Google')\n self.assertEqual(self.scraped.link_url, 'http://Google.com')"
]
| [
"0.6941169",
"0.6730641",
"0.630104",
"0.6288677",
"0.6256914",
"0.5943134",
"0.5876478",
"0.5758648",
"0.5690269",
"0.56877136",
"0.5608039",
"0.5546213",
"0.5546081",
"0.5538152",
"0.55267596",
"0.5491039",
"0.5466902",
"0.54129934",
"0.5401599",
"0.5398654",
"0.5341974",
"0.53302765",
"0.53180265",
"0.5305892",
"0.5291174",
"0.52877516",
"0.5231577",
"0.5227738",
"0.52199405",
"0.5212211"
]
| 0.7491283 | 0 |
initializes an empty graph (no nodes, no edges) and an empty list that stores graph copies | def __init__(self):
self._graph = DirectedGraph()
self._graph_copies = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.graph = {}\n self.edges = 0\n self.vertices = 0",
"def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])",
"def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))",
"def __init__(self, nodes=None, edges=None):\n self._nodes = []\n self.nodes = nodes\n self._edges = []\n self.edges = edges\n self._create_connections()\n self._sorted_nodes = None\n self._node_wip = []",
"def __init__(self):\n self.graph = LinkedList()",
"def __init__(self):\n self.graph = None",
"def fresh_copy(self):\n return OrderedGraph()",
"def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()",
"def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}",
"def empty_instance():\n from weighted_graph import Graph\n return Graph()",
"def __init__(self, edges=None, nodes=None, graph=None):\n self.edges = edges\n self.nodes = nodes\n self.graph = graph",
"def __init__(self, graph):\n\n self.V = set() # all vertices\n # optimize: use arrays and map idx <-> names, should we get much larger graphs\n # TODO pre-compute/cache all-pairs shortest paths? [dist for src == dest > 0]\n # self.A = defaultdict(dict) # shortest-paths, lookup A[from][to] => dist\n self.G = defaultdict(dict) # the graph, lookup G[from][to] to get direct distance (check for existence!)\n # makes cases 1-5 quick & easy, but memory hog for much larger graphs\n for edge in graph:\n src, dest, cost = edge\n self.V.add(src)\n self.V.add(dest)\n self.G[src][dest] = cost",
"def build_empty_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)",
"def __init__(self):\n self._adjacency_list = {\n\n }",
"def fresh_copy(self):\n return OrderedMultiGraph()",
"def __init__(self, graph_dict: Dict[Node, List[Node]] = None) -> None:\n if graph_dict is None:\n graph_dict = {}\n self.__graph_dict = graph_dict",
"def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()",
"def _construct_graph(self):\n raise NotImplementedError",
"def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")",
"def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph",
"def fresh_copy(self):\n return OrderedMultiDiGraph()",
"def initMyGraph(ctor):\n\tg = ctor(5)\n\tg.addEdge(0,1)\n\tg.addEdge(1,0)\n\tg.addEdge(1,1)\n\tg.addEdge(1,2)\n\tg.addEdge(4,0)\n\tg.addEdge(4,2)\n\treturn g",
"def copy_graph(g):\n return copy.deepcopy(g)",
"def construct_null_graph(num_nodes):\n # return the graph represented using dictionary format\n return dict({node: dict({}) for node in range(num_nodes)})",
"def fresh_copy(self):\n return OrderedDiGraph()",
"def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()",
"def __init__(self):\n self._list: List[Edge] = list()",
"def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}",
"def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}",
"def initialize_graph(self, V, edge_list):\n # ---------- INSERT CODE BELOW ----------\n for _ in range(V):\n self.add_vertex()\n \n for node in edge_list:\n self.add_edge(node[0],node[1],node[2])\n\n # ---------- INSERT CODE ABOVE ----------"
]
| [
"0.6994657",
"0.6869622",
"0.67684174",
"0.67584795",
"0.6714823",
"0.6585916",
"0.6517928",
"0.6511417",
"0.6493447",
"0.64735955",
"0.647329",
"0.64572185",
"0.64479786",
"0.6444233",
"0.6434317",
"0.64009154",
"0.6390514",
"0.6379037",
"0.63668334",
"0.6359676",
"0.6354472",
"0.63381773",
"0.63316303",
"0.6326621",
"0.6288571",
"0.62839365",
"0.6281663",
"0.6277278",
"0.6277278",
"0.62540036"
]
| 0.75274307 | 0 |
checks if there is an edge between two vertices read from keyboard. | def check_edges(self):
start = int(input('Enter start vertex: '))
end = int(input('Enter end vertex: '))
if self._graph.is_edge_between(start, end):
print('There is an edge from ' + str(start) + ' to ' + str(end))
else:
print('There is NO edge from ' + str(start) + ' to ' + str(end)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]",
"def isEdge(self, x, y):\n if y in self.parseX() or x in self.parseX():\n return y in self.dictOut[x]\n else :\n print(\"verteces not found\")",
"def isEdge(self,x,y):\r\n return self.matr[x][y]",
"def isEdge(self,x,y):\n\t\treturn y in self._dict[x]",
"def IsEdge(self, p_int, p_int_1):\n ...",
"def isEdge(self,x,y):\n\t\treturn self._matr[x][y]",
"def isEdge(self, x, y):\n return y in self._dictOut[x]",
"def is_edge(self, v, w):\n return self.op_norm(v[0], w[0]) == (v[1] + w[1]) and (self.variant.is_bipartite() or v != w)",
"def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False",
"def containsEdge(self, v1, v2):\n for e in self.edges:\n if (e.pvt, e.nvt) in [(v1, v2), (v2, v1)]:\n return True\n return False",
"def is_boundary_edge(a, b, bdy_edges):\n for edge in bdy_edges:\n a0, b0 = edge\n if a == a0 and b == b0:\n return True\n return False",
"def is_edge(graph, u, v):\n return graph.matrix[u][v]",
"def has_edge(self, v1, v2):\n\n return v1 in self.get_reachables(v2[0], v2[1])",
"def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges",
"def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True",
"def isAdjacent(self, vertex1, vertex2):\n return vertex2 in self.adjList[vertex1]",
"def is_adjacent(self, startVertex: np.int_ , endVertex: np.int_):\n return self.__mat[startVertex][endVertex]>0",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True",
"def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True",
"def containsEdge(self, e):\n return any(e.nvt in [self.vertices[i-2], self.vertices[i]] and self.vertices[i-1] == e.pvt for i in range(len(self.vertices)))",
"def edge_direction(a, b):\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None",
"def is_adjacent(v1, v2):\n return (v2 in _board_graph[v1])",
"def does_edge_exist(self, src_key, dest_key):\n return self.vertices[src_key].does_it_point_to(self.vertices[dest_key])",
"def comp_edge(_P, P): # Used in scan_P_().\n _x0 = _P['x0']\n _xn = _x0 + _P['L']\n x0 = P['x0']\n xn = x0 + P['L']\n\n if _xn < xn: # End-point relative position.\n return True, x0 < _xn # Overlap.\n else:\n return False, _x0 < xn",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.bottom >= screen_rect.bottom or self.rect.top <= -1:\n\t\t\treturn True",
"def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1",
"def test_case22(self):\n \n result = self.graph1.isEdge(\"supervisor1\",\"student1\")\n\n self.assertTrue(result)",
"def is_vert(e) :\n f = e[0][0]\n for t in e :\n if f != t[0] :\n return False\n return True"
]
| [
"0.7021373",
"0.70151204",
"0.6955464",
"0.6910868",
"0.68814826",
"0.683401",
"0.66876227",
"0.6523862",
"0.6507085",
"0.6488775",
"0.6424605",
"0.6365208",
"0.63593936",
"0.63342386",
"0.6291343",
"0.6290668",
"0.6263541",
"0.62546223",
"0.62185633",
"0.6196922",
"0.61563927",
"0.61371475",
"0.6133721",
"0.6110442",
"0.60967016",
"0.6089659",
"0.60703576",
"0.6058734",
"0.60370994",
"0.6012384"
]
| 0.71416295 | 0 |
prints the degree of a vertex read from keyboard | def print_degree(self):
vertex = int(input('enter vertex: '))
in_degree = self._graph.get_in_degree(vertex)
out_degree = self._graph.get_out_degree(vertex)
print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))
print('The out degree of ' + str(vertex) + ' is ' + str(out_degree)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()",
"def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()",
"def return_outdeg(self, vertex: np.int_):\n return len(self.__adj[vertex])",
"def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])",
"def key(self, vertex: \"Vertex\") -> \"Vertex\":\n p = self.precision\n return round(vertex[0], p), round(vertex[1], p), round(vertex[2], p)",
"def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees",
"def degree_v(self):\n return self._degree_v",
"def return_indeg(self, vertex: np.int_):\n return sum(map(lambda x: x>0,self.__mat[:,vertex]))",
"def degree_node(g, node):\n return len(g[node])",
"def out_degrees_tf_vertex(self, vertex: int) -> tf.Tensor:\n return tf.gather(self.out_degrees_tf, [vertex])",
"def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map",
"def degree(self,v,outgoing=True):\n adj = self._outgoing if outgoing else self._incoming\n return len(adj[v])",
"def degree(self):\n return self.graph.degree()",
"def vertex_multidegree(breakpoint_graph, vertex):\n return len(list(breakpoint_graph.get_edges_by_vertex(vertex)))",
"def degree(self):\n return self._deg",
"def get_degree(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n return len(self.__neighbours[vertex])",
"def parse_vertex(lines):\n print \" * Parsing vertex\"\n return _parse_vn(lines, \"v %.6f %.6f %.6f\")",
"def degree(self):\n return self._degree",
"def degree(self):\n return self._degree",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def parse_vertex(text):\r\n\r\n v = 0\r\n t = 0\r\n n = 0\r\n\r\n chunks = text.split(\"/\")\r\n\r\n v = int(chunks[0])\r\n if len(chunks) > 1:\r\n if chunks[1]:\r\n t = int(chunks[1])\r\n if len(chunks) > 2:\r\n if chunks[2]:\r\n n = int(chunks[2])\r\n\r\n return { 'v':v, 't':t, 'n':n }",
"def get_degrees(msg):\n #read input from positioner\n qpt.flushInput()\n pos_string = b''\n comp = b''\n if(msg == 'default'): \n msg = b'\\x02\\x31\\x00\\x00\\x00\\x00\\x00\\x31\\x03'\n while(len(pos_string) < 7):\n qpt.write(msg)\n pos_string = qpt.readline()\n if(pos_string[0] != 0x06):\n pos_string = b'\\x00'\n #should make it re-read string, will go back to start of while loop\n #convert the hex value to degrees for horizontal position\n if(pos_string[2] == 0x1B and pos_string[4] == 0x1B):\n #2 1b for x\n hor_deg = (((int(pos_string[5]-128))*256) + (int(pos_string[3])-128))/10\n if(pos_string[6] == 0x1B and pos_string[8] == 0x1B):\n #2 for y\n ver_deg = ((int(pos_string[9]-128)*256) + (int(pos_string[7]-128)))/10\n elif(pos_string[7] == 0x1B):\n #1 for y\n ver_deg = ((int(pos_string[8]-128)*256) + (int(pos_string[6])))/10\n elif(pos_string[6] == 0x1B):\n #1 for y, different location\n ver_deg = ((int(pos_string[8])*256) + (int(pos_string[6]-128)))/10\n else:\n #none for y\n ver_deg = ((int(pos_string[7])*256) + (int(pos_string[6])))/10\n #make correction for negative value\n if(ver_deg > 360):\n #y negative, do backwards\n ver_deg = (-1)*(65535 - ver_deg*10)/10\n \n elif(pos_string[3] == 0x1B):\n #1 for x\n hor_deg = (((int(pos_string[4])-128)*256) + (int(pos_string[2])))/10\n if(pos_string[5] == 0x1B and pos_string[7] == 0x1B):\n #2 for y\n ver_deg = ((int(pos_string[8]-128)*256) + (int(pos_string[6]-128)))/10\n elif(pos_string[6] == 0x1B):\n #1 for y\n ver_deg = ((int(pos_string[7]-128)*256) + (int(pos_string[5])))/10\n elif(pos_string[5] == 0x1B):\n #1 for y, different location\n ver_deg = ((int(pos_string[7])*256) + (int(pos_string[6]-128)))/10\n else:\n #none for y\n ver_deg = ((int(pos_string[6])*256) + (int(pos_string[5])))/10\n #make correction for negative value\n if(ver_deg > 360):\n ver_deg = (-1)*(65535 - ver_deg*10)/10\n \n elif(pos_string[2] == 0x1B):\n #1b in first location\n hor_deg = (((int(pos_string[4]))*256) + (int(pos_string[3]-128)))/10\n if(pos_string[5] == 0x1B and pos_string[7] == 0x1B):\n #2 for y\n ver_deg = ((int(pos_string[8]-128)*256) + (int(pos_string[6]-128)))/10\n elif(pos_string[6] == 0x1B):\n #1 for y\n ver_deg = ((int(pos_string[7]-128)*256) + (int(pos_string[5])))/10\n elif(pos_string[5] == 0x1B):\n #1 for y in different location\n ver_deg = ((int(pos_string[7])*256) + (int(pos_string[6]-128)))/10\n \n else:\n #none for y\n ver_deg = ((int(pos_string[6])*256) + (int(pos_string[5])))/10\n #make correction for negative value\n if(ver_deg > 360):\n #y negative, do backwards\n ver_deg = (-1)*(65535 - ver_deg*10)/10\n \n else:\n #none for x\n hor_deg = ((int(pos_string[3])*256) + (int(pos_string[2])))/10\n if(pos_string[4] == 0x1B and pos_string[6] == 0x1B):\n #2 for y\n ver_deg = ((int(pos_string[7]-128)*256) + (int(pos_string[5]-128)))/10\n elif(pos_string[5] == 0x1B):\n #1 for y\n ver_deg = ((int(pos_string[6]-128)*256) + (int(pos_string[4])))/10\n elif(pos_string[4] == 0x1B):\n #1 for y, different location\n ver_deg = ((int(pos_string[6])*256) + (int(pos_string[5]-128)))/10\n else:\n #none for y\n ver_deg = ((int(pos_string[5])*256) + (int(pos_string[4])))/10\n #make correction for negative value\n if(ver_deg > 360):\n #y negative, do backwards\n ver_deg = (-1)*(65535 - ver_deg*10)/10\n\n if(hor_deg > 360):\n #rewrite for negative x\n hor_deg = (-1)*(65535 - hor_deg*10)/10\n\n print('At: ', hor_deg, ver_deg)\n print(pos_string)\n print(pos_string[0],pos_string[1],pos_string[2],pos_string[3],pos_string[4],)\n print(' ')\n return hor_deg, ver_deg",
"def get_vertices(self):\n return str(self.vert_dict.keys())",
"def get_vertex(self):\n V = circumcenter(self.Cents)\n return V",
"def readchar(self) -> int:",
"def get_key_plain():\n if len(sys.argv) != 2:\n exit(\"Usage: python vigenere.py k\")\n\n # get plaintext\n user_input = input(\"plaintext: \")\n \n return sys.argv[1], user_input",
"def order_v(self):\n return self._degree_v + 1",
"def get_vertex_keys(self):\n return self.vertList.keys()",
"def degree_symbol():\n return u'\\N{DEGREE SIGN}'",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()"
]
| [
"0.6310401",
"0.60159636",
"0.5975416",
"0.5782732",
"0.56887203",
"0.56442046",
"0.56138074",
"0.54638255",
"0.54545337",
"0.5335347",
"0.53333074",
"0.5323652",
"0.5312062",
"0.5273959",
"0.52303344",
"0.5211556",
"0.51971334",
"0.51810956",
"0.51810956",
"0.5150495",
"0.5129437",
"0.5109472",
"0.5097528",
"0.5067669",
"0.49982116",
"0.49970883",
"0.49535754",
"0.4948855",
"0.49297938",
"0.49231404"
]
| 0.7967106 | 0 |
iterates all outbound edges of vertex read from keyboard. prints them and their cost | def iterate_outbound_edges(self):
vertex = int(input('enter vertex: '))
try:
vertices = self._graph.get_outbound_edges(vertex)
except ValueError as ve:
print(ve)
return
print('Outbound edges from ' + str(vertex) + ':')
for v in vertices:
cost = self._graph.get_cost(vertex, v)
print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterate_inbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_inbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Inbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(v, vertex)\n print('Edge from ' + str(v) + ' to ' + str(vertex) + ' with cost ' + str(cost))",
"def print_cost(self):\n start = int(input('start vertex: '))\n end = int(input('end vertex: '))\n cost = self._graph.get_cost(start, end)\n print('the cost of edge is ' + str(cost))",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()",
"def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)",
"def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def print_output(edges):\n for edge in edges:\n print(\"{} {} {}\".format(edge[0], edge[1], int(edge[2])))",
"def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))",
"def writeEDGE(self):\n\t\tpass",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def printPath(edgesTo,v):\r\n path = str()\r\n while v is not None:\r\n print(v) \r\n path += str(v) + ' -> ' \r\n v = edgesTo[v]\r\n print(path)",
"def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))",
"def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))",
"def read_input(E):\n # ---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------",
"def main():\n e = Edge(12, 34, 5.67)\n print(e)",
"def getedge(self):\n cmd=\"getEdge(\"+self.board+\",\"+self.inpedge+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n #print 'edge= ',output\n self.edge=output[0]\n self.inputedge.setEntry(self.edge)\n if self.board != '0': \n self.delay=output[1] \n self.inputdelay.setEntry(self.delay)",
"def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"",
"def draw_edges(img, data_vertex, data_edges):\r\n i = 0\r\n for v1, v2, v3 in data_edges: # get the numbers of string\r\n # # v1, v2, v3 = v1 - 1, v2 - 1, v3 - 1 # change the numbering\r\n # print(v1,v2,v3)\r\n img = draw_line(img, data_vertex, v1, v2)\r\n img = draw_line(img, data_vertex, v1, v3)\r\n img = draw_line(img, data_vertex, v2, v3)\r\n i += 1\r\n # print(i)\r\n return img",
"def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")",
"def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.capacity[(u, v)]})\"\r\n return \", \".join(map(edgeRepresentation, self.residualNeighbors(u)))",
"def __repr__(self):\n s = [\"{} vertices, {} edges\\n\".format(self._V, self._E)]\n for v in range(self._V):\n s.append(\"%d : \" % (v))\n for w in self._adj[v]:\n s.append(\"%d \" % (w))\n s.append(\"\\n\")\n\n return \"\".join(s)",
"def boldlyGo(self, edges):\n\t\t\n\t\t# gets list of edges\n\t\t# runs through and calculates straighline lengths for all of them\n\t\t\n\t\t# chooses the one with the least cost - probably just straightline distance\n\t\t\t#in the future, we could run Astar on all of them and choose the one with best path\n\t\t\t# or have a history which picks the biggest one eventually\n\t\t# sends that as a goal to astar, lets robot move there and report it is done the move",
"def pretty_print_equation(self):\n\n for n in self.nodes:\n # Get a list of tuples, first is the v\n parents = self.adj_inv[n]\n if len(parents) == 0:\n if self.binary:\n right_side = '{0,1}'\n else:\n right_side = 'N(0, 1)'\n else:\n right_side = ' + '.join(['{:.3f}*x_{}'.format(self.weights[i, n], i)\n for i in parents])\n \n right_side.replace('+ -', '-')\n print('x_{} = {}'.format(n, right_side))",
"def draw_edges(self):\n pass",
"def showFlow(self):\r\n def edgesFromVertex(u):\r\n \"\"\"\r\n Represents the flow across the given vertex.\r\n \"\"\"\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))\r\n\r\n def adjacencyLists():\r\n \"\"\"\r\n Represents the flow across all relevant vertices.\r\n \"\"\"\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)\r\n\r\n return print(\"\\n\".join(adjacencyLists()))",
"def print_out_unexplained(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['unexplained_flow']\n print(\"({} {}) unexplained flow={}, edgeId={}\".format(s, t, w,\n arc))",
"def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)",
"def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')",
"def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )"
]
| [
"0.7460391",
"0.6843248",
"0.63860714",
"0.63567245",
"0.62640804",
"0.62558603",
"0.6059996",
"0.60002565",
"0.589454",
"0.5844208",
"0.5819766",
"0.5682499",
"0.5679052",
"0.56354827",
"0.5605318",
"0.5578951",
"0.5576057",
"0.554845",
"0.55293566",
"0.55283886",
"0.5504756",
"0.5460331",
"0.54447377",
"0.5422286",
"0.54063255",
"0.5398567",
"0.53953373",
"0.5384925",
"0.53849065",
"0.5359387"
]
| 0.79708344 | 0 |
iterates over all inbound edges of a vertex read from the keyboard and prints each edge with its cost. | def iterate_inbound_edges(self):
vertex = int(input('enter vertex: '))
try:
vertices = self._graph.get_inbound_edges(vertex)
except ValueError as ve:
print(ve)
return
print('Inbound edges from ' + str(vertex) + ':')
for v in vertices:
cost = self._graph.get_cost(v, vertex)
print('Edge from ' + str(v) + ' to ' + str(vertex) + ' with cost ' + str(cost)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(vertex, v)\n print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost))",
"def print_cost(self):\n start = int(input('start vertex: '))\n end = int(input('end vertex: '))\n cost = self._graph.get_cost(start, end)\n print('the cost of edge is ' + str(cost))",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))",
"def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)",
"def read_input(E):\n # ---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------",
"def showFlow(self):\r\n def edgesFromVertex(u):\r\n \"\"\"\r\n Represents the flow across the given vertex.\r\n \"\"\"\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))\r\n\r\n def adjacencyLists():\r\n \"\"\"\r\n Represents the flow across all relevant vertices.\r\n \"\"\"\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)\r\n\r\n return print(\"\\n\".join(adjacencyLists()))",
"def print_output(edges):\n for edge in edges:\n print(\"{} {} {}\".format(edge[0], edge[1], int(edge[2])))",
"def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"",
"def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))",
"def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)",
"def process_input(input_path):\n\n # Parse lines from input file into list\n with open(input_path, 'r') as input_file:\n lines = input_file.readlines()\n\n # Declare component lists and helper variables\n vertex_map = {} # Mapping of named vertices to indices, handles duplicate connections\n idx = 0\n edges = [] # List of (src, dst) tuples\n weights = [] # Weight of each edge\n\n for line in lines:\n # Parse each line of csv or text file\n if input_path.endswith('.csv'):\n parts = line.split(',')\n else:\n parts = line.split()\n\n # Add source vertex to list of vertices\n src = parts[0]\n if src not in vertex_map:\n vertex_map[src] = idx\n idx += 1\n\n # Add destination vertex to list of vertices\n dst = parts[1]\n if dst not in vertex_map:\n vertex_map[dst] = idx\n idx += 1\n\n # Add integer representation of edges to list of connections\n edges.append((vertex_map[src], vertex_map[dst]))\n weights.append(parts[2])\n\n # Get definite list of vertices\n vertices = vertex_map.keys()\n\n # Print graph information\n vprint(str(len(vertices)) + ' vertices')\n vprint(str(len(edges)) + ' edges')\n\n # Build IGraph representation of network\n graph = ig.Graph(edges, directed=False)\n graph.es['weight'] = [weights[e] for e in range(len(graph.es))]\n\n return graph, vertices",
"def pretty_print_equation(self):\n\n for n in self.nodes:\n # Get a list of tuples, first is the v\n parents = self.adj_inv[n]\n if len(parents) == 0:\n if self.binary:\n right_side = '{0,1}'\n else:\n right_side = 'N(0, 1)'\n else:\n right_side = ' + '.join(['{:.3f}*x_{}'.format(self.weights[i, n], i)\n for i in parents])\n \n right_side.replace('+ -', '-')\n print('x_{} = {}'.format(n, right_side))",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def draw_edges(img, data_vertex, data_edges):\r\n i = 0\r\n for v1, v2, v3 in data_edges: # get the numbers of string\r\n # # v1, v2, v3 = v1 - 1, v2 - 1, v3 - 1 # change the numbering\r\n # print(v1,v2,v3)\r\n img = draw_line(img, data_vertex, v1, v2)\r\n img = draw_line(img, data_vertex, v1, v3)\r\n img = draw_line(img, data_vertex, v2, v3)\r\n i += 1\r\n # print(i)\r\n return img",
"def PrintGraph(self):\n # print(\"Graph has {} nodes and {} edges.\".format(Node.count, Edge.count))\n # print(\"Unique connected nodes:\")\n # for (a, b) in self.connections:\n # print(\"{},{}\".format(a.index, b.index))\n\n # print(f\"\\nAll edges : {[e.index for e in self.edges]}\")\n\n # print(\"\\nDegree of nodes\")\n\n # for node in self.nodes:\n # print(f\"D of {node.index} = {len(node.neighbours)}\")\n\n for node in self.nodes:\n print(\"{}. ({}, {})\".format(node.index, node.x, node.y))",
"def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))",
"def main():\n e = Edge(12, 34, 5.67)\n print(e)",
"def read_input():\n input()\n size = int(input().split()[-1])\n nb_edges = int(input().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = input()\n _, u, v, w = line.split()\n\n e = g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = input()\n while 'Terminals' not in line:\n line = input()\n if 'SECTION' in line:\n line = input()\n while 'Terminals' not in line:\n line = input()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = input()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)",
"def __repr__(self):\n s = [\"{} vertices, {} edges\\n\".format(self._V, self._E)]\n for v in range(self._V):\n s.append(\"%d : \" % (v))\n for w in self._adj[v]:\n s.append(\"%d \" % (w))\n s.append(\"\\n\")\n\n return \"\".join(s)",
"def analyze_edges_and_weight(list_of_nodes):\n edges_info = []\n for node in list_of_nodes:\n n_edge_of_node = len(node.neighbors) # Counts the kys in the dictionary 'Node.neighbors'\n total_weight_of_node = sum(list(map(lambda x: node.neighbors[x], node.neighbors))) # Sums values of the dict\n node_info = (node.name, n_edge_of_node, total_weight_of_node)\n edges_info.append(node_info)\n total_n_edges = sum([tup[1] for tup in edges_info]) # Sum total number of edges\n total_weight_of_graph = sum([tup[2] for tup in edges_info]) # Sum total weight of edges\n sorted_info = sorted(edges_info, key=lambda tup: tup[1], reverse=True)\n return \"Total number of edges is {},\\nTotal weight of the graph is {}:\\nNodes sorted by no. of edges: {}.\".format(total_n_edges, total_weight_of_graph, sorted_info)",
"def printPath(edgesTo,v):\r\n path = str()\r\n while v is not None:\r\n print(v) \r\n path += str(v) + ' -> ' \r\n v = edgesTo[v]\r\n print(path)",
"def run(version=1):\n\n # scan header to define our graph parameters\n try:\n header = input(\"Enter graph header:\")\n edges_count, start_edge, finish_edge = header.split(\" \")\n edges_count = int(edges_count)\n logger.debug(\"Scanned edges count: {}; Start:{}, End:{}\".format(\n edges_count, start_edge, finish_edge))\n except ValueError:\n raise ValueError(\"Input data parsing error, \"\n \"the format should be like \\\"3 a b\\\"\")\n\n # scan edges\n edges = scan_edges(edges_count)\n logger.debug(\"Scanned edges: {}\".format(edges))\n\n optimize(edges, start_edge, finish_edge)\n\n print_output(edges)",
"def getFlowAcrossVertex(self, vertex):\r\n return sum(self.getFlow((vertex, toVertex)) for toVertex in self.adjacent[vertex])",
"def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)",
"def main_script(input_vertices, input_num, input_prob, input_run=0):\n\n\tinput_vert_map = choose_values(input_vertices, input_run)\n\tinput_Q = coupler(input_vert_map, input_num)\n\tinput_results = solve_on_isakov(input_Q)\n\tinput_run_first_group, input_run_second_group = obtain_groups(input_results, input_vertices)\n\tinput_num_edges = count_edges(input_run_first_group, input_run_second_group, input_prob)\n\treturn input_num_edges, input_run_first_group, input_run_second_group",
"def boldlyGo(self, edges):\n\t\t\n\t\t# gets list of edges\n\t\t# runs through and calculates straighline lengths for all of them\n\t\t\n\t\t# chooses the one with the least cost - probably just straightline distance\n\t\t\t#in the future, we could run Astar on all of them and choose the one with best path\n\t\t\t# or have a history which picks the biggest one eventually\n\t\t# sends that as a goal to astar, lets robot move there and report it is done the move"
]
| [
"0.7314351",
"0.7056074",
"0.63513935",
"0.6183939",
"0.6028314",
"0.59128845",
"0.5906504",
"0.58057135",
"0.58016145",
"0.5688565",
"0.5601222",
"0.54778075",
"0.54134685",
"0.5381318",
"0.5345022",
"0.53172",
"0.5306116",
"0.53016233",
"0.5269003",
"0.5239629",
"0.5238125",
"0.5234473",
"0.5213949",
"0.518162",
"0.51778406",
"0.5127623",
"0.51222676",
"0.5103229",
"0.50853807",
"0.5060795"
]
| 0.8002663 | 0 |
prints the cost of the edge between two vertices read from the keyboard, if the edge exists. If the edge does not exist, or the vertices are not valid, a ValueError is raised | def print_cost(self):
start = int(input('start vertex: '))
end = int(input('end vertex: '))
cost = self._graph.get_cost(start, end)
print('the cost of edge is ' + str(cost)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cost_distance(e):\n # Make sure we have a proper edge with two vertices\n if len(e) != 2:\n raise ValueError\n\n a = V_coord[e[0]]\n b = V_coord[e[1]]\n\n # Return the distance between two points\n return distance(a, b)",
"def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n print('There is NO edge from ' + str(start) + ' to ' + str(end))",
"def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(vertex, v)\n print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost))",
"def add_edge(self, v1, v2): # O(1) time complexity\n if v1 in self.vertices and v2 in self.vertices: # check to see if v1 & v2 exists already\n self.vertices[v1].add(v2) # # add connection from v1 to v2 \n else: # else \n print(\"That vertex does not exist\")\n\n # additional options (class)\n \"\"\"\n if (v1 or v2) not in self.vertices:\n return \"vertex does exist\"\n self.vertices[v1].add(v2)\n ###\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices{v1}.add(v2)\n else:\n print(\"One of these vertices does not exist)\n \"\"\"",
"def iterate_inbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_inbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Inbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(v, vertex)\n print('Edge from ' + str(v) + ' to ' + str(vertex) + ' with cost ' + str(cost))",
"def add_edge(self, v1, v2):\n # Check if they exist\n # if v1 in self.vertices and v2 in self.vertices:\n if v1 in self.vertices:\n # Add the edge\n self.vertices[v1].add(v2)\n else:\n print(f\"ERROR ADDING EDGE between {v1} and {v2} : Vertex not found\")",
"def add_edge(self, from_vert, to_vert, cost=0):\n # if either vertex is not in the graph,\n # add it - or return an error (choice is up to you).\n if from_vert not in self.vert_dict or to_vert not in self.vert_dict:\n raise ValueError('vertexes not in graph')\n # if both vertices in the graph, add the\n # edge by making t a neighbor of f\n else:\n self.vert_dict[from_vert].add_neighbor(self.vert_dict[to_vert], cost)",
"def energy_cost(edge):\n return edge_weight(edge) * 1.2",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n print(\"ERROR ADDING EDGE: Vrtes not found\")",
"def add_edge(self, v1, v2):\n pass # TODO\n # both vertices have to exist to make connection(e.g. directed edge)\n\n if v1 in self.vertices and v2 in self.vertices:\n # print(f' type(vertices) is {type(self.vertices)}')\n self.vertices[v1].add(v2) # using set .add() method to append\n else:\n # print(f'ERROR: vertex {v1} or {v2} does not exist') \n raise ValueError(\"Vertex not yet created\")\n # print(f'ERROR: vertex {v1} or {v2} does not exist')\n\n #### not quite\n # try:\n # if v1 in self.vertices or v2 in self.vertices:\n # self.vertices[v1].add(v2)\n # except:\n # raise ValueError(\" BAD VERTEX !!\")\n\n\n if v1 not in self.vertices or v2 not in self.vertices:\n raise ValueError(\" BAD VERTEX !!\")\n else:\n self.vertices[v1].add(v2)",
"def compute_penalty(edge_1, edge_2):\n\n if edge_1 == edge_2:\n return 0\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.FORWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.BACKWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.UNDIRECTED}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.BACKWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.UNDIRECTED}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.BACKWARD, EdgeType.UNDIRECTED}:\n return 1\n else:\n raise ImpossibleEdgeConfiguration",
"def add_edge(self, key1, key2, weight=0):\n\n \n if key1 not in self.graph and key2 not in self.graph:\n raise ValueError(\"Both Vertex of keys {} and {} not in Graph\".format(key1, key2))\n elif key1 not in self.graph or key2 not in self.graph:\n raise ValueError(\"Either Vertex of keys {} and {} not in Graph\".format(key1, key2))\n\n elif key1 == key2:\n raise ValueError(\"Vertex {} can't be its own neighbor\".format(key1))\n else:\n # Get the two neighbor verteces\n vertex_one = self.graph[key1]\n vertex_two = self.graph[key2]\n\n # Code idea from Vicenzo : https://github.com/C3NZ/CS22/blob/master/challenges/graph.py#L77\n added_from = vertex_one.add_neighbor(vertex_two, weight)\n added_to = vertex_two.add_neighbor(vertex_one, weight)\n\n if added_from and added_to:\n self.edges += 1",
"def get_cost_of_edge(self, _from, _to):\r\n if (_from, _to) in self.__cost.keys():\r\n return self.__cost[(_from, _to)]\r\n elif (_to, _from) in self.__cost.keys():\r\n return self.__cost[(_to, _from)]\r\n else:\r\n raise GraphException(\"The given edge does not exist.\")",
"def add_edge(self, v1, v2):\n if v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise ValueError(f\"The second Vertices you provided: {v2} is not in the graph. You can't link to a vertices that isn't in the graph.\")",
"def add_edge(self, v1, v2):\n pass # TODO",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('nonexistent vertex/node')",
"def add_vertex_edge(self, vertices):\n if len(vertices) < 2:\n raise Exception('Cannot have a single vertex')\n self.add_vertex(vertices[0])\n length_array = len(vertices)\n for iterator in range(1, length_array):\n num = vertices[iterator]\n is_number = False\n try:\n int(num)\n is_number = True\n except ValueError:\n pass\n if is_number:\n self.add_edge(vertices[0], num)",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")",
"def add_edge(self, item1: Any, item2: Any, weight: Union[int, float]) -> None:\n if item1 in self._vertices and item2 in self._vertices:\n v1 = self._vertices[item1]\n v2 = self._vertices[item2]\n\n # Add the new edge\n v1.neighbours[v2] = weight\n v2.neighbours[v1] = weight\n else:\n # We didn't find an existing vertex for both items.\n raise ValueError",
"def add_edge(self, name1: Any, name2: Any, weight: float = 1.0) -> None:\n if name1 in self._vertices and name2 in self._vertices:\n v1 = self._vertices[name1]\n v2 = self._vertices[name2]\n\n # Add the new edge\n v1.neighbours[v2] = weight\n v2.neighbours[v1] = weight\n else:\n # We didn't find an existing vertex for both items.\n raise ValueError",
"def check_input(nodes, num_edges):\n num_nodes = len(nodes)\n min_edges = num_nodes - 1\n if num_edges < min_edges:\n raise ValueError('num_edges less than minimum (%i)' % min_edges)\n max_edges = num_nodes * (num_nodes - 1)\n if num_edges > max_edges:\n raise ValueError('num_edges greater than maximum (%i)' % max_edges)",
"def edge_num(self,row1,col1,row2,col2):\n\n row = row1\n col = col1\n row_n = row2\n col_n = col2\n \n if row2 < row1 or col2 < col1:\n row = row2\n col = col2\n row_n = row1\n col_n = col1\n \n if not ((row == row_n and col == col_n - 1) or (row == row_n-1 and col == col_n)):\n return -1\n\n if row < 0 or row_n >= self.rows or col < 0 or col_n >= self.cols:\n return -1\n \n node1 = row*self.rows+col+1\n node2 = row_n*self.rows+col_n+1\n edge_number = self.edge2index[(node1,node2)]\n #print \"%s %s: %d\" % (str(node1),str(node2),edge_number)\n \"\"\"\n #THIS DOWN HERE WOULD WORK IF GRAPHILLION NUMBERED EDGES CORRECTLY BUT IT DOESNT\n #print \"(%d,%d) (%d,%d)\" % (row,col,row_n,col_n)\n if row + col < self.cols - 1:\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * row\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 1 + 2 * row\n #edges[edge_number] = 1\n else:\n col_dist = self.cols - col - 1\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * col_dist - 1\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 2 * col_dist\n #edges[edge_number] = 1\n \"\"\"\n\n return edge_number",
"def get_edge(update: Update, context: CallbackContext):\n\n try:\n try: \n element = str(context.args[0])\n edge = str(context.args[1])\n message = look_edges_db(element,edge)\n except:\n element = str(context.args[0])\n message = look_edges_db(element) \n except:\n message = '''choose valid element, e.g. Co and edge: \\n\n Fe\n Ni L \n Co L3\n '''\n \n context.bot.send_message(chat_id=update.message.chat_id, text=message, parse_mode='HTML')",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].edges.add(v2)\n self.vertices[v2].edges.add(v1)\n else:\n raise IndexError(\"That vertex does not exist!\")",
"def add_edge(self, v1, v2):\n\n (x1, y1) = v1\n (x2, y2) = v2\n\n if not self.has_vertex(x1, y1) or not self.has_vertex(x2, y2): return\n if v1 not in self.get_neighbors(x2, y2): return\n\n self._reachable[v1].add(v2)\n self._reachable[v2].add(v1)",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('That vertex does not exist')",
"def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))",
"def edge_direction(a, b):\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices: self.vertices[v1].add(v2)\n else: raise IndexError(\"Nonexistant Vert.\")",
"def add_edge(self, vertices: Iterable[\"Vertex\"]) -> None:\n vertices = list(vertices)\n if len(vertices) == 2:\n self.edges.append(self.add_vertices(vertices)) # type: ignore\n else:\n raise DXFValueError(\n \"Invalid vertices count, expected two vertices.\"\n )"
]
| [
"0.6220483",
"0.61472213",
"0.59278435",
"0.5844462",
"0.58335537",
"0.5662212",
"0.56271285",
"0.55602974",
"0.5559249",
"0.5526453",
"0.5518155",
"0.5488179",
"0.54742926",
"0.5436777",
"0.53099567",
"0.5294081",
"0.5268533",
"0.5267662",
"0.5257673",
"0.5257595",
"0.52558845",
"0.5249611",
"0.52464443",
"0.5246195",
"0.5231106",
"0.5222757",
"0.521104",
"0.5202654",
"0.51695275",
"0.51520526"
]
| 0.708619 | 0 |
restores the state of the graph from a previously made copy, if there is at least one copy to restore from | def restore_graph_ui(self):
if len(self._graph_copies) == 0:
print('No copies to restore!')
return
# the last made copy is restored in graph
self._graph = self._graph_copies.pop(-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restore(self):\n self.nodes.restore()",
"def _Restore(self) -> None:\n self._SetNodes(self._nodes)",
"def _restoreGraph(self):\n\n # self.tempG = self.g.copy()\n\n if nx.is_directed(self.g):\n self.tempG = nx.DiGraph(self.g)\n else:\n self.tempG = nx.Graph(self.g)\n self.deletedEdges = []\n self.deletedNodes = []",
"def _restore(self, graph):\n raise NotImplementedError()",
"def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def restore(self):\n self.abstract_obj.restore()",
"def restore(self):\n raise NotImplementedError",
"def _save_state_as_orig(self):\n self._orig = None\n self._orig = deepcopy(self)",
"def _restore(self, a_path):\n super(RDPAnalyzer, self)._restore(a_path)\n self._model._restore()",
"def restore_full_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreSystemState(state_ref)\n self.ale.deleteState(state_ref)",
"def restore_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreState(state_ref)\n self.ale.deleteState(state_ref)",
"def restore(self,):\n self.pos, self.dataptr, = self.stack.pop()",
"def restore(self, restore):\n self._restore = restore",
"def restore(self):\n self.weight = self._backup_weight",
"def restore_state(self, ckpt):\n raise NotImplemented()",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def restore(self, memento):\n self.state = memento.state",
"def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()",
"def revert(self, fgraph, checkpoint):\r\n h = self.history[fgraph]\r\n self.history[fgraph] = None\r\n while len(h) > checkpoint:\r\n f = h.pop()\r\n f()\r\n self.history[fgraph] = h",
"def restore_last_undo_point(self):\n self.unload()",
"def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())",
"def restore(self):\n self.igate.restore()\n self.fgate.restore()\n self.ogate.restore()\n super(LSTM, self).restore()",
"def reload(self):\n self.restore()",
"def restore_object(self):\n self.co_worker_list = self.original_co_worker_list",
"def _restore_training_state(self, restore_state):\n self.load_state_dict(restore_state[\"model\"])\n self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n start_iteration = restore_state[\"iteration\"] + 1\n if self.config[\"verbose\"]:\n print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n if restore_state[\"best_model_found\"]:\n # Update checkpointer with appropriate information about best model\n # Note that the best model found so far may not be the model in the\n # checkpoint that is currently being loaded.\n self.checkpointer.best_model_found = True\n self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n self.checkpointer.best_score = restore_state[\"best_score\"]\n if self.config[\"verbose\"]:\n print(\n f\"Updated checkpointer: \"\n f\"best_score={self.checkpointer.best_score:.3f}, \"\n f\"best_iteration={self.checkpointer.best_iteration}\"\n )\n return start_iteration",
"def restore(self):\n pert_params = list(self.net.parameters())\n saved_params = list(self.saved_net.parameters())\n for perturbed, saved in zip(pert_params, saved_params):\n perturbed_shape = perturbed.shape\n saved_shape = saved.shape\n perturbed = perturbed.flatten()\n saved = saved.flatten()\n for i, _ in enumerate(perturbed.data):\n perturbed.data[i] = saved.data[i]\n perturbed = perturbed.view(perturbed_shape)\n saved = saved.view(saved_shape)",
"def reset_graph(self):\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)",
"def restore_state(self, state: ale_py.ALEState):\n self.ale.restoreState(state)",
"def revert(self):\n\n if len(self.stack) == 0 or not self.revertable:\n return\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,THING_REVERT_DISTANCE))\n\n state = self.stack.pop()\n\n #not sure if this helps, but it can't hurt\n self.model.detachNode()\n\n for x in self.toRevert:\n self.toRevert[x](state[x])"
]
| [
"0.74744827",
"0.7448601",
"0.74397695",
"0.73925924",
"0.7309107",
"0.70217335",
"0.69016004",
"0.6828779",
"0.68268186",
"0.67718124",
"0.6744612",
"0.67356306",
"0.6699239",
"0.66833806",
"0.6653536",
"0.65983886",
"0.6584011",
"0.65656936",
"0.65646636",
"0.6524735",
"0.64864534",
"0.64658684",
"0.6452562",
"0.644115",
"0.6420117",
"0.64185786",
"0.6391825",
"0.6331862",
"0.6322302",
"0.63138765"
]
| 0.7628298 | 0 |
reads the graph from a file whose name is read from the keyboard. | def read_graph_ui(self):
filename = input('enter filename: ')
try:
self._graph = read_graph(filename)
except FileNotFoundError:
print('invalid filename! ') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g",
"def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n print('Read Graph')\n return G",
"def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)",
"def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)",
"def read_graph(file_name):\r\n with open(file_name, 'r') as f:\r\n lines = f.readlines()\r\n first_line = lines[0].strip().split()\r\n no_vertices = int(first_line[0])\r\n new_graph = UndirectedGraph(no_vertices)\r\n for line in lines[1:]:\r\n if line == \"\":\r\n continue\r\n line = line.strip().split()\r\n _from, _to, _cost = int(line[0]), int(line[1]), int(line[2])\r\n new_graph.add_edge(_from, _to, _cost)\r\n return new_graph",
"def load_graph(file_name, directed=True):\n G = nx.DiGraph() if directed else nx.Graph()\n with open(file_name, \"r\") as f:\n for line in f:\n tokens = line.split()\n u = int(tokens[0])\n v = int(tokens[1])\n if len(tokens) > 2:\n w = float(tokens[2])\n G.add_edge(u, v, weight=w)\n else:\n G.add_edge(u,v)\n return G",
"def read_graph(filename):\n G = Hypergraph()\n\n f = open(filename, 'r', encoding='utf8')\n lines = f.readlines()\n if args.weighted:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n weight = line[1]\n G.add_edge(edge_name, line[2:], float(weight))\n else:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n G.add_edge(edge_name, line[1:])\n f.close()\n return G",
"def load_graph(graphname,path='./data/',mname='A'):\n\n\tdata=sio.loadmat(path+graphname)\n\treturn data[mname]",
"def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!",
"def loadDataZachary(fileName):\n\n \"Initialize a graph\"\n G = nx.Graph()\n\n \"Open file\"\n f = open(fileName)\n\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n while line:\n if(line[0]!=\"%\"):\n ls =line.split(' ')\n num,nums=int(ls[0]),int(ls[1])\n G.add_edge(num,nums)\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n\n \"Closing the file\"\n f.close()\n\n return G, 'Zachary'",
"def read_graph(filename):\n with open(filename, 'r') as file: # open the file\n # read the number of nodes and number of edges\n num_nodes, num_edges = DataIO.__preprocess_line(file.readline())\n graph = GraphProcessing.construct_null_graph(num_nodes) # construct a null graph\n for line in file.readlines(): # for every line in the file\n preprocessed_line = DataIO.__preprocess_line(line) # preprocess the line\n if preprocessed_line: # if the preprocessed line is not a null string\n # read the first and second node and the edge weight\n source_node, terminal_node, weight = preprocessed_line\n graph[source_node][terminal_node] = weight\n graph[terminal_node][source_node] = weight\n return graph # return the final graph",
"def readMovieData(filename):\r\n graph = Graph()\r\n with open(filename, \"r\", encoding=\"latin-1\") as ins:\r\n array = []\r\n delimiter = '/'\r\n for line in ins:\r\n names = line.split(delimiter)\r\n array.append(names)\r\n for i in range(1, len(names)):\r\n graph.addEdge(names[0], names[i])\r\n return graph",
"def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def load_graph(filename):\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\ttf.import_graph_def(graph_def, name='')",
"def ReadGraph(inputFileName):\n inputFile = open(inputFileName)\n jsonGraphArray = json.load(inputFile)\n graph = Graph.Graph()\n graph.load_from_json(jsonGraphArray)\n inputFile.close()\n return graph",
"def build_graph(file_name):\n graph = MyGraph()\n with open(file_name, 'r') as fin:\n line = fin.readline().replace('\\n', '')\n while line != \"\":\n vals = line.split(':')\n graph.add_node(vals[0], pos=(int(vals[1]),int(vals[2])))\n line = fin.readline().replace('\\n', '')\n dest = fin.readline().replace('\\n','').split('\\t')\n line = fin.readline().replace('\\n', '')\n edges = []\n while line != '':\n node_info = line.split('\\t')\n src = node_info[0]\n for node in range(1,len(node_info)):\n if node_info[node] != '':\n if (dest[node],src) not in edges:\n edges.append((src,dest[node], node_info[node]))\n line = fin.readline().replace('\\n','')\n for edge in edges:\n graph.add_edge(edge[0], edge[1], weight=int(edge[2]))\n\n return graph",
"def read_graph(filename):\n\n print(\"\\n\\n========== Loading graph: \" + filename + '==================')\n edges = {}\n\n inFile = open(filename)\n for line in inFile:\n roadInfo = line.split()\n\n # Skip blank lines, read in contents from non-empty lines.\n if (len(roadInfo) > 0):\n srcCity = roadInfo[0]\n destCity = roadInfo[1]\n\n if srcCity in edges:\n edges[srcCity] = edges[srcCity] + [destCity]\n else:\n edges[srcCity] = [destCity]\n\n if destCity in edges:\n edges[destCity] = edges[destCity] + [srcCity]\n else:\n edges[destCity] = [srcCity]\n\n print(\" done.\\n\")\n return edges",
"def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph",
"def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)",
"def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')",
"def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')",
"def read(name):\n\n if not name.endswith(\"gml\"):\n name = \"{0}.gml\".format(name)\n with open(name) as f:\n lines = f.readlines()\n newlines = []\n for line in lines:\n if line.strip().startswith(\"name\"):\n newline = line.replace(\"name\", \"label\", 1)\n else:\n newline = line\n newlines.append(newline)\n newname = \"nx_{0}\".format(name)\n with open(newname, \"w\") as f:\n f.writelines(newlines)\n network = networkx.read_gml(newname)\n # It should return a Network object instead of DiGraph\n return network",
"def load_graph(self, path):\n if path.split('.')[-1]=='gexf':\n self.graph = nx.read_gexf(path)\n else:\n self.graph = nx.read_gpickle(path)",
"def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)",
"def loadgraph(self, path):\n\n raise NotImplementedError",
"def load_graph( gname ):\n return NX.read_gpickle( gname )",
"def read_file(path):\n\tG = nx.Graph()\n\n\twith open(path, 'r') as in_file:\n\t\tfor line in in_file:\n\t\t\tcontents = line.split(\" \")\n\t\t\tu = int(contents[0])\n\t\t\tv = int(contents[1])\n\t\t\tstreet_type = int(contents[2])\n\t\t\ttime = int(contents[3])\n\t\t\tlength = int(contents[4])\n\t\t\tcost = 1/float(length)\n\t\t\t\n\t\t\tG.add_node(u)\n\t\t\tG.add_node(v)\n\t\t\tif street_type is 1:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\telse:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\t\tG.add_edge(v, u, street_type=street_type, time=time, length=length, cost=cost)\n\n\treturn G",
"def _read_network_file(in_name, in_format=\"\", directed=False):\n\n if in_format == 'edges':\n if directed:\n g = nx.read_edgelist(in_name, create_using=nx.DiGraph())\n else:\n g = nx.read_edgelist(in_name, data=False)\n elif in_format == 'gefx':\n g = nx.read_gexf(in_name)\n elif in_format == 'gml':\n g = nx.read_gml(in_name)\n elif in_format == 'graphML' or in_format == 'graphml':\n g = nx.read_graphml(in_name)\n nodesInfo = g.nodes(data=True)\n if len(nx.get_node_attributes(g,\"label\"))>0:\n node2Label = {nodeid: data[\"label\"].replace(\" \",\"_\") for (nodeid, data) in nodesInfo}\n g = nx.relabel_nodes(g, node2Label, copy=False)\n elif in_format == 'pajek':\n g = nx.read_pajek(in_name)\n elif in_format == 'ncol':\n g = nx.read_edgelist(in_name)\n else:\n raise Exception(\"UNKNOWN FORMAT \" + in_format)\n return g"
]
| [
"0.72687954",
"0.6888777",
"0.6812073",
"0.68117404",
"0.6764579",
"0.6715689",
"0.67132574",
"0.66535294",
"0.6625131",
"0.6620927",
"0.6601009",
"0.6579922",
"0.65244764",
"0.6517937",
"0.6517937",
"0.64780235",
"0.6475796",
"0.6463978",
"0.64423335",
"0.6433082",
"0.64282024",
"0.63906807",
"0.63864756",
"0.63826317",
"0.635511",
"0.6325638",
"0.6311862",
"0.6272085",
"0.6233106",
"0.6227428"
]
| 0.7448128 | 0 |
Generate positions (x, y coordinates) for each spike on the probe. This function assumes that the spikes were generated with the kilosort algorithm, so the base_folder holds all the necessary .npy arrays. To find which channels are most relevant for each spike, it looks into the spike's assigned template (a channels x time points array, looked up through spike_templates.npy). It then finds the minimum point of every channel, takes the median and standard deviation of these minima, and for each channel computes the difference between its minimum and the median. It marks as relevant to the template the channels whose difference is larger than threshold times the standard deviation. It then picks the relevant channels of the spike's raw data, finds the difference between each channel's minimum value and its median value over time, orders the channels according to these differences, and assigns weights between 0 and 1 (0 for a difference of 0, 1 for the maximum difference). Finally, it finds the x, y positions of the selected channels and adds to the position of the largest-difference channel the weighted average of the positions of the remaining selected channels. | def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,
used_spikes_indices=None, position_mult=2.25, threshold=0.1):
# Load the required data from the kilosort folder
channel_map = np.load(os.path.join(base_folder, 'channel_map.npy'))
active_channel_map = np.squeeze(channel_map, axis=1)
channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
spike_templates = np.load(os.path.join(base_folder, ct.SPIKE_TEMPLATES_FILENAME))
templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))
data_raw = np.memmap(os.path.join(base_folder, binary_data_filename),
dtype=np.int16, mode='r')
number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)
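    # Fortran-order reshape views the flat binary recording as a (channels x timepoints) array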
data_raw_kilosorted = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')
spike_times = np.squeeze(np.load(os.path.join(base_folder, ct.SPIKE_TIMES_FILENAME)).astype(np.int))
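    # Half-window, in samples, of raw data cut around each spike time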
time_points = 50
if used_spikes_indices is None:
used_spikes_indices = np.arange(0, len(spike_times))
# Run the loop over all spikes to get the positions
counter = 0
weighted_average_postions = np.empty((len(used_spikes_indices), 2))
spike_distance_on_probe = np.empty(len(used_spikes_indices))
for spike_index in np.arange(0, len(used_spikes_indices)):
spike_raw_data = data_raw_kilosorted[active_channel_map,
(spike_times[used_spikes_indices[spike_index]]-time_points):
(spike_times[used_spikes_indices[spike_index]]+time_points)]
template = templates[spike_templates[used_spikes_indices[spike_index]], :, :].squeeze()
relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)
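        # Peak-to-median difference of each relevant channel determines its weight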
spike_raw_data_median_over_time = np.median(spike_raw_data, axis=1)
peaks_to_median = spike_raw_data_median_over_time - spike_raw_data.min(axis=1)
peaks_to_median = peaks_to_median[relevant_channels]
relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]
peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)
peaks_to_median_sorted.append(np.median(spike_raw_data_median_over_time[relevant_channels]))
weights = _normalize(peaks_to_median_sorted)[:-1]
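        # Anchor at the strongest channel and shift it by the weighted mean offset of the other relevant channels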
relevant_channels_positions = channel_positions[relevant_channels_sorted]
pos_x = relevant_channels_positions[0, 0]
pos_y = relevant_channels_positions[0, 1]
new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])
new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])
weighted_average_postions[spike_index, :] = [new_pos_x, new_pos_y]
spike_distance_on_probe[spike_index] = np.sqrt(np.power(new_pos_x, 2) + np.power(new_pos_y, 2))
counter += 1
if counter % 5000 == 0:
print('Completed ' + str(counter) + ' spikes')
weighted_average_postions = weighted_average_postions * position_mult
# sort according to position on probe
spike_indices_sorted_by_probe_distance = np.array([b[0] for b in sorted(enumerate(spike_distance_on_probe),
key=lambda dist: dist[1])])
spike_distances_on_probe_sorted = np.array([b[1] for b in sorted(enumerate(spike_distance_on_probe),
key=lambda dist: dist[1])])
np.save(os.path.join(base_folder, ct.WEIGHTED_SPIKE_POSITIONS_FILENAME), weighted_average_postions)
return weighted_average_postions, spike_distance_on_probe, \
spike_indices_sorted_by_probe_distance, spike_distances_on_probe_sorted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):\n # Load the required data from the kilosort folder\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n if new_templates_array is None:\n try:\n templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))\n except FileNotFoundError:\n exit('No new_templates_array passed and no templates.npy found in folder')\n try:\n template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))\n except FileNotFoundError:\n template_markings = np.ones((len(templates)))\n templates = templates[template_markings > 0, :, :]\n\n else:\n if new_templates_array.shape[1] > new_templates_array.shape[2]:\n templates = np.reshape(new_templates_array, (new_templates_array.shape[0],\n new_templates_array.shape[2],\n new_templates_array.shape[1]))\n else:\n templates = new_templates_array\n\n # Run the loop over all templates to get the positions\n counter = 0\n templates_positions = []\n for template in templates:\n relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)\n\n template_median_over_time = np.median(template, axis=0)\n peaks_to_median = template_median_over_time - template.min(axis=0)\n peaks_to_median = peaks_to_median[relevant_channels]\n\n relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]\n\n peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)\n peaks_to_median_sorted.append(np.median(template_median_over_time[relevant_channels]))\n\n weights = _normalize(peaks_to_median_sorted)[:-1]\n relevant_channels_positions = channel_positions[relevant_channels_sorted]\n\n pos_x = relevant_channels_positions[0, 0]\n pos_y = relevant_channels_positions[0, 1]\n\n new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])\n new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])\n templates_positions.append([new_pos_x, new_pos_y])\n counter += 1\n if not (counter % 100):\n print('Completed ' + str(counter) + ' templates')\n\n templates_positions = np.array(templates_positions)\n\n np.save(os.path.join(base_folder, ct.WEIGHTED_TEMPLATE_POSITIONS_FILENAME), templates_positions)\n\n return np.array(templates_positions)",
"def apply_tracking2(td, num_spikes=20, alpha=0.5, threshold=-1):\n assert (alpha >= 0)\n assert (alpha <= 1)\n mix = 1 - alpha\n track_x = center_x = float(td.width / 2)\n track_y = center_y = float(td.height / 2)\n threshold_sq = math.floor(center_y ** 2)\n\n if threshold > 0:\n threshold_sq = math.floor(threshold ** 2)\n\n copy = np.copy(td.data).view(np.recarray)\n offset_x_arr = np.zeros(copy.size, np.float32)\n offset_y_arr = np.zeros(copy.size, np.float32)\n\n for spike_index in range(0, copy.size, num_spikes):\n frame_data = copy[spike_index:spike_index + num_spikes]\n distances = ((frame_data.x - track_x) ** 2) + (\n (frame_data.y - track_y) ** 2)\n valid_data = frame_data[distances < threshold_sq]\n\n if valid_data.size > 0:\n x_avg = float(np.sum(valid_data.x)) / valid_data.size\n y_avg = float(np.sum(valid_data.y)) / valid_data.size\n track_x = (track_x * alpha) + (x_avg * mix)\n track_y = (track_y * alpha) + (y_avg * mix)\n offset_x = int(round(center_x - track_x))\n offset_y = int(round(center_y - track_y))\n offset_x_arr[spike_index:spike_index + num_spikes] = offset_x\n offset_y_arr[spike_index:spike_index + num_spikes] = offset_y\n\n offset_x_arr[spike_index:] = offset_x\n offset_y_arr[spike_index:] = offset_y\n copy.x = (copy.x + offset_x_arr).astype(np.uint8)\n copy.y = (copy.y + offset_y_arr).astype(np.uint8)\n # remove the events that are out of bounds\n return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (\n copy.y < td.height)]",
"def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)",
"def get_spike_data():\n neuron_spikes = []\n first = 0\n last = 0\n for i, file in enumerate(os.listdir(\"\"\"/Users/markusekvall/Desktop/\n final_entropy_model/Spike_trains\"\"\")):\n if file.endswith(\".mat\"):\n if file[0] == \".\":\n continue\n else:\n x = sio.loadmat(os.path.join(\"./Spike_trains\", file))\n cluster_class = x[\"cluster_class\"]\n # Set to ==2 if you want the second cluster\n idx = [cluster_class[:, 0] == 1]\n spike = cluster_class[idx[0], 1]\n if np.size(spike) != 0:\n if max(spike) > last:\n last = max(spike)\n if min(spike) > last:\n first = min(spike)\n neuron_spikes.append(spike)\n return neuron_spikes, first, last",
"def update_pos(self, dims=None, nsamples=CLUSTERPARAMMAXSAMPLES):\n sort = self.neuron.sort\n spikes = sort.spikes\n if dims is None: # use all of them\n dims = list(self.pos) # some of these might not exist in spikes array\n sids = self.neuron.sids\n nspikes = len(sids)\n if nsamples and nspikes > nsamples: # subsample spikes\n step = nspikes // nsamples + 1\n print('n%d: update_pos() sampling every %d spikes instead of all %d'\n % (self.id, step, nspikes))\n sids = sids[::step]\n nspikes = len(sids) # update\n\n # check for pre-calculated spike param means and stds\n try: sort.means\n except AttributeError: sort.means = {}\n try: sort.stds\n except AttributeError: sort.stds = {}\n\n ## FIXME: some code duplication from sort.get_param_matrix()?\n for dim in dims:\n try:\n spikes[dim]\n except ValueError:\n continue # this dim doesn't exist in spikes record array, ignore it\n # data from all spikes:\n data = spikes[dim]\n # data from neuron's spikes, potentially subsample of them,\n # copied for in-place normalization:\n subdata = np.float64(data[sids].copy())\n # update unnormalized position:\n self.pos[dim] = float(np.median(subdata)) # from np.float64 for clean jsonpickle\n # calculate mean and std for normalization:\n try:\n mean = sort.means[dim]\n except KeyError:\n mean = data.mean()\n sort.means[dim] = float(mean) # save, from np.float for clean jsonpickle\n if dim in ['x0', 'y0'] and sort.probe.ncols > 1: # norm spatial params by x0 std\n try:\n std = sort.stds['x0']\n except KeyError:\n std = spikes['x0'].std()\n sort.stds['x0'] = float(std) # save, from np.float for clean jsonpickle\n else: # normalize all other params by their std\n try:\n std = sort.stds[dim]\n except KeyError:\n std = data.std()\n sort.stds[dim] = float(std) # save, from np.float for clean jsonpickle\n # now do the actual normalization:\n subdata -= mean\n if std != 0:\n subdata /= std\n # update normalized position:\n self.normpos[dim] = float(np.median(subdata)) # from float64 for clean jsonpickle",
"def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n 
blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n #master_steergaze.to_feather(datafilepath)",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def search_data(self, data_obj):\n logger.info(\"Start searching for coarse channel: %s\"%data_obj.header['coarse_chan'])\n self.logwriter.info(\"Start searching for %s ; coarse channel: %i \"%(data_obj.filename,data_obj.header['coarse_chan']))\n spectra, drift_indices = data_obj.load_data()\n tsteps = data_obj.tsteps\n tsteps_valid = data_obj.tsteps_valid\n tdwidth = data_obj.tdwidth\n fftlen = data_obj.fftlen\n nframes = tsteps_valid\n shoulder_size = data_obj.shoulder_size\n\n if self.flagging:\n ##EE This flags the edges of the PFF for BL data (with 3Hz res per channel).\n ##EE The PFF flat profile falls after around 100k channels.\n ##EE But it falls slowly enough that could use 50-80k channels.\n median_flag = np.median(spectra)\n# spectra[:,:80000] = median_flag/float(tsteps)\n# spectra[:,-80000:] = median_flag/float(tsteps)\n\n ##EE Flagging spikes in time series.\n time_series=spectra.sum(axis=1)\n time_series_median = np.median(time_series)\n mask=(time_series-time_series_median)/time_series.std() > 10 #Flagging spikes > 10 in SNR\n\n if mask.any():\n self.logwriter.info(\"Found spikes in the time series. Removing ...\")\n spectra[mask,:] = time_series_median/float(fftlen) # So that the value is not the median in the time_series.\n\n else:\n median_flag = np.array([0])\n\n # allocate array for findopplering\n # init findopplering array to zero\n tree_findoppler = np.zeros(tsteps * tdwidth,dtype=np.float64) + median_flag\n\n # allocate array for holding original\n # Allocates array in a fast way (without initialize)\n tree_findoppler_original = np.empty_like(tree_findoppler)\n\n # allocate array for negative doppler rates\n tree_findoppler_flip = np.empty_like(tree_findoppler)\n\n # build index mask for in-place tree doppler correction\n ibrev = np.zeros(tsteps, dtype=np.int32)\n\n for i in range(0, tsteps):\n ibrev[i] = bitrev(i, int(np.log2(tsteps)))\n\n##EE: should double check if tdwidth is really better than fftlen here.\n max_val = max_vals()\n if max_val.maxsnr == None:\n max_val.maxsnr = np.zeros(tdwidth, dtype=np.float64)\n if max_val.maxdrift == None:\n max_val.maxdrift = np.zeros(tdwidth, dtype=np.float64)\n if max_val.maxsmooth == None:\n max_val.maxsmooth = np.zeros(tdwidth, dtype='uint8')\n if max_val.maxid == None:\n max_val.maxid = np.zeros(tdwidth, dtype='uint32')\n if max_val.total_n_hits == None:\n max_val.total_n_hits = 0\n\n #EE: Making \"shoulders\" to avoid \"edge effects\". 
Could do further testing.\n specstart = int(tsteps*shoulder_size/2)\n specend = tdwidth - (tsteps * shoulder_size)\n\n #--------------------------------\n #Stats calc\n self.the_mean_val, self.the_stddev = comp_stats(spectra.sum(axis=0))\n\n #--------------------------------\n #Looping over drift_rate_nblock\n #--------------------------------\n drift_rate_nblock = int(np.floor(self.max_drift / (data_obj.drift_rate_resolution*tsteps_valid)))\n\n##EE-debuging kk = 0\n\n for drift_block in range(-1*drift_rate_nblock,drift_rate_nblock+1):\n logger.debug( \"Drift_block %i\"%drift_block)\n\n #----------------------------------------------------------------------\n # Negative drift rates search.\n #----------------------------------------------------------------------\n if drift_block <= 0:\n\n #Populates the find_doppler tree with the spectra\n populate_tree(spectra,tree_findoppler,nframes,tdwidth,tsteps,fftlen,shoulder_size,roll=drift_block,reverse=1)\n\n # populate original array\n np.copyto(tree_findoppler_original, tree_findoppler)\n\n # populate neg doppler array\n np.copyto(tree_findoppler_flip, tree_findoppler_original)\n \n # Flip matrix across X dimension to search negative doppler drift rates\n FlipX(tree_findoppler_flip, tdwidth, tsteps)\n logger.info(\"Doppler correcting reverse...\")\n tt.taylor_flt(tree_findoppler_flip, tsteps * tdwidth, tsteps)\n logger.debug( \"done...\")\n \n complete_drift_range = data_obj.drift_rate_resolution*np.array(range(-1*tsteps_valid*(np.abs(drift_block)+1)+1,-1*tsteps_valid*(np.abs(drift_block))+1))\n for k,drift_rate in enumerate(complete_drift_range[(complete_drift_range<self.min_drift) & (complete_drift_range>=-1*self.max_drift)]):\n # indx = ibrev[drift_indices[::-1][k]] * tdwidth\n\n # DCP 2020.04 -- WAR to drift rate in flipped files\n if data_obj.header['DELTAF'] < 0:\n drift_rate *= -1\n\n indx = ibrev[drift_indices[::-1][(complete_drift_range<self.min_drift) & (complete_drift_range>=-1*self.max_drift)][k]] * tdwidth\n\n # SEARCH NEGATIVE DRIFT RATES\n spectrum = tree_findoppler_flip[indx: indx + tdwidth]\n\n # normalize\n spectrum -= self.the_mean_val\n spectrum /= self.the_stddev\n\n #Reverse spectrum back\n spectrum = spectrum[::-1]\n\n n_hits, max_val = hitsearch(spectrum, specstart, specend, self.snr, drift_rate, data_obj.header, fftlen, tdwidth, max_val, 0)\n info_str = \"Found %d hits at drift rate %15.15f\\n\"%(n_hits, drift_rate)\n max_val.total_n_hits += n_hits\n logger.debug(info_str)\n self.logwriter.info(info_str)\n\n #----------------------------------------------------------------------\n # Positive drift rates search.\n #----------------------------------------------------------------------\n if drift_block >= 0:\n\n #Populates the find_doppler tree with the spectra\n populate_tree(spectra,tree_findoppler,nframes,tdwidth,tsteps,fftlen,shoulder_size,\n roll=drift_block,reverse=1)\n\n # populate original array\n np.copyto(tree_findoppler_original, tree_findoppler)\n\n logger.info(\"Doppler correcting forward...\")\n tt.taylor_flt(tree_findoppler, tsteps * tdwidth, tsteps)\n logger.debug( \"done...\")\n if (tree_findoppler == tree_findoppler_original).all():\n logger.error(\"taylor_flt has no effect?\")\n else:\n logger.debug(\"tree_findoppler changed\")\n\n ##EE: Calculates the range of drift rates for a full drift block.\n complete_drift_range = data_obj.drift_rate_resolution*np.array(range(tsteps_valid*(drift_block),tsteps_valid*(drift_block +1)))\n\n for k,drift_rate in 
enumerate(complete_drift_range[(complete_drift_range>=self.min_drift) & (complete_drift_range<=self.max_drift)]):\n\n indx = ibrev[drift_indices[k]] * tdwidth\n\n #DCP 2020.04 -- WAR to drift rate in flipped files\n if data_obj.header['DELTAF'] < 0:\n drift_rate *= -1\n\n # SEARCH POSITIVE DRIFT RATES\n spectrum = tree_findoppler[indx: indx+tdwidth]\n\n # normalize\n spectrum -= self.the_mean_val\n spectrum /= self.the_stddev\n\n n_hits, max_val = hitsearch(spectrum, specstart, specend, self.snr, drift_rate, data_obj.header, fftlen, tdwidth, max_val, 0)\n info_str = \"Found %d hits at drift rate %15.15f\\n\"%(n_hits, drift_rate)\n max_val.total_n_hits += n_hits\n logger.debug(info_str)\n self.logwriter.info(info_str)\n\n # Writing the top hits to file.\n self.filewriter = tophitsearch(tree_findoppler_original, max_val, tsteps, nframes, data_obj.header, tdwidth,\n fftlen, self.max_drift,data_obj.obs_length, out_dir = self.out_dir,\n logwriter=self.logwriter, filewriter=self.filewriter, obs_info=self.obs_info)\n\n logger.info(\"Total number of candidates for coarse channel \"+ str(data_obj.header['coarse_chan']) +\" is: %i\"%max_val.total_n_hits)",
"def main():\n\n pathfolder = \"/home/vanessa/DATA_SEEG/PKL_FILE/\"\n filename = \"/data.pkl\"\n # pathfolder = argv[1]\n # filename = argv[2]\n\n ti = 10. # initial time\n tf = 590. # final time\n t_split = 300. # split\n fs = 1000. # sampling frequency\n powerline = 50.\n\n thresholds = np.load(\"threshold.npy\") # load the threshold file\n meanthresh = thresholds.mean(axis=0)[1::2]\n stdthresh = thresholds.std(axis=0)[1::2]\n\n # features = 159 # classification features + (x,y,z)-coordinates\n\n for ii, id in enumerate(os.listdir(pathfolder)):\n\n print(id)\n\n df = pd.read_pickle(pathfolder + id + filename)\n\n validchannels = np.where(~df.loc[:, \"PTD\"].isnull())[0] # remove NaN values\n\n df = df.iloc[validchannels, :]\n _, p = df.shape\n\n timeseries = df.values[:, :-5] # we are not considering Y, ptd, coordinates\n\n data = remove_powerline(timeseries, fs) # remove power line effects\n\n #################### split into 2 fragments ############################\n\n split1half = data[:, int(fs*ti):int(fs*t_split)]\n split2half = data[:, int(fs*t_split):int(fs*tf)]\n\n timefeat1half = merge_temporal_features(split1half, fs, powerline,\n meanthresh)\n timefeat2half = merge_temporal_features(split2half, fs, powerline,\n meanthresh)\n\n ########################################################################\n\n cc = [df.index[t] for t in range(len(df.index))]\n arrays = [[id]*(2*len(df.index)), cc + cc]\n\n tuples = list(zip(*arrays))\n index = pd.MultiIndex.from_tuples(tuples, names=['patient', 'channel'])\n\n # temporal features from SEEG\n timefeatdf = pd.DataFrame(data=np.vstack((timefeat1half,\n timefeat2half)), index=index)\n\n # spatial features for MRI\n spacefeat = df.values[:, -4:]\n spacefeatdf = pd.DataFrame(data=np.vstack((spacefeat, spacefeat)),\n index=index, columns=\n ['PTD', 'xcoor', 'ycoor', 'zcoor'])\n\n # y labels\n ylab = df.values[:, -5]\n Ylabel = pd.DataFrame(data=np.append(ylab, ylab), index=index,\n columns=[\"Y\"])\n\n # pickle file in output\n outputpkl = pd.concat([timefeatdf, spacefeatdf, Ylabel], axis=1)\n\n outputpkl.to_pickle(pathfolder + id + \"/features.pkl\")\n\n if ii == 0:\n ddd = outputpkl\n else:\n ddd = pd.concat([ddd, outputpkl], axis=0)\n\n ddd.to_pickle(pathfolder + \"classificationset.pkl\")",
"def split(filepath, nsamples):\n start = np.cumsum([0] + list(nsamples[:-1]))\n if filepath[-10:] == 'analog.brw':\n filename = filepath[:-10]\n analog = read_3brain_analog(filepath)\n for i, (s,n) in enumerate(zip(start, nsamples)):\n name = f\"{filename}_part_{i}_analog.npz\"\n print(f\"Saving {name}\")\n sampling_rate = glia.sampling_rate(filepath)\n np.savez(name, analog=analog[s:s+n],\n sampling_rate=sampling_rate)\n elif filepath[-4:] == \".bxr\":\n filename = filepath[:-4]\n # split spike-sorted data\n with h5py.File(filepath, 'r') as h5:\n # shared setup for the concatenated arrays\n sampling_rate = float(h5[\"3BRecInfo\"][\"3BRecVars\"][\"SamplingRate\"][0])\n channel_map = h5[\"3BRecInfo\"][\"3BMeaStreams\"][\"Raw\"][\"Chs\"][()]\n \n # map 3brain unit num\n # numbers typically from -4 to 9000\n # where negative numbers appear across multiple channels\n # and thus are presumably bad units...?\n # positive-numbered units appear on one channel\n unit_id_2_num = {}\n\n n_unit_nums = 0\n if \"SpikeUnits\" in h5[\"3BResults\"][\"3BChEvents\"]:\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeUnits'], 10000):\n n_unit_nums = max(n_unit_nums, chunk.max())\n \n unit_map = {}\n channel_unit_count = {}\n\n\n # operate on each of the concatenated arrays, one at a time\n for i, (s,n) in enumerate(zip(start, nsamples)):\n startTime = s / sampling_rate\n first_idx = None\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], 10000):\n valid_idxs = np.argwhere(h5[\"3BResults/3BChEvents/SpikeTimes\"] > s)\n if len(valid_idxs) > 0:\n first_idx = valid_idxs[0][0]\n break\n assert not first_idx is None\n print(f\"identified start idx of {first_idx}.\")\n\n # for simplicity, we just iterate again, could have faster implementation\n last_idx = len(h5['3BResults/3BChEvents/SpikeTimes'])\n chunk_size = 10000\n for j, chunk in enumerate(iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], chunk_size)):\n invalid_idxs = np.argwhere(chunk > s + n)\n if len(invalid_idxs) > 0:\n last_idx = invalid_idxs[0][0] + j*chunk_size\n break\n print(f\"identified stop idx of {last_idx}.\")\n \n spike_channel_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeChIDs\"][first_idx:last_idx]\n spike_unit_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeUnits\"][first_idx:last_idx]\n # poorly named; time is in units of 1/sampling_rate\n # aka sample number\n # subtract to adjust start time\n spike_times = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeTimes\"][first_idx:last_idx] - s\n \n\n \n csv_name = f'{filename}_part_{i}_spikes.csv'\n spikes = zip(spike_channel_ids, spike_unit_ids, spike_times)\n tot_spikes = spike_times.shape[0]\n print(f\"creating {csv_name} ...\")\n with open(csv_name, 'w', newline='') as csvfile:\n fieldnames = ['channel_i', 'channel_j', 'unit', \"spike_time\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for channel, unit_id, spike_time in tqdm(spikes,\n total=tot_spikes):\n c = channel_map[channel]\n # convert to tuple\n # account for 1-indexing\n c = (c[0]-1,c[1]-1)\n \n # count num units on channel\n # first check if we've seen this channel before\n if not c in channel_unit_count:\n # if not, initialize channel_unit_count for the channel\n channel_unit_count[c] = 1\n unit_num = 0\n # add unit\n unit_id_2_num[unit_id] = unit_num\n else:\n \n # then check if we've seen this unit before\n if not unit_id in unit_id_2_num:\n # if not, assign unit_num for this new unit\n unit_num = channel_unit_count[c]\n unit_id_2_num[unit_id] = unit_num\n 
channel_unit_count[c] += 1\n else:\n # otherwise, look it up\n unit_num = unit_id_2_num[unit_id]\n \n \n t = spike_time / sampling_rate\n writer.writerow({\"channel_i\": c[0],\n \"channel_j\": c[1],\n \"unit\": unit_num,\n \"spike_time\": t})\n \n np.save(f\"{filename}_channel_map.npy\", channel_map)",
"def extract_poses(self, labels):\n height, width = self.topdown_view.shape\n n_gridpoints_width, n_gridpoints_height = (\n width // self.dist - 1,\n height // self.dist - 1,\n )\n self.gridpoints = []\n for h in range(n_gridpoints_height):\n for w in range(n_gridpoints_width):\n point = (self.dist + h * self.dist, self.dist + w * self.dist)\n if self.valid_point(*point):\n self.gridpoints.append(point)\n\n # Find the closest point of the target class to each gridpoint\n poses = []\n self.cpis = []\n for point in self.gridpoints:\n closest_point_of_interest, label = self._bfs(point, labels)\n if closest_point_of_interest is None:\n continue\n\n poses.append((point, closest_point_of_interest, label))\n self.cpis.append(closest_point_of_interest)\n\n # Convert from topdown map coordinate system to that of the pathfinder\n startw, starty, starth = self._get_pathfinder_reference_point()\n for i, pose in enumerate(poses):\n pos, cpi, label = pose\n r1, c1 = pos\n r2, c2 = cpi\n new_pos = np.array(\n [\n startw + c1 * self.pixels_per_meter,\n starty,\n starth + r1 * self.pixels_per_meter,\n ]\n )\n new_cpi = np.array(\n [\n startw + c2 * self.pixels_per_meter,\n starty,\n starth + r2 * self.pixels_per_meter,\n ]\n )\n cam_normal = new_cpi - new_pos\n new_rot = self._compute_quat(cam_normal)\n poses[i] = (new_pos, new_rot, label)\n\n return poses",
"def process_folder(path):\n chans = get_channels(path)\n for chan in sorted(chans):\n ncs_fname = os.path.splitext(chans[chan])[0]\n plot_fname = 'spikes_{}_{}'.format(chan, ncs_fname)\n save_fname = os.path.join(OVERVIEW, plot_fname)\n spikes_overview(ncs_fname, save_fname)",
"def get_Sparrow_vols(vol_points_list,calibration_list,output_file,demagnified_pitch_size=6.25):\n # main HDF5 file\n try:\n print \"Creating new HDF5 file:\", output_file\n volumes = h5py.File(output_file,'w-')\n except:\n print \"Opening existing HDF5 file:\", output_file\n volumes = h5py.File(output_file,'r+')\n\n # main loop over points in volume and supersampling factors\n for i in xrange(len(vol_points_list)):\n \n # get volume points and create HDF5 group to store results for this loop\n vol_points = vol_points_list[i]\n vols_by_points = volumes.create_group('vol_points_'+str(i))\n\n # loop over calibration files for different supersampling factors\n for calibration_file in calibration_list:\n print \"Analyzing:\", calibration_file\n \n # create subgroup to write data to for this calibration file\n vols_by_sampling = vols_by_points.create_group('supersampling_factor_'+calibration_file.split('/')[-1].split('.')[0].split('_')[1])\n\n # get covariance operator\n Cov, raydb = get_Cov_from_calibration( calibration_file )\n\n # get point coordinates in discretized volume\n vol_coords = []\n vol_coords.append( get_voxel_coords(vol_points[0], raydb, pitch=demagnified_pitch_size))\n vol_coords.append( get_voxel_coords(vol_points[1], raydb, pitch=demagnified_pitch_size))\n print \"Volume points:\", vol_points\n print \"Volume coordinates:\",vol_coords\n\n # generate two psfs for vol_points and add them to get a volume containing both\n psf0 = get_psf_vol(vol_coords[0],Cov,raydb=raydb)\n psf1 = get_psf_vol(vol_coords[1],Cov,raydb=raydb)\n vol_vec = psf0 + psf1\n vol = np.reshape(vol_vec, Cov.vol_shape)\n dset = vols_by_sampling.create_dataset('Sparrow_volume', data=vol)\n volumes.close()\n return True",
"def set_data(self, waveforms, clusters=None, cluster_colors=None,\n clusters_unique=None, clusters_ordered=None,\n masks=None, geometrical_positions=None, spike_ids=None,\n spatial_arrangement=None, superposition=None,\n box_size=None, probe_scale=None, subselect=None):\n \n # select only a subsample of the spikes\n if subselect:\n nspk = waveforms.shape[0]\n if nspk > 0:\n indices = np.unique(np.random.randint(low=0, high=nspk, size=subselect))\n # waveforms = waveforms[indices,...]\n waveforms = np.take(waveforms, indices, axis=0)\n # spike_ids = spike_ids[indices,...]\n spike_ids = np.take(spike_ids, indices, axis=0)\n # clusters = clusters[indices,...]\n clusters = np.take(clusters, indices, axis=0)\n # masks = masks[indices,...]\n masks = np.take(masks, indices, axis=0)\n \n \n self.nspikes, self.nsamples, self.nchannels = waveforms.shape\n self.npoints = waveforms.size\n self.geometrical_positions = geometrical_positions\n self.spike_ids = spike_ids\n self.waveforms = waveforms\n \n # data organizer: reorder data according to clusters\n self.data_organizer = SpikeDataOrganizer(waveforms,\n clusters=clusters,\n cluster_colors=cluster_colors,\n clusters_unique=clusters_unique,\n clusters_ordered=clusters_ordered,\n masks=masks,\n nchannels=self.nchannels,\n spike_ids=spike_ids)\n \n # get reordered data\n self.waveforms_reordered = self.data_organizer.data_reordered\n self.nclusters = self.data_organizer.nclusters\n self.clusters = self.data_organizer.clusters\n self.masks = self.data_organizer.masks\n self.cluster_colors = self.data_organizer.cluster_colors\n self.clusters_unique = self.data_organizer.clusters_unique\n self.clusters_rel = self.data_organizer.clusters_rel\n self.clusters_depth = self.data_organizer.clusters_depth\n self.cluster_sizes = self.data_organizer.cluster_sizes\n self.cluster_sizes_dict = self.data_organizer.cluster_sizes_dict\n \n # prepare GPU data: waveform initial positions and colors\n data = self.prepare_waveform_data()\n \n # masks\n self.full_masks = np.repeat(self.masks.T.ravel(), self.nsamples)\n self.full_clusters = np.tile(np.repeat(self.clusters_rel, self.nsamples), self.nchannels)\n self.full_clusters_depth = np.tile(np.repeat(self.clusters_depth, self.nsamples), self.nchannels)\n self.full_channels = np.repeat(np.arange(self.nchannels, dtype=np.int32), self.nspikes * self.nsamples)\n \n # normalization in dataio instead\n self.normalized_data = data\n \n # position waveforms\n self.position_manager.set_info(self.nchannels, self.nclusters, \n geometrical_positions=self.geometrical_positions,\n spatial_arrangement=spatial_arrangement,\n superposition=superposition,\n box_size=box_size,\n probe_scale=probe_scale)\n \n # update the highlight manager\n self.highlight_manager.initialize()",
"def identify_peaks(xCar, xDate, xDir, xFilename, outDir, processedFileLoc, Engineering, threshold='.1',\n rthresh = '.7',\n xTimeThreshold='5.0', minElevated='2', xB='102', basePerc='50'):\n import csv, numpy\n import shutil\n from shapely.geometry import Point\n import pandas as pd\n import geopandas as gpd\n\n\n try:\n baseCalc = float(basePerc)\n xABThreshold = float(threshold)\n minElevated = float(minElevated)\n rMin = float(rthresh)\n xDistThreshold = 160.0 # find the maximum CH4 reading of observations within street segments of this grouping distance in meters\n xSDF = 4 # multiplier times standard deviation for floating baseline added to mean\n\n xB = int(xB)\n xTimeThreshold = float(xTimeThreshold)\n fn = xDir + xFilename # set processed csv file to read in\n fnOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".csv\"\n fnShape = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".shp\"\n fnLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".log\"\n pkLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \"_info.csv\"\n jsonOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \".geojson\"\n infOut = processedFileLoc + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n ### TEST THING\n fn = xDir + xFilename # set raw text file to read in\n filenames = nameFiles(outDir,processedFileLoc,xCar,xDate,True)\n fnOut = filenames['fnOut']\n fnShape = filenames['fnShape']\n fnLog = filenames['fnLog']\n pkLog = filenames['pkLog']\n jsonOut = filenames['jsonOut']\n infOut = filenames['infOut']\n\n print(f\"{outDir}Peaks_{xCar}_{xDate}_info.csv\")\n fLog = open(fnLog, 'w')\n shutil.copy(infOut, pkLog)\n\n # field column indices for various variables\n if Engineering == True:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3; fVelocity = 4; fU = 5\n fV = 6; fW = 7; fBCH4 = 10\n fBCH4 = 8; fBRSSI = 9; fTCH4 = 10\n TRSSI = 11;PRESS = 12; INLET = 13\n TEMP = 14; CH4 = 15;H20 = 16\n C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23;fLat = 24; fLon = 25\n elif not Engineering:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3;fVelocity = 4; fU = 5\n fV = 6; fW = 7\n fBCH4 = 8; fBRSSI = 9\n fTCH4 = 10; TRSSI = 11; PRESS = 12\n INLET = 13; TEMP = 14; CH4 = 15\n H20 = 16;C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23; fLat = 24;fLon = 25;\n fUavg = 33; fVavg = 34; fWavg = 35;\n fRavg = 36; fthetavg=37;\n fDist = 38; fOdometer = 39\n\n # read data in from text file and extract desired fields into a list, padding with 5 minute and hourly average\n x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,x11,x12,x13,x14,x15,x16,x17,x18 = [[] for _ in range(18)]\n\n count = -1\n with open(fn, 'r') as f:\n t = csv.reader(f)\n for row in t:\n woo = row\n # print(count)\n if count < 0:\n count += 1\n continue\n elif count >= 0:\n datet = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n ## if not engineering\n epoch = float(row[fEpochTime] + \".\" + row[fNanoSeconds][0])\n datetime = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n x1.append(epoch); x2.append(datetime)\n if row[fLat] == '':\n x3.append('')\n elif row[fLat] != '':\n x3.append(float(row[fLat]))\n if row[fLon] == '':\n x4.append('')\n elif row[fLon] != '':\n x4.append(float(row[fLon]))\n\n if row[fUavg] == '':\n x14.append('')\n elif row[fUavg] != '':\n x14.append(float(row[fUavg]))\n if row[fVavg] == '':\n 
x15.append('')\n elif row[fVavg] != '':\n x15.append(float(row[fVavg]))\n if row[fWavg] == '':\n x16.append('')\n elif row[fWavg] != '':\n x16.append(float(row[fWavg]))\n\n if row[fthetavg] == '':\n x18.append('')\n elif row[fthetavg] != '':\n x18.append(float(row[fthetavg]))\n if row[fRavg] == '':\n x17.append('')\n elif row[fRavg] != '':\n x17.append(float(row[fRavg]))\n\n x5.append(float(row[fBCH4]))\n x6.append(float(row[fTCH4]))\n x7.append(0.0)\n x8.append(0.0)\n x9.append(row[fOdometer])\n x11.append(float(row[C2H6]))\n x12.append(float(row[C2C1]))\n x13.append(float(row[R]))\n count += 1\n print(f\"Number of observations processed:{count}\")\n\n # convert lists to numpy arrays\n aEpochTime = numpy.array(x1)\n aDateTime = numpy.array(x2)\n aLat = numpy.array(x3)\n aLon = numpy.array(x4)\n aCH4 = numpy.array(x5)\n aTCH4 = numpy.array(x6)\n aMean = numpy.array(x7)\n aMeanC2H6 = numpy.array(x7)\n aThreshold = numpy.array(x8)\n aOdom = numpy.array(x9)\n\n # adding ethane stuff\n aC2H6 = numpy.array(x11)\n aC2C1 = numpy.array(x12)\n aR = numpy.array(x13)\n aUavg = numpy.array(x14)\n aVavg = numpy.array(x15)\n aWavg = numpy.array(x16)\n aRavg = numpy.array(x17)\n aThavg = numpy.array(x18)\n\n\n xLatMean = numpy.mean(aLat)\n xLonMean = numpy.mean(aLon)\n #xCH4Mean = numpy.mean(aCH4)\n #xC2H6Mean = numpy.mean(aC2H6)\n #xC2C1Mean = numpy.mean(aC2C1)\n\n fLog.write(\"Day CH4_mean = \" + str(numpy.mean(aCH4)) +\n \", Day CH4 SD = \" + str(numpy.std(aCH4)) + \"\\n\")\n fLog.write(\"Day C2H6 Mean = \" + str(numpy.mean(aC2H6)) +\n \", Day C2H6 SD = \" + str(numpy.std(aC2H6)) + \"\\n\")\n fLog.write(\"Center lon/lat = \" + str(xLonMean) + \", \" + str(xLatMean) + \"\\n\")\n\n lstCH4_AB = []\n\n # generate list of the index for observations that were above the threshold\n for i in range(0, count - 2):\n if ((count - 2) > xB):\n topBound = min((i + xB), (count - 2))\n botBound = max((i - xB), 0)\n\n for t in range(min((i + xB), (count - 2)), i, -1):\n if aEpochTime[t] < (aEpochTime[i] + (xB / 2)):\n topBound = t\n break\n for b in range(max((i - xB), 0), i):\n if aEpochTime[b] > (aEpochTime[i] - (xB / 2)):\n botBound = b\n break\n\n xCH4Mean = numpy.percentile(aCH4[botBound:topBound], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[botBound:topBound], baseCalc)\n\n # xCH4SD = numpy.std(aCH4[botBound:topBound])\n else:\n xCH4Mean = numpy.percentile(aCH4[0:(count - 2)], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[0:(count - 2)], baseCalc)\n\n # xCH4SD = numpy.std(aCH4[0:(count-2)])\n xThreshold = xCH4Mean + (xCH4Mean * xABThreshold)\n xThreshold_c2h6 = xC2H6Mean + (xC2H6Mean * xABThreshold)\n\n if (aCH4[i] > xThreshold and aR[i]>rMin):\n #if (aCH4[i] > xThreshold):\n lstCH4_AB.append(i)\n aMean[i] = xCH4Mean\n aMeanC2H6[i] = xC2H6Mean\n aThreshold[i] = xThreshold\n\n # now group the above baseline threshold observations into groups based on distance threshold\n lstCH4_ABP = []\n xDistPeak = 0.0\n xCH4Peak = 0.0\n xTime = 0.0\n cntPeak = 0\n cnt = 0\n sID = \"\"\n sPeriod5Min = \"\"\n prevIndex = 0\n for i in lstCH4_AB:\n if (cnt == 0):\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n else:\n # calculate distance between points\n xDist = haversine(xLat1, xLon1, aLat[i], aLon[i])\n xDistPeak += xDist\n xCH4Peak += (xDist * (aCH4[i] - aMean[i]))\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n if (sID == \"\"):\n xTime = aEpochTime[i]\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n if ((aEpochTime[i] - 
aEpochTime[prevIndex]) > xTimeThreshold): # initial start of a observed peak\n cntPeak += 1\n xTime = aEpochTime[i]\n xDistPeak = 0.0\n xCH4Peak = 0.0\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n # print str(i) +\", \" + str(xDist) + \",\" + str(cntPeak) +\",\" + str(xDistPeak)\n lstCH4_ABP.append(\n [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i], aThreshold[i],\n xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n aUavg[i],aVavg[i],aWavg[i],aRavg[i],aThavg[i]])\n cnt += 1\n prevIndex = i\n\n # Finding peak_id larger than 160.0 m\n tmpsidlist = []\n for r in lstCH4_ABP:\n if (float(r[9]) > 160.0) and (r[0] not in tmpsidlist):\n tmpsidlist.append(r[0])\n cntPeak -= len(tmpsidlist)\n\n fLog.write(\"Number of peaks found: \" + str(cntPeak) + \"\\n\")\n print(f\"{xCar} \\t {xDate} \\t {xFilename} \\t {count} \\t {len(lstCH4_ABP)}\")\n\n # write out the observed peaks to a csv to be read into a GIS\n fOut = open(fnOut, 'w')\n # s = \"PEAK_NUM,EPOCHSTART,EPOCH,DATETIME,CH4,LON,LAT,CH4_BASELINE,CH4_THRESHOLD,PEAK_DIST_M,PEAK_CH4,TCH4,PERIOD5MIN\\n\"\n s = \"OP_NUM,OP_EPOCHSTART,OB_EPOCH,OB_DATETIME,OB_CH4,OB_LON,OB_LAT,OB_CH4_BASELINE,\" \\\n \"OB_CH4_THRESHOLD,OP_PEAK_DIST_M,OP_PEAK_CH4,OB_TCH4,OB_C2H6,\" \\\n \"OB_C2C1,OB_R,OB_C2H6_BASELINE,OB_PERIOD5MIN,ODOMETER,OB_U_AVG,OB_V_AVG,OB_W_AVG,\" \\\n \"OB_R_AVG,OB_THETA_AVG\\n\"\n fOut.write(s)\n\n truecount = 0\n for r in lstCH4_ABP:\n if r[0] not in tmpsidlist:\n s = ''\n for rr in r:\n s += str(rr) + ','\n s = s[:-1]\n s += '\\n'\n fOut.write(s)\n truecount += 1\n fOut.close()\n fLog.close()\n\n openFile = pd.read_csv(fnOut)\n if openFile.shape[0] != 0:\n pkDistDf = openFile.copy().groupby('OP_NUM', as_index=False).apply(\n lambda x: max(x.ODOMETER) - min(x.ODOMETER))\n pkDistDf.columns = ['OP_NUM', 'OP_DISTANCE']\n openFile = pd.merge(openFile.copy(), pkDistDf)\n tempCount = openFile.groupby('OP_NUM', as_index=False).OP_EPOCHSTART.count().rename(\n columns={'OP_EPOCHSTART': 'Frequency'})\n tempCount = tempCount.loc[tempCount.Frequency >= minElevated, :]\n if tempCount.shape[0] == 0:\n print(f\"No Observed Peaks with enough Elevated Readings Found in the file: {xFilename}\")\n tempCount.to_csv(fnOut) ## added to deal with issue where it wasn't being filtered out\n elif tempCount.shape[0] != 0:\n oFile = pd.merge(openFile, tempCount, on=['OP_NUM'])\n openFile = oFile.copy()\n del (oFile)\n openFile[\"minElevated\"] = openFile.apply(lambda x: int(minElevated), axis=1)\n openFile['OB_CH4_AB'] = openFile.loc[:, 'OB_CH4'].sub(openFile.loc[:, 'OB_CH4_BASELINE'], axis=0)\n openFile['OB_C2H6_AB'] = openFile.loc[:, 'OB_C2H6'].sub(openFile.loc[:, 'OB_C2H6_BASELINE'],axis=0)\n openFile.to_csv(fnOut, index=False)\n\n\n fileWt = weighted_loc(openFile, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'}).reset_index(drop=True)\n geometry_temp = [Point(lon, lat) for lon, lat in zip(fileWt['pk_LON'], fileWt['pk_LAT'])]\n crs = 'EPSG:4326'\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' WITH BUFFER DISTANCE\n gdf_buff = gpd.GeoDataFrame(fileWt, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n\n ##maybe this is the issue?\n #gdf_buff = gdf_buff.to_crs(epsg=32610)\n #gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(30)\n try:\n 
gdf_buff.to_file(jsonOut, driver=\"GeoJSON\")\n #gdf_buff.to_file('testthing.geojson', driver=\"GeoJSON\")\n except:\n print(\"Error Saving JSON File\")\n elif openFile.shape[0] == 0:\n print(f\"No Observed Peaks Found in the file:{xFilename}\")\n except ValueError:\n print(\"Error in Identify Peaks\")\n return False",
"def identify_peaks(xCar, xDate, xDir, xFilename, outDir, processedFileLoc, Engineering, threshold='.1',\n rthresh = '.7',\n xTimeThreshold='5.0', minElevated='2', xB='102', basePerc='50'):\n import csv, numpy\n import shutil\n from shapely.geometry import Point\n import pandas as pd\n import geopandas as gpd\n\n\n try:\n baseCalc = float(basePerc)\n xABThreshold = float(threshold)\n minElevated = float(minElevated)\n rMin = float(rthresh)\n xDistThreshold = 160.0 # find the maximum CH4 reading of observations within street segments of this grouping distance in meters\n xSDF = 4 # multiplier times standard deviation for floating baseline added to mean\n\n xB = int(xB)\n xTimeThreshold = float(xTimeThreshold)\n fn = xDir + xFilename # set processed csv file to read in\n fnOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".csv\"\n fnShape = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".shp\"\n fnLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".log\"\n pkLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \"_info.csv\"\n jsonOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \".geojson\"\n infOut = processedFileLoc + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n ### TEST THING\n fn = xDir + xFilename # set raw text file to read in\n filenames = nameFiles(outDir,processedFileLoc,xCar,xDate,True)\n fnOut = filenames['fnOut']\n fnShape = filenames['fnShape']\n fnLog = filenames['fnLog']\n pkLog = filenames['pkLog']\n jsonOut = filenames['jsonOut']\n infOut = filenames['infOut']\n\n print(f\"{outDir}Peaks_{xCar}_{xDate}_info.csv\")\n fLog = open(fnLog, 'w')\n shutil.copy(infOut, pkLog)\n\n # field column indices for various variables\n if Engineering == True:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3; fVelocity = 4; fU = 5\n fV = 6; fW = 7; fBCH4 = 10\n fBCH4 = 8; fBRSSI = 9; fTCH4 = 10\n TRSSI = 11;PRESS = 12; INLET = 13\n TEMP = 14; CH4 = 15;H20 = 16\n C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23;fLat = 24; fLon = 25\n elif not Engineering:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3;fVelocity = 4; fU = 5\n fV = 6; fW = 7\n fBCH4 = 8; fBRSSI = 9\n fTCH4 = 10; TRSSI = 11; PRESS = 12\n INLET = 13; TEMP = 14; CH4 = 15\n H20 = 16;C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23; fLat = 24;fLon = 25;\n fUavg = 33; fVavg = 34; fWavg = 35;\n fRavg = 36; fthetavg=37;\n fDist = 38; fOdometer = 39\n\n # read data in from text file and extract desired fields into a list, padding with 5 minute and hourly average\n x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,x11,x12,x13,x14,x15,x16,x17,x18 = [[] for _ in range(18)]\n\n count = -1\n with open(fn, 'r') as f:\n t = csv.reader(f)\n for row in t:\n woo = row\n # print(count)\n if count < 0:\n count += 1\n continue\n elif count >= 0:\n datet = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n ## if not engineering\n epoch = float(row[fEpochTime] + \".\" + row[fNanoSeconds][0])\n datetime = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n x1.append(epoch); x2.append(datetime)\n if row[fLat] == '':\n x3.append('')\n elif row[fLat] != '':\n x3.append(float(row[fLat]))\n if row[fLon] == '':\n x4.append('')\n elif row[fLon] != '':\n x4.append(float(row[fLon]))\n\n if row[fUavg] == '':\n x14.append('')\n elif row[fUavg] != '':\n x14.append(float(row[fUavg]))\n if row[fVavg] == '':\n 
x15.append('')\n elif row[fVavg] != '':\n x15.append(float(row[fVavg]))\n if row[fWavg] == '':\n x16.append('')\n elif row[fWavg] != '':\n x16.append(float(row[fWavg]))\n\n if row[fthetavg] == '':\n x18.append('')\n elif row[fthetavg] != '':\n x18.append(float(row[fthetavg]))\n if row[fRavg] == '':\n x17.append('')\n elif row[fRavg] != '':\n x17.append(float(row[fRavg]))\n\n x5.append(float(row[fBCH4]))\n x6.append(float(row[fTCH4]))\n x7.append(0.0)\n x8.append(0.0)\n x9.append(row[fOdometer])\n x11.append(float(row[C2H6]))\n x12.append(float(row[C2C1]))\n x13.append(float(row[R]))\n count += 1\n print(f\"Number of observations processed:{count}\")\n\n # convert lists to numpy arrays\n aEpochTime = numpy.array(x1)\n aDateTime = numpy.array(x2)\n aLat = numpy.array(x3)\n aLon = numpy.array(x4)\n aCH4 = numpy.array(x5)\n aTCH4 = numpy.array(x6)\n aMean = numpy.array(x7)\n aMeanC2H6 = numpy.array(x7)\n\n aMeanCH4_true = numpy.array(x7)\n aMedianCH4 = numpy.array(x7)\n aMaxCH4 = numpy.array(x7)\n aMinCH4 = numpy.array(x7)\n\n aSTDCH4 = numpy.array(x7)\n\n aThreshold = numpy.array(x8)\n aOdom = numpy.array(x9)\n\n # adding ethane stuff\n aC2H6 = numpy.array(x11)\n aC2C1 = numpy.array(x12)\n aR = numpy.array(x13)\n aUavg = numpy.array(x14)\n aVavg = numpy.array(x15)\n aWavg = numpy.array(x16)\n aRavg = numpy.array(x17)\n aThavg = numpy.array(x18)\n\n\n xLatMean = numpy.mean(aLat)\n xLonMean = numpy.mean(aLon)\n #xCH4Mean = numpy.mean(aCH4)\n #xC2H6Mean = numpy.mean(aC2H6)\n #xC2C1Mean = numpy.mean(aC2C1)\n\n fLog.write(\"Day CH4_mean = \" + str(numpy.mean(aCH4)) +\n \", Day CH4 SD = \" + str(numpy.std(aCH4)) + \"\\n\")\n fLog.write(\"Day C2H6 Mean = \" + str(numpy.mean(aC2H6)) +\n \", Day C2H6 SD = \" + str(numpy.std(aC2H6)) + \"\\n\")\n fLog.write(\"Center lon/lat = \" + str(xLonMean) + \", \" + str(xLatMean) + \"\\n\")\n\n lstCH4_AB = []\n\n # generate list of the index for observations that were above the threshold\n for i in range(0, count - 2):\n if ((count - 2) > xB):\n topBound = min((i + xB), (count - 2))\n botBound = max((i - xB), 0)\n\n for t in range(min((i + xB), (count - 2)), i, -1):\n if aEpochTime[t] < (aEpochTime[i] + (xB / 2)):\n topBound = t\n break\n for b in range(max((i - xB), 0), i):\n if aEpochTime[b] > (aEpochTime[i] - (xB / 2)):\n botBound = b\n break\n\n xCH4Mean = numpy.percentile(aCH4[botBound:topBound], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[botBound:topBound], baseCalc)\n xMeanCH4_true = numpy.mean(aCH4[botBound:topBound])\n xSTDCH4 = numpy.std(aCH4[botBound:topBound])\n xMaxCH4 = numpy.max(aCH4[botBound:topBound])\n xMinCH4 = numpy.min(aCH4[botBound:topBound])\n xMedianCH4 = numpy.percentile(aCH4[botBound:topBound], 50)\n\n # xCH4SD = numpy.std(aCH4[botBound:topBound])\n else:\n xCH4Mean = numpy.percentile(aCH4[0:(count - 2)], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[0:(count - 2)], baseCalc)\n xMeanCH4_true = numpy.mean(aCH4[0:(count - 2)])\n xSTDCH4 = numpy.std(aCH4[0:(count - 2)])\n xMaxCH4 = numpy.max(aCH4[0:(count - 2)])\n xMinCH4 = numpy.min(aCH4[0:(count - 2)])\n xMedianCH4 = numpy.percentile(aCH4[0:(count - 2)], 50)\n\n\n # xCH4SD = numpy.std(aCH4[0:(count-2)])\n xThreshold = xCH4Mean + (xCH4Mean * xABThreshold)\n xThreshold_c2h6 = xC2H6Mean + (xC2H6Mean * xABThreshold)\n\n if (aCH4[i] > xThreshold and aR[i]>rMin):\n #if (aCH4[i] > xThreshold):\n lstCH4_AB.append(i)\n aMean[i] = xCH4Mean\n aMeanC2H6[i] = xC2H6Mean\n aThreshold[i] = xThreshold\n aMeanCH4_true[i] = xMeanCH4_true\n aSTDCH4[i] = xSTDCH4\n aMaxCH4[i] = xMaxCH4\n aMinCH4[i] = 
xMinCH4\n aMedianCH4[i] = xMedianCH4\n # now group the above baseline threshold observations into groups based on distance threshold\n lstCH4_ABP = []\n xDistPeak = 0.0\n xCH4Peak = 0.0\n xTime = 0.0\n cntPeak = 0\n cnt = 0\n sID = \"\"\n sPeriod5Min = \"\"\n prevIndex = 0\n for i in lstCH4_AB:\n if (cnt == 0):\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n else:\n # calculate distance between points\n xDist = haversine(xLat1, xLon1, aLat[i], aLon[i])\n xDistPeak += xDist\n xCH4Peak += (xDist * (aCH4[i] - aMean[i]))\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n if (sID == \"\"):\n xTime = aEpochTime[i]\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n if ((aEpochTime[i] - aEpochTime[prevIndex]) > xTimeThreshold): # initial start of a observed peak\n cntPeak += 1\n xTime = aEpochTime[i]\n xDistPeak = 0.0\n xCH4Peak = 0.0\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n # print str(i) +\", \" + str(xDist) + \",\" + str(cntPeak) +\",\" + str(xDistPeak)\n #aMeanCH4_true[i], aSTDCH4[i]\n\n lstCH4_ABP.append(\n [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i],aMeanCH4_true[i],aSTDCH4[i],\n aMaxCH4[i],aMinCH4[i],aMedianCH4[i],aThreshold[i],\n xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n aUavg[i],aVavg[i],aWavg[i],aRavg[i],aThavg[i]])\n cnt += 1\n prevIndex = i\n\n # Finding peak_id larger than 160.0 m\n tmpsidlist = []\n for r in lstCH4_ABP:\n if (float(r[9]) > 160.0) and (r[0] not in tmpsidlist):\n tmpsidlist.append(r[0])\n cntPeak -= len(tmpsidlist)\n\n fLog.write(\"Number of peaks found: \" + str(cntPeak) + \"\\n\")\n print(f\"{xCar} \\t {xDate} \\t {xFilename} \\t {count} \\t {len(lstCH4_ABP)}\")\n\n # write out the observed peaks to a csv to be read into a GIS\n fOut = open(fnOut, 'w')\n # s = \"PEAK_NUM,EPOCHSTART,EPOCH,DATETIME,CH4,LON,LAT,CH4_BASELINE,CH4_THRESHOLD,PEAK_DIST_M,PEAK_CH4,TCH4,PERIOD5MIN\\n\"\n s = \"OP_NUM,OP_EPOCHSTART,OB_EPOCH,OB_DATETIME,OB_CH4,OB_LON,OB_LAT,OB_CH4_BASELINE,OB_CH4_MEAN,OB_CH4_STD,OB_CH4_MAX,OB_CH4_MIN,OB_CH4_MED,\" \\\n \"OB_CH4_THRESHOLD,OP_PEAK_DIST_M,OP_PEAK_CH4,OB_TCH4,OB_C2H6,\" \\\n \"OB_C2C1,OB_R,OB_C2H6_BASELINE,OB_PERIOD5MIN,ODOMETER,OB_U_AVG,OB_V_AVG,OB_W_AVG,\" \\\n \"OB_R_AVG,OB_THETA_AVG\\n\"\n fOut.write(s)\n\n truecount = 0\n for r in lstCH4_ABP:\n if r[0] not in tmpsidlist:\n s = ''\n for rr in r:\n s += str(rr) + ','\n s = s[:-1]\n s += '\\n'\n fOut.write(s)\n truecount += 1\n fOut.close()\n fLog.close()\n\n openFile = pd.read_csv(fnOut)\n if openFile.shape[0] != 0:\n pkDistDf = openFile.copy().groupby('OP_NUM', as_index=False).apply(\n lambda x: max(x.ODOMETER) - min(x.ODOMETER))\n pkDistDf.columns = ['OP_NUM', 'OP_DISTANCE']\n openFile = pd.merge(openFile.copy(), pkDistDf)\n tempCount = openFile.groupby('OP_NUM', as_index=False).OP_EPOCHSTART.count().rename(\n columns={'OP_EPOCHSTART': 'Frequency'})\n tempCount = tempCount.loc[tempCount.Frequency >= minElevated, :]\n if tempCount.shape[0] == 0:\n print(f\"No Observed Peaks with enough Elevated Readings Found in the file: {xFilename}\")\n tempCount.to_csv(fnOut) ## added to deal with issue where it wasn't being filtered out\n elif tempCount.shape[0] != 0:\n oFile = pd.merge(openFile, tempCount, on=['OP_NUM'])\n openFile = oFile.copy()\n del (oFile)\n openFile[\"minElevated\"] = openFile.apply(lambda x: int(minElevated), axis=1)\n openFile['OB_CH4_AB'] = 
openFile.loc[:, 'OB_CH4'].sub(openFile.loc[:, 'OB_CH4_BASELINE'], axis=0)\n openFile['OB_C2H6_AB'] = openFile.loc[:, 'OB_C2H6'].sub(openFile.loc[:, 'OB_C2H6_BASELINE'],axis=0)\n openFile.to_csv(fnOut, index=False)\n\n\n fileWt = weighted_loc(openFile, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'}).reset_index(drop=True)\n geometry_temp = [Point(lon, lat) for lon, lat in zip(fileWt['pk_LON'], fileWt['pk_LAT'])]\n crs = 'EPSG:4326'\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' WITH BUFFER DISTANCE\n gdf_buff = gpd.GeoDataFrame(fileWt, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n\n ##maybe this is the issue?\n #gdf_buff = gdf_buff.to_crs(epsg=32610)\n #gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(30)\n try:\n gdf_buff.to_file(jsonOut, driver=\"GeoJSON\")\n #gdf_buff.to_file('testthing.geojson', driver=\"GeoJSON\")\n except:\n print(\"Error Saving JSON File\")\n elif openFile.shape[0] == 0:\n print(f\"No Observed Peaks Found in the file:{xFilename}\")\n except ValueError:\n print(\"Error in Identify Peaks\")\n return False",
"def _moving_target_focus(path, size, cutout_fles, verbose=False):\n \n cutout_table_list = list()\n \n tck_tuple, u = splprep([path[\"position\"].ra, path[\"position\"].dec], u=path[\"time\"].jd, s=0)\n \n for fle in cutout_fles:\n if verbose:\n print(fle)\n \n # Get the stuff we need from the cutout file\n hdu = fits.open(fle)\n cutout_table = Table(hdu[1].data)\n cutout_wcs = WCS(hdu[2].header)\n hdu.close()\n \n \n path[\"x\"], path[\"y\"] = cutout_wcs.world_to_pixel(path[\"position\"])\n # This line might need to be refined\n rel_pts = ((path[\"x\"] - size[0]/2 >= 0) & (path[\"x\"] + size[0]/2 < cutout_wcs.array_shape[1]) & \n (path[\"y\"] - size[1]/2 >= 0) & (path[\"y\"] + size[1]/2 < cutout_wcs.array_shape[0]))\n \n if sum(rel_pts) == 0:\n continue\n \n cutout_table[\"time_jd\"] = cutout_table[\"TIME\"] + 2457000 # TESS specific code\n cutout_table = cutout_table[(cutout_table[\"time_jd\"] >= np.min(path[\"time\"][rel_pts].jd)) & \n (cutout_table[\"time_jd\"] <= np.max(path[\"time\"][rel_pts].jd))]\n \n cutout_table[\"positions\"] = SkyCoord(*splev(cutout_table[\"time_jd\"], tck_tuple), unit=\"deg\")\n cutout_table[\"x\"], cutout_table[\"y\"] = cutout_wcs.world_to_pixel(cutout_table[\"positions\"])\n cutout_table[\"bounds\"] = _get_bounds(cutout_table[\"x\"], cutout_table[\"y\"], size)\n \n \n cutout_table[\"TGT_X\"] = cutout_table[\"x\"] - cutout_table[\"bounds\"][:, 0, 0]\n cutout_table[\"TGT_Y\"] = cutout_table[\"y\"] - cutout_table[\"bounds\"][:, 1, 0]\n \n cutout_table[\"TGT_RA\"] = cutout_table[\"positions\"].ra.value\n cutout_table[\"TGT_DEC\"] = cutout_table[\"positions\"].dec.value\n\n # This is y vs x beacuse of the way the pixels are stored by fits\n cutout_table[\"bounds\"] = [(slice(*y), slice(*x)) for x, y in cutout_table[\"bounds\"]]\n \n cutout_table[\"RAW_CNTS\"] = [x[\"RAW_CNTS\"][tuple(x[\"bounds\"])] for x in cutout_table]\n cutout_table[\"FLUX\"] = [x[\"FLUX\"][tuple(x[\"bounds\"])] for x in cutout_table]\n cutout_table[\"FLUX_ERR\"] = [x[\"FLUX_ERR\"][tuple(x[\"bounds\"])] for x in cutout_table]\n cutout_table[\"FLUX_BKG\"] = [x[\"FLUX_BKG\"][tuple(x[\"bounds\"])] for x in cutout_table]\n cutout_table[\"FLUX_BKG_ERR\"] = [x[\"FLUX_BKG_ERR\"][tuple(x[\"bounds\"])] for x in cutout_table]\n \n cutout_table.remove_columns(['time_jd', 'bounds', 'x', 'y', \"positions\"])\n cutout_table_list.append(cutout_table)\n \n cutout_table = vstack(cutout_table_list)\n cutout_table.sort(\"TIME\")\n \n return cutout_table",
"def __init__(self, spikes, position, size=(100, 100), x_range=(-100, 100), y_range=(-100, 100)):\n self.ratemap = np.empty(shape=size)\n self.ratemap[:] = np.nan\n self.size = size\n self.x_range = x_range\n self.y_range = y_range\n self.x_pos_bins = np.linspace(x_range[0], x_range[1], num=size[0]+1)\n self.y_pos_bins = np.linspace(y_range[0], y_range[1], num=size[1]+1)\n self.spikes = spikes\n self.position = position\n self.last_t_end = 0\n self.last_t_init = 0",
"def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()",
"def decompedToSpikes1DMassData(dataFile='movingPointMassData/testPointMassDataDecmp000.pkl', saveName='movingPointMassData/testPointMassDataDecmpSpikes000.pkl'):\n\n # I need to set the mechanism of spike generation! We could do it in a number of ways. For example by time-rescaling\n # For time-rescaling I am relying on the algorithms presented by Brown et al. \"The TimeRescaling\n # Theorem and Its Application to Neural Spike Train Analysis\" (2001) and their simple algorithm to make spikes. NOTE,\n # this is meant to handle making data look like it is generated from Poisson process, so there is a measure of\n # stochasticity involved here. Use another function if this is not wanted. Alternatively I have just linearly make\n # spikes based on inegrating each analog signal.\n #\n # I need to fill in the data here below, (Hardcoding) in order to determing what type of thing I am doing.\n #\n # Fill in spikeGenType: tells us which spike generation optiont to call. The options are \"linearIntegral\" or\n # \"timeRescale\"\n spikeGenType = \"linearIntegral\"\n analgSingnalScaling = 20.\n # Fill in the\n\n # Load the data back (this is the decomposed version of the 1D moving mass data)\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n segmentedTrialsList = dataOut[0] # list of numpy.arrays(time-steps X numb. decomps) of the decomposed 1D signal\n gCenters = dataOut[1] # The centers of the Gaussaians\n b = dataOut[2]\n origFileName = dataOut[3]\n\n # I need to load the original filename to get the value of 'dt'\n inputDataFile = open(origFileName, \"rb\")\n origDataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n dt = origDataOut[7]\n\n # Now make spikes\n segmentedSpikesList = []\n if spikeGenType==\"linearIntegral\":\n for i in range(len(segmentedTrialsList)):\n segmentedSpikesList.append(analogToSpikes.genSpikesLinearly(segmentedTrialsList[i], dt=dt, scaling=analgSingnalScaling))\n\n # Save\n outputList = [segmentedSpikesList, dataFile, spikeGenType, analgSingnalScaling] # We also want to store the location of the originating decomped data\n outputFile = open(saveName, \"wb\")\n pickle.dump(outputList, outputFile)\n outputFile.close()",
"def specpolfinalstokes(infile_list,polcal='polcal.txt',logfile='salt.log',debug=False):\n\n patternlist = open(datadir+'wppaterns.txt','r').readlines()\n patternpairs = dict(); patternstokes = dict()\n for p in patternlist:\n if p.split()[0] == '#': continue\n patternpairs[p.split()[0]]=(len(p.split())-3)/2\n patternstokes[p.split()[0]]=int(p.split()[1])\n wav_l,heff_l,hpa_l,qeff_l = np.loadtxt(datadir+polcal,dtype=float,unpack=True)\n calversion = open(datadir+polcal, 'r').readlines()[1][2:].rstrip()\n\n with logging(logfile, debug) as log:\n \n # organize data using names\n files = len(infile_list)\n allrawlist = []\n for i in range(files):\n object,config,wvplt,count = os.path.basename(infile_list[i]).split('.')[0].rsplit('_',4)\n if (config[0]!='c')|(wvplt[0]!='h')|(not count.isdigit()):\n log.message('File '+infile_list[i]+' is not a raw stokes file.' , with_header=False) \n continue\n allrawlist.append([i,object,config,wvplt,count])\n configlist = sorted(list(set(ele[2] for ele in allrawlist))) # unique configs\n\n # correct raw stokes for track (TBS)\n\n # do one config at a time, since different configs may have different number of wavelengths\n for conf in configlist:\n log.message(\"\\nConfiguration: %s\" % conf, with_header=False) \n rawlist = [entry for entry in allrawlist if entry[2]==conf]\n for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col)) \n rawstokes = len(rawlist)\n cols = pyfits.open(infile_list[rawlist[0][0]])['SCI'].data.shape[-1]\n stokes_jsw = np.zeros((rawstokes,2,cols)); \n var_jsw = np.zeros_like(stokes_jsw); bpm_jsw = np.zeros_like(stokes_jsw).astype(int)\n wav_jw = np.zeros((rawstokes,cols))\n comblist = []\n # get data\n for j in range(rawstokes):\n i,object,config,wvplt,count = rawlist[j]\n if j==0:\n lampid = pyfits.getheader(infile_list[i],0)['LAMPID'].strip().upper()\n telpa = float(pyfits.getheader(infile_list[i],0)['TELPA'])\n if lampid==\"NONE\":\n pacaltype = \"Equatorial\"\n hpa_l -= (telpa % 180)\n else:\n pacaltype =\"Instrumental\"\n calinfo = (pacaltype+' '+calversion)\n log.message(' Calibration: '+calinfo, with_header=False) \n \n wppat = pyfits.getheader(infile_list[i],0)['WPPATERN']\n wav0 = pyfits.getheader(infile_list[i],'SCI')['CRVAL1']\n dwav = pyfits.getheader(infile_list[i],'SCI')['CDELT1']\n stokes_jsw[j] = pyfits.open(infile_list[i])['SCI'].data.reshape((2,-1))\n var_jsw[j] = pyfits.open(infile_list[i])['VAR'].data.reshape((2,-1))\n bpm_jsw[j] = pyfits.open(infile_list[i])['BPM'].data.reshape((2,-1))\n wav_jw[j] = np.mgrid[wav0:(wav0+cols*dwav):dwav]\n if int(count)==1:\n comblist.append((j,object,config,wvplt,count,wppat))\n else:\n comblist[-1] = (j,object,config,wvplt,count,wppat)\n\n # combine multiple instances (count > 1)\n combstokes = len(comblist)\n stokes_ksw = np.zeros((combstokes,2,cols)); \n var_ksw = np.zeros_like(stokes_ksw)\n bpm_ksw = np.zeros_like(stokes_ksw).astype(int)\n wav_kw = np.zeros((combstokes,cols))\n chisqstokes_kw = np.zeros_like(wav_kw)\n obslist = []\n obsobject = ''\n obsconfig = ''\n chisqlist = [[]]\n for k in range(combstokes):\n j,object,config,wvplt,count,wppat = comblist[k]\n stokes_ksw[k] = stokes_jsw[j-int(count)+1:j+1].sum(axis=0)\n var_ksw[k] = var_jsw[j-int(count)+1:j+1].sum(axis=0) \n bpm_ksw[k] = (bpm_jsw[j-int(count)+1:j+1].sum(axis=0) > 0).astype(int)\n wav_kw[k] = wav_jw[j]\n\n # compute chisq/dof for multiple instances\n if int(count) > 1:\n combstokes_w = np.zeros(cols)\n bok = (bpm_ksw[k,1] == 0) \n combstokes_w[bok] = 
stokes_ksw[k,1,bok]/stokes_ksw[k,0,bok]\n for jj in range(j-int(count)+1,j+1):\n stokes_w = np.zeros(cols); errstokes_w = np.zeros_like(stokes_w)\n stokes_w[bok] = stokes_jsw[jj,1,bok]/stokes_jsw[jj,0,bok]\n errstokes_w[bok] = np.sqrt(var_jsw[jj,1,bok]/(stokes_jsw[jj,0,bok])**2)\n chisqstokes_kw[k,bok] += ((stokes_w[bok]-combstokes_w[bok])/errstokes_w[bok])**2\n chisqstokes_kw[k] /= int(count)-1\n chisqstokes = chisqstokes_kw[k].sum()/bok.sum()\n chisqlist[-1].append(chisqstokes)\n log.message(\" Chisq/dof Filter Pair %s: %7.2f\" % (wvplt,chisqstokes), with_header=False)\n if ((object != obsobject) | (config != obsconfig)):\n obslist.append([k,object,config,wppat,1])\n chisqlist.append([])\n obsobject = object; obsconfig = config\n else:\n obslist[-1][4] +=1\n \n # for each obs combine stokes, apply efficiency and PA calibration as appropriate for pattern, and save\n obss = len(obslist)\n for obs in range(obss):\n k,object,config,wppat,pairs = obslist[obs]\n obsname = object+\"_\"+config\n log.message(\"\\n Observation: %s\" % obsname, with_header=False)\n# print k,object,config,wppat,pairs\n finstokes = patternstokes[wppat]\n if pairs != patternpairs[wppat]:\n log.message(' Not a complete pattern, skipping observation', with_header=False) \n continue\n stokes_fw = np.zeros((finstokes,cols))\n var_fw = np.zeros_like(stokes_fw)\n ok_fw = bpm_ksw[k:k+pairs,:].sum(axis=0) == 0\n ok_w = ok_fw.all(axis=0)\n bpm_fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)\n stokes_fw[0] = stokes_ksw[k:k+pairs,0].sum(axis=0)/pairs\n var_fw[0] = var_ksw[k:k+pairs,0].sum(axis=0)/pairs**2 \n\n if wppat.count('Linear'):\n var_fw = np.vstack((var_fw,np.zeros(cols))) # add QU covariance\n if wppat=='Linear':\n stokes_fw[1:,ok_w] = stokes_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])\n var_fw[1:3,ok_w] = var_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])**2\n elif wppat=='Linear-Hi':\n # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors\n nstokes_pw = np.zeros((pairs,cols)); nvar_pw = np.zeros((pairs,cols))\n nstokes_fw = np.zeros((finstokes,cols)); nvar_fw = np.zeros((finstokes+1,cols))\n nstokes_pw[:,ok_w] = stokes_ksw[k:k+pairs,1,ok_w]/stokes_ksw[k:k+pairs,0,ok_w]\n nvar_pw[:,ok_w] = var_ksw[k:k+pairs,1,ok_w]/(stokes_ksw[k:k+pairs,0,ok_w])**2\n if debug: \n np.savetxt(obsname+\"_nstokes.txt\",np.vstack((ok_w.astype(int),nstokes_pw)).T,fmt=\"%3i \"+4*\"%10.6f \")\n np.savetxt(obsname+\"_nvar.txt\",np.vstack((ok_w.astype(int),nvar_pw)).T,fmt=\"%3i \"+4*\"%14.9f \")\n nstokes_fw[1] = 0.5*(nstokes_pw[0] + (nstokes_pw[1]-nstokes_pw[3])/np.sqrt(2.))\n nstokes_fw[2] = 0.5*(nstokes_pw[2] + (nstokes_pw[1]+nstokes_pw[3])/np.sqrt(2.))\n nvar_fw[1] = 0.25*(nvar_pw[0] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[2] = 0.25*(nvar_pw[2] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[3] = 0.25*((nvar_pw[1] - nvar_pw[3])/2.)\n stokes_fw[1:] = nstokes_fw[1:]*stokes_fw[0]\n var_fw[1:] = nvar_fw[1:]*stokes_fw[0]**2\n chisqq = ((nstokes_pw[0,ok_w] - nstokes_fw[1,ok_w])**2/nvar_fw[1,ok_w]).sum()/ok_w.sum() \n chisqu = ((nstokes_pw[2,ok_w] - nstokes_fw[2,ok_w])**2/nvar_fw[2,ok_w]).sum()/ok_w.sum()\n chisqlist[obs].append(chisqq)\n chisqlist[obs].append(chisqu)\n log.message(\" Chisq/dof Linear-Hi Q,U: %7.2f %7.2f\" % (chisqq,chisqu), with_header=False) \n\n # calculate, print estimated systematic error from chisq mean\n if len(chisqlist[obs]):\n chisqdof = np.array(chisqlist[obs]).mean()\n dofs = float(ok_fw[0].sum())\n chisqdoferr = 
np.sqrt(2./dofs)\n syserr = 0. # estimate systematic error using noncentral chisq distribution\n if (chisqdof - 1.) > 3.*chisqdoferr:\n nvar_fw = np.zeros_like(var_fw)\n nvar_fw[:,ok_fw[0]] = var_fw[:,ok_fw[0]]/stokes_fw[0,ok_fw[0]]**2\n syserr = np.sqrt(dofs*(chisqdof - 1.)/(1./nvar_fw[1,ok_fw[1]]).sum())\n print syserr \n \n log.message((\" Mean chisq/dof: %5.2f Estimated sys %%error: %5.2f\") % \\\n (chisqdof,100.*syserr), with_header=False)\n\n heff_w = interp1d(wav_l,heff_l,kind='cubic')(wav_kw[k])\n par_w = -interp1d(wav_l,hpa_l,kind='cubic')(wav_kw[k])\n c_w = np.cos(2.*np.radians(par_w)); s_w = np.sin(2.*np.radians(par_w))\n stokes_fw[1:] /= heff_w\n var_fw[1:] /= heff_w**2\n stokes_fw[1:] = stokes_fw[1]*c_w - stokes_fw[2]*s_w , \\\n stokes_fw[1]*s_w + stokes_fw[2]*c_w\n var_fw[1:3] = var_fw[1]*c_w**2 + var_fw[2]*s_w**2 , \\\n var_fw[1]*s_w**2 + var_fw[2]*c_w**2\n var_fw[3] = c_w*s_w*(var_fw[1] - var_fw[2]) + (c_w**2-s_w**2)*var_fw[3]\n\n # save final stokes fits file\n infile = infile_list[rawlist[comblist[k][0]][0]]\n hduout = pyfits.open(infile)\n hduout['SCI'].data = stokes_fw.astype('float32').reshape((3,1,-1))\n hduout['SCI'].header.update('CTYPE3','I,Q,U')\n hduout['VAR'].data = var_fw.astype('float32').reshape((4,1,-1))\n hduout['VAR'].header.update('CTYPE3','I,Q,U,QU')\n\n hduout['BPM'].data = bpm_fw.astype('uint8').reshape((3,1,-1))\n hduout['BPM'].header.update('CTYPE3','I,Q,U')\n hduout[0].header.update('POLCAL',calinfo)\n if len(chisqlist[obs]): \n hduout[0].header.update('SYSERR',100.*syserr, \\\n 'estimated % systematic error')\n outfile = object+'_'+config+'_stokes.fits'\n hduout.writeto(outfile,clobber=True,output_verify='warn')\n log.message('\\n '+outfile+' Stokes I,Q,U', with_header=False)\n \n# elif wppat.count('Circular'): TBS \n\n# elif wppat=='All-Stokes': TBS\n\n return",
"def identify_peaks_nowind(xCar, xDate, xDir, xFilename, outDir, processedFileLoc, Engineering, threshold='.1',\n rthresh = '.7',\n xTimeThreshold='5.0', minElevated='2', xB='102', basePerc='50'):\n import csv, numpy\n import shutil\n from shapely.geometry import Point\n import pandas as pd\n import geopandas as gpd\n\n\n try:\n baseCalc = float(basePerc)\n xABThreshold = float(threshold)\n minElevated = float(minElevated)\n rMin = float(rthresh)\n xDistThreshold = 160.0 # find the maximum CH4 reading of observations within street segments of this grouping distance in meters\n xSDF = 4 # multiplier times standard deviation for floating baseline added to mean\n\n xB = int(xB)\n xTimeThreshold = float(xTimeThreshold)\n fn = xDir + xFilename # set raw text file to read in\n fnOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".csv\"\n fnShape = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".shp\"\n fnLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".log\"\n pkLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \"_info.csv\"\n jsonOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \".geojson\"\n infOut = processedFileLoc + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n ### TEST THING\n fn = xDir + xFilename # set raw text file to read in\n filenames = nameFiles(outDir,processedFileLoc,xCar,xDate,True)\n fnOut = filenames['fnOut']\n fnShape = filenames['fnShape']\n fnLog = filenames['fnLog']\n pkLog = filenames['pkLog']\n jsonOut = filenames['jsonOut']\n infOut = filenames['infOut']\n\n print(f\"{outDir}Peaks_{xCar}_{xDate}_info.csv\")\n fLog = open(fnLog, 'w')\n shutil.copy(infOut, pkLog)\n\n # field column indices for various variables\n if Engineering == True:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3; fVelocity = 4; fU = 5\n fV = 6; fW = 7; fBCH4 = 10\n fBCH4 = 8; fBRSSI = 9; fTCH4 = 10\n TRSSI = 11;PRESS = 12; INLET = 13\n TEMP = 14; CH4 = 15;H20 = 16\n C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23;fLat = 24; fLon = 25\n elif not Engineering:\n fDate = 0; fTime = 1; fEpochTime = 2\n fNanoSeconds = 3;fVelocity = 4; fU = 5\n fV = 6; fW = 7\n fBCH4 = 8; fBRSSI = 9\n fTCH4 = 10; TRSSI = 11; PRESS = 12\n INLET = 13; TEMP = 14; CH4 = 15\n H20 = 16;C2H6 = 17; R = 18; C2C1 = 19\n BATT = 20; POWER = 21; CURR = 22\n SOCPER = 23; fLat = 24;fLon = 25; fDist = 33; fOdometer = 34\n fUavg = 35; fVavg = 36; fWavg = 37\n\n # read data in from text file and extract desired fields into a list, padding with 5 minute and hourly average\n x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,x11,x12,x13,x14,x15,x16 = [[] for _ in range(16)]\n\n count = -1\n with open(fn, 'r') as f:\n t = csv.reader(f)\n for row in t:\n woo = row\n # print(count)\n if count < 0:\n count += 1\n continue\n elif count >= 0:\n datet = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n ## if not engineering\n epoch = float(row[fEpochTime] + \".\" + row[fNanoSeconds][0])\n datetime = row[fDate].replace(\"-\", \"\") + row[fTime].replace(\":\", \"\")\n x1.append(epoch); x2.append(datetime)\n if row[fLat] == '':\n x3.append('')\n elif row[fLat] != '':\n x3.append(float(row[fLat]))\n if row[fLon] == '':\n x4.append('')\n elif row[fLon] != '':\n x4.append(float(row[fLon]))\n\n x5.append(float(row[fBCH4]))\n x6.append(float(row[fTCH4]))\n x7.append(0.0)\n x8.append(0.0)\n x9.append(row[fOdometer])\n x11.append(float(row[C2H6]))\n 
x12.append(float(row[C2C1]))\n x13.append(float(row[R]))\n x14.append(float(row[fUavg]))\n x15.append(float(row[fVavg]))\n x16.append(float(row[fWavg]))\n\n count += 1\n print(f\"Number of observations processed:{count}\")\n\n # convert lists to numpy arrays\n aEpochTime = numpy.array(x1)\n aDateTime = numpy.array(x2)\n aLat = numpy.array(x3)\n aLon = numpy.array(x4)\n aCH4 = numpy.array(x5)\n aTCH4 = numpy.array(x6)\n aMean = numpy.array(x7)\n arealMean = numpy.array(x7)\n astd = numpy.array(x7)\n\n aMeanC2H6 = numpy.array(x7)\n aThreshold = numpy.array(x8)\n aOdom = numpy.array(x9)\n\n # adding ethane stuff\n aC2H6 = numpy.array(x11)\n aC2C1 = numpy.array(x12)\n aR = numpy.array(x13)\n aUavg = numpy.array(x14)\n aVavg = numpy.array(x15)\n aWavg = numpy.array(x16)\n\n\n xLatMean = numpy.mean(aLat)\n xLonMean = numpy.mean(aLon)\n #xCH4Mean = numpy.mean(aCH4)\n #xC2H6Mean = numpy.mean(aC2H6)\n #xC2C1Mean = numpy.mean(aC2C1)\n\n fLog.write(\"Day CH4_mean = \" + str(numpy.mean(aCH4)) +\n \", Day CH4 SD = \" + str(numpy.std(aCH4)) + \"\\n\")\n fLog.write(\"Day C2H6 Mean = \" + str(numpy.mean(aC2H6)) +\n \", Day C2H6 SD = \" + str(numpy.std(aC2H6)) + \"\\n\")\n fLog.write(\"Center lon/lat = \" + str(xLonMean) + \", \" + str(xLatMean) + \"\\n\")\n\n lstCH4_AB = []\n\n # generate list of the index for observations that were above the threshold\n for i in range(0, count - 2):\n if ((count - 2) > xB):\n topBound = min((i + xB), (count - 2))\n botBound = max((i - xB), 0)\n\n for t in range(min((i + xB), (count - 2)), i, -1):\n if aEpochTime[t] < (aEpochTime[i] + (xB / 2)):\n topBound = t\n break\n for b in range(max((i - xB), 0), i):\n if aEpochTime[b] > (aEpochTime[i] - (xB / 2)):\n botBound = b\n break\n\n xCH4Mean = numpy.percentile(aCH4[botBound:topBound], baseCalc)\n xCH4_actualMean = numpy.mean(aCH4[botBound:topBound])\n xCH4_stdev = numpy.mean(aCH4[botBound:topBound])\n xC2H6Mean = numpy.percentile(aC2H6[botBound:topBound], baseCalc)\n\n # xCH4SD = numpy.std(aCH4[botBound:topBound])\n else:\n xCH4Mean = numpy.percentile(aCH4[0:(count - 2)], baseCalc)\n xCH4_actualMean = numpy.mean(aCH4[0:(count - 2)], baseCalc)\n xCH4_stdev = numpy.std(aCH4[0:(count - 2)], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[0:(count - 2)], baseCalc)\n\n # xCH4SD = numpy.std(aCH4[0:(count-2)])\n xThreshold = xCH4Mean + (xCH4Mean * xABThreshold)\n xThreshold_c2h6 = xC2H6Mean + (xC2H6Mean * xABThreshold)\n\n if (aCH4[i] > xThreshold and aR[i]>rMin):\n #if (aCH4[i] > xThreshold):\n lstCH4_AB.append(i)\n aMean[i] = xCH4Mean\n aMeanC2H6[i] = xC2H6Mean\n aThreshold[i] = xThreshold\n arealMean[i] = xCH4_actualMean\n astd[i] = xCH4_stdev\n\n # now group the above baseline threshold observations into groups based on distance threshold\n lstCH4_ABP = []\n xDistPeak = 0.0\n xCH4Peak = 0.0\n xTime = 0.0\n cntPeak = 0\n cnt = 0\n sID = \"\"\n sPeriod5Min = \"\"\n prevIndex = 0\n for i in lstCH4_AB:\n if (cnt == 0):\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n else:\n # calculate distance between points\n xDist = haversine(xLat1, xLon1, aLat[i], aLon[i])\n xDistPeak += xDist\n xCH4Peak += (xDist * (aCH4[i] - aMean[i]))\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n if (sID == \"\"):\n xTime = aEpochTime[i]\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n if ((aEpochTime[i] - aEpochTime[prevIndex]) > xTimeThreshold): # initial start of a observed peak\n cntPeak += 1\n xTime = aEpochTime[i]\n xDistPeak = 0.0\n xCH4Peak = 0.0\n sID = str(xCar) 
+ \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n # print str(i) +\", \" + str(xDist) + \",\" + str(cntPeak) +\",\" + str(xDistPeak)\n lstCH4_ABP.append(\n [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i], aThreshold[i],\n xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n aUavg[i],aVavg[i],aWavg[i]])\n cnt += 1\n prevIndex = i\n\n # Finding peak_id larger than 160.0 m\n tmpsidlist = []\n for r in lstCH4_ABP:\n if (float(r[9]) > 160.0) and (r[0] not in tmpsidlist):\n tmpsidlist.append(r[0])\n cntPeak -= len(tmpsidlist)\n\n fLog.write(\"Number of peaks found: \" + str(cntPeak) + \"\\n\")\n print(f\"{xCar} \\t {xDate} \\t {xFilename} \\t {count} \\t {len(lstCH4_ABP)}\")\n\n # write out the observed peaks to a csv to be read into a GIS\n fOut = open(fnOut, 'w')\n # s = \"PEAK_NUM,EPOCHSTART,EPOCH,DATETIME,CH4,LON,LAT,CH4_BASELINE,CH4_THRESHOLD,PEAK_DIST_M,PEAK_CH4,TCH4,PERIOD5MIN\\n\"\n s = \"OP_NUM,OP_EPOCHSTART,OB_EPOCH,OB_DATETIME,OB_CH4,OB_LON,OB_LAT,OB_CH4_BASELINE,\" \\\n \"OB_CH4_THRESHOLD,OP_PEAK_DIST_M,OP_PEAK_CH4,OB_TCH4,OB_C2H6,\" \\\n \"OB_C2C1,OB_R,OB_C2H6_BASELINE,OB_PERIOD5MIN,ODOMETER,OB_U_AVG,OB_V_AVG,OB_W_AVG\\n\"\n fOut.write(s)\n\n truecount = 0\n for r in lstCH4_ABP:\n if r[0] not in tmpsidlist:\n s = ''\n for rr in r:\n s += str(rr) + ','\n s = s[:-1]\n s += '\\n'\n fOut.write(s)\n truecount += 1\n fOut.close()\n fLog.close()\n\n openFile = pd.read_csv(fnOut)\n if openFile.shape[0] != 0:\n pkDistDf = openFile.copy().groupby('OP_NUM', as_index=False).apply(\n lambda x: max(x.ODOMETER) - min(x.ODOMETER))\n pkDistDf.columns = ['OP_NUM', 'OP_DISTANCE']\n openFile = pd.merge(openFile.copy(), pkDistDf)\n tempCount = openFile.groupby('OP_NUM', as_index=False).OP_EPOCHSTART.count().rename(\n columns={'OP_EPOCHSTART': 'Frequency'})\n tempCount = tempCount.loc[tempCount.Frequency >= minElevated, :]\n if tempCount.shape[0] == 0:\n print(f\"No Observed Peaks with enough Elevated Readings Found in the file: {xFilename}\")\n\n elif tempCount.shape[0] != 0:\n oFile = pd.merge(openFile, tempCount, on=['OP_NUM'])\n openFile = oFile.copy()\n del (oFile)\n openFile[\"minElevated\"] = openFile.apply(lambda x: int(minElevated), axis=1)\n openFile['OB_CH4_AB'] = openFile.loc[:, 'OB_CH4'].sub(openFile.loc[:, 'OB_CH4_BASELINE'], axis=0)\n openFile['OB_C2H6_AB'] = openFile.loc[:, 'OB_C2H6'].sub(openFile.loc[:, 'OB_C2H6_BASELINE'],axis=0)\n openFile.to_csv(fnOut, index=False)\n\n\n fileWt = weighted_loc(openFile, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'}).reset_index(drop=True)\n geometry_temp = [Point(lon, lat) for lon, lat in zip(fileWt['pk_LON'], fileWt['pk_LAT'])]\n crs = 'EPSG:4326'\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' WITH BUFFER DISTANCE\n gdf_buff = gpd.GeoDataFrame(fileWt, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n\n ##maybe this is the issue?\n #gdf_buff = gdf_buff.to_crs(epsg=32610)\n #gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(30)\n try:\n gdf_buff.to_file(jsonOut, driver=\"GeoJSON\")\n #gdf_buff.to_file('testthing.geojson', driver=\"GeoJSON\")\n except:\n print(\"Error Saving JSON File\")\n elif openFile.shape[0] == 0:\n print(f\"No Observed Peaks Found in the file:{xFilename}\")\n except ValueError:\n print(\"Error in Identify Peaks\")\n 
return False",
"def align_spikes(spike_data, spt_dict, sp_win, type=\"max\", resample=1,\n contact=0, remove=True):\n\n tol = 0.1\n\n if (sp_win[0] > -tol) or (sp_win[1] < tol):\n warn('You are using very short sp_win. '\n 'This may lead to alignment problems.')\n\n spt = spt_dict['data'].copy()\n\n idx_align = np.arange(len(spt))\n\n #go in a loop until all spikes are correctly aligned\n iter_id = 0\n while len(idx_align) > 0:\n spt_align = {'data': spt[idx_align]}\n spt_inbound = filter_spt(spike_data, spt_align, sp_win)\n idx_align = idx_align[spt_inbound]\n sp_waves_dict = extract_spikes(spike_data, spt_align, sp_win,\n resample=resample, contacts=contact)\n\n sp_waves = sp_waves_dict['data'][:, spt_inbound, 0]\n time = sp_waves_dict['time']\n\n if type == \"max\":\n i = sp_waves.argmax(0)\n elif type == \"min\":\n i = sp_waves.argmin(0)\n\n #move spike markers\n shift = time[i]\n spt[idx_align] += shift\n\n #if spike maximum/minimum was at the edge we have to extract it at the\n # new marker and repeat the alignment\n\n idx_align = idx_align[(shift < (sp_win[0] + tol)) |\n (shift > (sp_win[1] - tol))]\n iter_id += 1\n\n ret_dict = {'data': spt}\n\n if remove:\n #remove double spikes\n FS = spike_data['FS']\n ret_dict = remove_doubles(ret_dict, 1000.0 / FS)\n\n return ret_dict",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def source_gen(stellar, threshold):\n source = []\n for i in stellar:\n if i[2] > threshold:\n source.append(NewPoint(i))\n \n #sort objects by x-axis\n source.sort(key=lambda x: x[1])\n source.sort(key=lambda x: x[0])\n return source",
"def extract_spikes(spike_data, spt_dict, sp_win,\n resample=1, contacts='all'):\n sp_data = spike_data['data']\n n_contacts = spike_data['n_contacts']\n\n if contacts == \"all\":\n contacts = np.arange(n_contacts)\n elif isinstance(contacts, int):\n contacts = np.array([contacts])\n else:\n contacts = np.asarray(contacts)\n\n FS = spike_data['FS']\n spt = spt_dict['data']\n idx = np.arange(len(spt))\n inner_idx = filter_spt(spike_data, spt_dict, sp_win)\n outer_idx = idx[~np.in1d(idx, inner_idx)]\n\n indices = (spt / 1000.0 * FS).astype(np.int32)\n win = (np.asarray(sp_win) / 1000.0 * FS).astype(np.int32)\n time = np.arange(win[1] - win[0]) * 1000.0 / FS + sp_win[0]\n n_contacts, n_pts = sp_data.shape\n\n # auxiliary function to find a valid spike window within data range\n minmax = lambda x: np.max([np.min([n_pts, x]), 0])\n spWave = np.zeros((len(time), len(spt), len(contacts)),\n dtype=np.float32)\n\n for i in inner_idx:\n sp = indices[i]\n spWave[:, i, :] = np.atleast_2d(sp_data[contacts,\n sp + win[0]:sp + win[1]]).T\n\n for i in outer_idx:\n sp = indices[i]\n l, r = map(minmax, sp + win)\n if l != r:\n spWave[(l - sp) - win[0]:(r - sp) - win[0], i, :] = \\\n sp_data[contacts, l:r].T\n\n wavedict = {\"data\": spWave, \"time\": time, \"FS\": FS}\n\n if len(idx) != len(inner_idx):\n is_valid = np.zeros(len(spt), dtype=np.bool)\n is_valid[inner_idx] = True\n wavedict['is_valid'] = is_valid\n\n if resample != 1:\n warn(\"resample argument is deprecated.\"\n \"Please update your code to use function\"\n \"resample_spikes\", DeprecationWarning)\n wavedict = resample_spikes(wavedict, FS * resample)\n return wavedict",
"def measurement_update(particles, measured_marker_list, grid):\n weight = []\n cnt = 0\n\n # no new sensor info\n if len(measured_marker_list) == 0:\n s = 1\n for p in particles:\n weight.append((p, 1/len(particles)))\n else:\n for p in particles:\n markers_visible_to_p = p.read_markers(grid)\n\n if p.x < 0 or p.x >= grid.width or p.y < 0 or p.y >= grid.height:\n weight.append((p, 0))\n continue\n if (p.x, p.y) in grid.occupied:\n weight.append((p, 0))\n continue\n\n match = []\n diff = int(math.fabs(len(measured_marker_list)-len(markers_visible_to_p)))\n\n for cm in measured_marker_list:\n if len(markers_visible_to_p) == 0:\n break\n cmx, cmy, cmh = add_marker_measurement_noise(cm, MARKER_TRANS_SIGMA, MARKER_ROT_SIGMA)\n\n # find minp, the closest marker out of markers_visible_to_particle\n minp = markers_visible_to_p[0]\n mind = grid_distance(cmx, cmy, minp[0], minp[1])\n\n for mvp in markers_visible_to_p:\n mvpx, mvpy, mvph = mvp[0], mvp[1], mvp[2]\n dist = grid_distance(cmx, cmy, mvpx, mvpy)\n if dist < mind:\n mind = dist\n minp = mvp\n\n # store the pairing [cm, m] for later calculations\n match.append((minp, cm))\n markers_visible_to_p.remove(minp)\n\n # use match to calculate weight of p\n prob = 1\n\n maxc1 = 0\n maxc2 = (45 ** 2) / (2*(MARKER_ROT_SIGMA ** 2))\n c1 = 2*(MARKER_TRANS_SIGMA ** 2)\n c2 = 2*(MARKER_ROT_SIGMA ** 2)\n\n for i, j in match:\n distBetweenMarkers = grid_distance(i[0], i[1], j[0], j[1])\n angleBetweenMarkers = diff_heading_deg(i[2], j[2])\n const1 = (distBetweenMarkers ** 2) / c1\n const2 = (angleBetweenMarkers ** 2) / c2\n maxc1 = max(maxc1, const1)\n prob *= np.exp(-const1-const2)\n\n for _ in range(diff):\n prob *= np.exp(-maxc1-maxc2)\n\n weight.append((p, prob))\n\n #normalize weight\n s = 0\n weight.sort(key=lambda x: x[1])\n delete = int(PARTICLE_COUNT/100)\n weight = weight[delete:]\n for i, j in weight:\n if j == 0:\n cnt+=1\n else:\n s += j\n weight = weight[cnt:]\n cnt += delete\n\n plist = []\n wlist = []\n\n for i, j in weight:\n newi = Particle(i.x, i.y, i.h)\n wlist.append(j/s)\n plist.append(newi)\n\n newplist = []\n\n if plist != []:\n newplist = np.random.choice(plist, size=len(plist), replace = True, p=wlist)\n\n measured_particles = Particle.create_random(cnt, grid)[:]\n\n for p in newplist:\n ph = add_gaussian_noise(p.h, ODOM_HEAD_SIGMA)\n px = add_gaussian_noise(p.x, ODOM_TRANS_SIGMA)\n py = add_gaussian_noise(p.y, ODOM_TRANS_SIGMA)\n newp = Particle(px, py, ph)\n measured_particles.append(newp)\n\n return measured_particles",
"def shift_to_match(folder, x=0, y=0, z=0, angle=0, dim=120, energies=['40kVp', '80kVp'],\n directory='D:/Research/Python Data/CBCT/'):\n path = directory + folder + '/'\n\n for energy in energies:\n\n load_path = path + energy\n\n gof.create_folder(folder_name='Shifted Matrices', directory_path=load_path)\n\n load_path = load_path + '/RawMatrices/'\n save_path = path + energy + '/Shifted Matrices/'\n\n # Get all the slices to shift\n files = os.listdir(load_path)\n\n for file in files:\n temp = np.load(load_path + file)\n\n if energy is '40kVp':\n # Don't need to do anything for 40 kVp images\n np.save(save_path + file, temp)\n else:\n savefile = file\n # Shift within XY plane (the slice plane)\n if y is not 0:\n temp = np.roll(temp, y, axis=0) # Y shift\n if x is not 0:\n temp = np.roll(temp, x, axis=1) # X shift\n\n # Rotation\n if angle is not 0:\n index = np.round(np.abs(angle), decimals=0)\n index = int(index)\n temp = rotate(temp, angle)\n temp = temp[index:index + dim, index:index + dim]\n\n # Shift slices in the z (rename files)\n if z is not 0:\n file = file.replace('.npy', '')\n file = file.replace('volume0', '')\n file = int(file) + z\n if file < 10:\n savefile = 'volume000' + str(file) + '.npy'\n elif file < 100 and file >= 10:\n savefile = 'volume00' + str(file) + '.npy'\n else:\n savefile = 'volume0' + str(file) + '.npy'\n\n np.save(save_path + savefile, temp)",
"def sample_drive_gear(drive_contour: np.ndarray, target_driven_contour: np.ndarray, k: int,\n sampling_count: Tuple[int, int], keep_count: int, comparing_accuracy: int, max_sample_depth: int,\n debugging_path: str, subplots: Union[List[Axes], None]) \\\n -> List[Tuple[float, float, float, float, np.ndarray]]:\n drive_polygon = Polygon(drive_contour)\n min_x, min_y, max_x, max_y = drive_polygon.bounds\n windows = [(min_x, max_x, min_y, max_y)]\n result_pool = []\n for iter_time in range(max_sample_depth):\n # split the windows up\n next_windows = []\n for window in windows:\n min_x, max_x, min_y, max_y = window\n new_windows = itertools.product(uniform_interval(min_x, max_x, sampling_count[0]),\n uniform_interval(min_y, max_y, sampling_count[1]))\n new_windows = [(x, y, z, w) for (x, y), (z, w) in new_windows] # repack\n next_windows += new_windows\n windows = next_windows\n # get results\n result_pool = []\n for index, window in enumerate(windows):\n result = sample_result(drive_contour, drive_polygon, window, k)\n if result is None:\n score = 1e8\n result_pool.append((score, None, None, window, result))\n else:\n *center, center_distance, result = result\n result = counterclockwise_orientation(result)\n score = shape_difference_rating(target_driven_contour, result, comparing_accuracy,\n distance_function=trivial_distance)\n result_pool.append((score, center, center_distance, window, result))\n if subplots is not None:\n update_polygon_subplots(drive_contour, result, subplots)\n min_x, max_x, min_y, max_y = window\n sample_region = Rectangle((min_x, min_y), max_x - min_x, max_y - min_y, color='red', fill=False)\n subplots[0].add_patch(sample_region)\n subplots[0].scatter(center[0], center[1], 5)\n subplots[1].scatter(0, 0, 3)\n subplots[0].text(0, 0, str(center))\n subplots[1].text(0, 0, str(score))\n plt.savefig(os.path.join(debugging_path, f'{iter_time}_{index}.png'))\n save_contour(os.path.join(debugging_path, f'{iter_time}_{index}_driven.dat'), result)\n result_pool.sort(key=lambda tup: tup[0])\n result_pool = result_pool[:keep_count]\n windows = [result[3] for result in result_pool]\n result_pool = [(score, center_x, center_y, center_distance, driven_contour) for\n score, (center_x, center_y), center_distance, window, driven_contour in result_pool]\n return result_pool",
"def find_center(file):\n\n data = pyfits.getdata(file)\n chipx = data.field('X')\n chipy = data.field('Y')\n#\n#--- because the array is too large to handle in one swipe, divide it into 8x8 segments\n#\n xmin = min(chipx)\n ymin = min(chipy)\n xmax = max(chipx)\n ymax = max(chipy)\n xstep = int((xmax-xmin) / 8 )\n ystep = int((ymax-ymin) / 8 )\n#\n#--- find the interval which contains largest samples \n#\n cposx = 0\n cposy = 0\n cmax = 0\n for i in range (0, 8):\n xstart = xstep * i + xmin\n xstop = xstart + xstep\n for j in range (0, 8):\n ystart = ystep * j + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n if len(chipx_p) > cmax:\n cmax = len(chipx_p)\n cposx = i\n cposy = j\n#\n#--- extract the area of the highest count\n#\n xpos_list = []\n ypos_list = []\n maxv_list = []\n xstart = xstep * cposx + xmin\n xstop = xstart + xstep\n\n ystart = ystep * cposy + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n#\n#--- count up the events. bin to 2x2 so that we get enough count in each bin\n#\n xmin = min(chipx_p)\n xmax = max(chipx_p)\n xdim = int(0.5 * (xmax - xmin)) + 1\n ymin = min(chipy_p)\n ymax = max(chipy_p)\n ydim = int(0.5 * (ymax - ymin)) + 1\n\n cbin = [[0 for y in range(0, ydim)] for x in range(0, xdim)]\n for j in range(0, len(chipy_p)):\n xpos = int(0.5 * (chipx_p[j]-xmin))\n ypos = int(0.5 * (chipy_p[j]-ymin))\n cbin[xpos][ypos] += 1\n#\n#--- now find max position\n#\n vmax = 0\n xx = 0\n yy = 0\n for m in range(0, xdim):\n for n in range(0, ydim):\n if cbin[m][n] > vmax:\n vmax = cbin[m][n]\n xx = m\n yy = n\n#\n#--- take the mddle of the bin as the brightest spot\n#\n xv = int(xx * 2.0 + 1.0 + xmin)\n yv = int(yy * 2.0 + 1.0 + ymin)\n\n return [xv, yv]"
]
| [
"0.73915523",
"0.5584274",
"0.53825366",
"0.53255445",
"0.51729035",
"0.5165869",
"0.5158448",
"0.5107639",
"0.506456",
"0.50437576",
"0.50229704",
"0.5019746",
"0.501889",
"0.5014978",
"0.5013797",
"0.5013797",
"0.49993762",
"0.49964085",
"0.49760303",
"0.49754888",
"0.49720588",
"0.4968512",
"0.4935593",
"0.49353734",
"0.4919031",
"0.49113432",
"0.48860678",
"0.48816553",
"0.48432794",
"0.48381186"
]
| 0.76321465 | 0 |
Generate positions (x, y coordinates) for each template found by kilosort on the probe, or for the templates passed in through new_templates_array. This function assumes that the base_folder holds all the necessary .npy arrays. If no new_templates_array is passed, it looks for the templates.npy file (created by kilosort), which is the average of all spikes for each template (a (templates x time x channels) data cube). It also tries to find the file template_marking.npy, which is produced after cleaning with the spikesort_tsne_guis.clean_kilosort_templates GUI. If this file is found, only the non-noise templates have their positions evaluated; if it is not found, all templates are considered. If a new_templates_array is passed (a data cube of either (templates x time x channels) or (templates x channels x time) dimensions), it is used to calculate the positions. To find which channels are the most relevant in each template, the function looks into the template (a (channels x time) array). It then finds the minimum point of every channel, takes the median and standard deviation of those minima, and for each channel computes the difference between its minimum and the median. It demarcates the channels relevant to the template by keeping those whose difference is larger than a given multiple (threshold) of the standard deviation. It then takes the relevant channels of the template, finds the differences between each channel's minimum and its median value over time, orders the channels according to these differences, and assigns weights between 0 and 1 (0 for a difference of 0, 1 for the maximum difference). Finally, it finds the x, y positions of the selected channels and adds to the position of the largest-difference channel the weighted average positions of the remaining selected channels. | def generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):
# Load the required data from the kilosort folder
channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
if new_templates_array is None:
try:
templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))
except FileNotFoundError:
exit('No new_templates_array passed and no templates.npy found in folder')
try:
template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))
except FileNotFoundError:
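            # template_marking.npy (produced by the cleaning GUI) is missing, so keep every template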
template_markings = np.ones((len(templates)))
templates = templates[template_markings > 0, :, :]
else:
if new_templates_array.shape[1] > new_templates_array.shape[2]:
templates = np.reshape(new_templates_array, (new_templates_array.shape[0],
new_templates_array.shape[2],
new_templates_array.shape[1]))
else:
templates = new_templates_array
# Run the loop over all templates to get the positions
counter = 0
templates_positions = []
for template in templates:
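        # Find the channels relevant to this template and their trough-to-median deviations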
relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)
template_median_over_time = np.median(template, axis=0)
peaks_to_median = template_median_over_time - template.min(axis=0)
peaks_to_median = peaks_to_median[relevant_channels]
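        # Order the relevant channels by their deviation, strongest channel first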
relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]
peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)
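        # Append the relevant channels' baseline median as a reference value before normalizing;
        # _normalize returns weights in [0, 1] and the reference value is dropped again below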
peaks_to_median_sorted.append(np.median(template_median_over_time[relevant_channels]))
weights = _normalize(peaks_to_median_sorted)[:-1]
relevant_channels_positions = channel_positions[relevant_channels_sorted]
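        # Start from the strongest channel's position and shift it by the weighted mean offset of the remaining relevant channels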
pos_x = relevant_channels_positions[0, 0]
pos_y = relevant_channels_positions[0, 1]
new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])
new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])
templates_positions.append([new_pos_x, new_pos_y])
counter += 1
if not (counter % 100):
print('Completed ' + str(counter) + ' templates')
templates_positions = np.array(templates_positions)
np.save(os.path.join(base_folder, ct.WEIGHTED_TEMPLATE_POSITIONS_FILENAME), templates_positions)
return np.array(templates_positions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,\n used_spikes_indices=None, position_mult=2.25, threshold=0.1):\n # Load the required data from the kilosort folder\n channel_map = np.load(os.path.join(base_folder, 'channel_map.npy'))\n active_channel_map = np.squeeze(channel_map, axis=1)\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n\n spike_templates = np.load(os.path.join(base_folder, ct.SPIKE_TEMPLATES_FILENAME))\n templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))\n\n data_raw = np.memmap(os.path.join(base_folder, binary_data_filename),\n dtype=np.int16, mode='r')\n\n number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)\n data_raw_kilosorted = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')\n\n spike_times = np.squeeze(np.load(os.path.join(base_folder, ct.SPIKE_TIMES_FILENAME)).astype(np.int))\n\n time_points = 50\n if used_spikes_indices is None:\n used_spikes_indices = np.arange(0, len(spike_times))\n\n # Run the loop over all spikes to get the positions\n counter = 0\n weighted_average_postions = np.empty((len(used_spikes_indices), 2))\n spike_distance_on_probe = np.empty(len(used_spikes_indices))\n for spike_index in np.arange(0, len(used_spikes_indices)):\n spike_raw_data = data_raw_kilosorted[active_channel_map,\n (spike_times[used_spikes_indices[spike_index]]-time_points):\n (spike_times[used_spikes_indices[spike_index]]+time_points)]\n template = templates[spike_templates[used_spikes_indices[spike_index]], :, :].squeeze()\n relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)\n\n spike_raw_data_median_over_time = np.median(spike_raw_data, axis=1)\n peaks_to_median = spike_raw_data_median_over_time - spike_raw_data.min(axis=1)\n peaks_to_median = peaks_to_median[relevant_channels]\n\n relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]\n\n peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)\n peaks_to_median_sorted.append(np.median(spike_raw_data_median_over_time[relevant_channels]))\n\n weights = _normalize(peaks_to_median_sorted)[:-1]\n relevant_channels_positions = channel_positions[relevant_channels_sorted]\n\n pos_x = relevant_channels_positions[0, 0]\n pos_y = relevant_channels_positions[0, 1]\n\n new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])\n new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])\n weighted_average_postions[spike_index, :] = [new_pos_x, new_pos_y]\n spike_distance_on_probe[spike_index] = np.sqrt(np.power(new_pos_x, 2) + np.power(new_pos_y, 2))\n\n counter += 1\n if counter % 5000 == 0:\n print('Completed ' + str(counter) + ' spikes')\n weighted_average_postions = weighted_average_postions * position_mult\n\n # sort according to position on probe\n spike_indices_sorted_by_probe_distance = np.array([b[0] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n spike_distances_on_probe_sorted = np.array([b[1] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n\n np.save(os.path.join(base_folder, ct.WEIGHTED_SPIKE_POSITIONS_FILENAME), weighted_average_postions)\n\n return weighted_average_postions, spike_distance_on_probe, \\\n spike_indices_sorted_by_probe_distance, spike_distances_on_probe_sorted",
"def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass",
"def find_best_template(input_measurements, z, library=K15_SED_templates, \n NUM_BEST_TEMPLATES=3, visualize=True, ax=None, verbose=True):\n clean_measurements = parse_measurements(input_measurements)\n\n # record chi^2 of successful templates\n chi_squareds = np.zeros_like(library, dtype=float) * np.nan\n normalizations = np.zeros_like(library, dtype=float) * np.nan\n infrared_luminosities = np.zeros_like(library, dtype=float) * np.nan\n\n for i, template in enumerate(library):\n chi2, norm = fit_sed(template=template, measurements=clean_measurements, z=z, \n verbose=verbose)\n\n chi_squareds[i] = chi2\n normalizations[i] = norm\n infrared_luminosities[i] = infrared_luminosity(template, norm)\n\n model_order = np.argsort(chi_squareds)\n lowest_chi2 = np.nanmin(chi_squareds)\n\n best_template = library[np.nanargmin(chi_squareds)]\n best_L_IR = infrared_luminosities[np.nanargmin(chi_squareds)]\n best_norm = normalizations[np.nanargmin(chi_squareds)]\n\n if VISUALIZE:\n # plot up to the top 5 successful templates\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(8, 5))\n\n for arg, ls in zip(model_order[:NUM_BEST_TEMPLATES], ['-', '--', ':', '-.']):\n\n template = K15_SED_templates[arg]\n chi2 = chi_squareds[arg]\n norm = normalizations[arg]\n L_IR = infrared_luminosity(best_template, norm)\n\n waves, f_nu = model_sed(template, z)\n \n if np.isnan(chi2):\n continue\n\n log_L_IR_text = r'$\\log (L_{{\\rm IR}}/L_\\odot)={:.2f}$'.format(np.log10(L_IR))\n\n ax.plot(waves, f_nu * norm, alpha=(0.5 + lowest_chi2 / (2 * chi2)), ls=ls, c='k',\n label=r'{} ($\\chi_\\nu^2={:.2f}$, {:s})'.format(template[:3]+template[-1], \n chi2, log_L_IR_text))\n\n # plot measurements (including upper limits)\n for measured_wave, measured_flux, measured_uncertainty in clean_measurements:\n if np.isfinite(measured_flux):\n ax.errorbar(measured_wave, measured_flux, measured_uncertainty, \n marker='o', color='black', ls='', zorder=10)\n else:\n ax.errorbar(measured_wave, 3 * measured_uncertainty, yerr=measured_uncertainty,\n color='black', uplims=True)\n \n # aesthetics (if within a subplot, then don't show these)\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n if ax is None:\n ax.set_xlabel(r'Observed wavelength [$\\mu$m]', fontsize=12)\n ax.set_ylabel(r'Flux density [mJy]', fontsize=12)\n\n # legend\n ax.legend(frameon=False, loc='lower center')\n\n\n return best_template, best_L_IR, best_norm",
"def mseed_2_templates(wav_dirs, cat, outdir, length, prepick,\n highcut=None, lowcut=None, f_order=None,\n samp_rate=None, min_snr=2.,\n start=None, end=None, miniseed=True,\n asdf_file=False, debug=1):\n\n # Establish date range for template creation\n cat.events.sort(key=lambda x: x.origins[-1].time)\n if start:\n cat_start = datetime.datetime.strptime(start, '%d/%m/%Y')\n cat_end = datetime.datetime.strptime(end, '%d/%m/%Y')\n else:\n cat_start = cat[0].origins[-1].time.date\n cat_end = cat[-1].origins[-1].time.date\n for date in date_generator(cat_start, cat_end):\n dto = UTCDateTime(date)\n print('Processing templates for: %s' % str(dto))\n q_start = dto - 10\n q_end = dto + 86410\n # Establish which events are in this day\n sch_str_start = 'time >= %s' % str(dto)\n sch_str_end = 'time <= %s' % str(dto + 86400)\n tmp_cat = cat.filter(sch_str_start, sch_str_end)\n if len(tmp_cat) == 0:\n print('No events on: %s' % str(dto))\n continue\n # Which stachans we got?\n stachans = {pk.waveform_id.station_code: [] for ev in tmp_cat\n for pk in ev.picks}\n for ev in tmp_cat:\n for pk in ev.picks:\n chan_code = pk.waveform_id.channel_code\n if chan_code not in stachans[pk.waveform_id.station_code]:\n stachans[pk.waveform_id.station_code].append(chan_code)\n wav_read_start = timer()\n # Be sure to go +/- 10 sec to account for GeoNet shit timing\n if asdf_file:\n with pyasdf.ASDFDataSet(asdf_file) as ds:\n st = Stream()\n for sta, chans in iter(stachans.items()):\n for station in ds.ifilter(ds.q.station == sta,\n ds.q.channel == chans,\n ds.q.starttime >= q_start,\n ds.q.endtime <= q_end):\n st += station.raw_recording\n elif miniseed:\n wav_ds = ['%s%d' % (d, dto.year) for d in wav_dirs]\n st = grab_day_wavs(wav_ds, dto, stachans)\n wav_read_stop = timer()\n print('Reading waveforms took %.3f seconds' % (wav_read_stop\n - wav_read_start))\n print('Looping through stachans to merge/resamp')\n stachans = [(tr.stats.station, tr.stats.channel) for tr in st]\n for stachan in list(set(stachans)):\n tmp_st = st.select(station=stachan[0], channel=stachan[1])\n if len(tmp_st) > 1 and len(set([tr.stats.sampling_rate for tr in tmp_st])) > 1:\n print('Traces from %s.%s have differing samp rates' % (stachan[0], stachan[1]))\n for tr in tmp_st:\n st.remove(tr)\n tmp_st.resample(sampling_rate=samp_rate)\n st += tmp_st\n st.merge(fill_value='interpolate')\n resamp_stop = timer()\n print('Resample/merge took %s secs' % str(resamp_stop - wav_read_stop))\n print('Preprocessing...')\n # Process the stream\n try:\n st1 = pre_processing.dayproc(st, lowcut=lowcut, highcut=highcut,\n filt_order=f_order, samp_rate=samp_rate,\n starttime=dto, debug=debug, ignore_length=True,\n num_cores=4)\n except NotImplementedError or Exception as e:\n print('Found error in dayproc, noting date and continuing')\n print(e)\n with open('%s/dayproc_errors.txt' % outdir, mode='a') as fo:\n fo.write('%s\\n%s\\n' % (str(date), e))\n continue\n print('Feeding stream to template_gen...')\n for event in tmp_cat:\n print('Copying stream to keep away from the trim...')\n trim_st = copy.deepcopy(st1)\n ev_name = str(event.resource_id).split('/')[-1]\n pk_stachans = ['%s.%s' % (pk.waveform_id.station_code,\n pk.waveform_id.channel_code)\n for pk in event.picks]\n # Run check to ensure that there is only one pick for each channel\n dups = [pk for pk, count\n in collections.Counter(pk_stachans).items() if count > 1]\n if len(dups) > 0:\n print('Event %s still has dup picks. 
Skipping' % ev_name)\n continue\n template = template_gen(event.picks, trim_st, length=length,\n prepick=prepick, min_snr=min_snr)\n if len([tr for tr in template\n if tr.stats.channel[-1] == 'Z']) < 6:\n print('Skipping template with fewer than 6 Z-comp traces')\n continue\n # temp_list.append(template)\n print('Writing event %s to file...' % ev_name)\n template.write('%s/%s.mseed' % (outdir, ev_name),\n format=\"MSEED\")\n del trim_st\n del tmp_cat, st1, st",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack00.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(self.boxsize)+\",\"+str(self.boxsize)\n\t\t\t\t+\" spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' % tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()",
"def make_from_templates(templates, min_amplitude, max_amplitude,\n n_per_amplitude_step, probabilities=None,\n return_metadata=False,\n n_repeat=1):\n logger = logging.getLogger(__name__)\n\n logger.debug('templates shape: %s, min amplitude: %s, '\n 'max_amplitude: %s', templates.shape, min_amplitude,\n max_amplitude)\n\n n_templates, waveform_length, n_neighbors = templates.shape\n\n n_per_template = n_per_amplitude_step * n_repeat\n\n x = np.zeros((n_per_template * n_templates,\n waveform_length, n_neighbors))\n\n d = max_amplitude - min_amplitude\n\n amps_range = (min_amplitude + np.arange(n_per_amplitude_step)\n * d / (n_per_amplitude_step - 1))\n\n if probabilities is not None:\n amps_range = draw_with_group_probabilities(amps_range, probabilities)\n\n amps_range = amps_range[:, np.newaxis, np.newaxis]\n\n # go over every template\n for k in range(n_templates):\n\n # get current template and scale it\n current = templates[k]\n amp = np.max(np.abs(current))\n scaled = (current/amp)[np.newaxis, :, :]\n\n # create n clean spikes by scaling the template along the range\n spikes_in_range = scaled * amps_range\n\n # repeat n times\n spikes_in_range_repeated = np.repeat(spikes_in_range,\n repeats=n_repeat,\n axis=0)\n\n x[k * n_per_template:\n (k + 1) * n_per_template] = spikes_in_range_repeated\n\n if return_metadata:\n ids = [[k]*n_per_amplitude_step for k in range(n_templates)]\n ids = np.array([item for sublist in ids for item in sublist])\n metadata = dict(ids=ids)\n return yarr.ArrayWithMetadata(x, metadata)\n else:\n return x",
"def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. \"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])",
"def updateTemplateStack(self, alignedstack, partlist, iternum):\n\n\t\t#templatestr = os.path.join(self.params['rundir'], \"templates/filt*.mrc\")\n\t\t#oldfilelist = glob.glob(templatestr)\n\n\t\t### clear old stacks\n\t\ttemplatestack = os.path.join(self.params['rundir'], (\"templatestack%02d.spi\" % iternum))\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### calculate correlation stats\n\t\tstatlist = []\n\t\tfor partdict in partlist:\n\t\t\tstatlist.append(partdict['score'])\n\t\tstatlist.sort()\n\t\tcutoff = statlist[int(0.1*len(partlist))]*0.999\n\t\tapDisplay.printMsg(\"using a 10% correlation cutoff of: \"+str(round(cutoff)))\n\n\t\t### init list of files\n\t\tkeeplists = []\n\t\tfor templatenum in range(1, self.params['numtemplate']+1):\n\t\t\tf = open((\"templates/keeplist%02d-%02d.list\" % (iternum, templatenum)), \"w\")\n\t\t\tkeeplists.append(f)\n\t\tjunk = open((\"templates/rejectlist%02d.list\" % (iternum)), \"w\")\n\n\t\t### allocate particles to keep lists\n\t\tnumjunk = 0\n\t\tfor partdict in partlist:\n\t\t\t#EMAN lists start at zero\n\t\t\tif partdict['score'] > cutoff:\n\t\t\t\tkeeplists[partdict['template']-1].write(str(partdict['num']-1)+\"\\n\")\n\t\t\telse:\n\t\t\t\tnumjunk+=1\n\t\t\t\tjunk.write(str(partdict['num']-1)+\"\\n\")\n\t\tfor f in keeplists:\n\t\t\tf.close()\n\t\tjunk.close()\n\n\t\t### average junk for fun\n\t\tapDisplay.printMsg(str(numjunk)+\" particles were marked as junk\")\n\t\tif numjunk == 0:\n\t\t\tjunk = open((\"templates/rejectlist%02d.list\" % (iternum)), \"w\")\n\t\t\trandpart = random.random()*(len(partlist)-1)\n\t\t\tjunk.write(str(randpart)+\"\\n\")\n\t\t\tjunk.close()\n\t\tjunklist = \"templates/rejectlist%02d.list\" % (iternum)\n\t\tjunkmrcfile = \"templates/junkavg%02d.mrc\" % (iternum)\n\t\temancmd = (\"proc2d \"+alignedstack+\" \"+junkmrcfile\n\t\t\t+\" list=\"+junklist\n\t\t\t+\" edgenorm average norm=0,1 \")\n\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\t### create averaged templates\n\t\tfilelist = []\n\t\tfor templatenum in range(1, self.params['numtemplate']+1):\n\t\t\tkeeplist = \"templates/keeplist%02d-%02d.list\" % (iternum, templatenum)\n\t\t\tmrcfile = \"templates/templateavg%02d-%02d.mrc\" % (iternum, templatenum)\n\t\t\tif os.path.isfile(keeplist) and os.stat(keeplist)[6] > 1:\n\t\t\t\temancmd = (\"proc2d \"+alignedstack+\" \"+mrcfile\n\t\t\t\t\t+\" list=\"+keeplist\n\t\t\t\t\t+\" edgenorm average norm=0,1 \")\n\t\t\t\tif self.params['csym'] is not None:\n\t\t\t\t\temancmd += \"sym=c%d\"%(self.params['csym'])\n\t\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\t\t\telse:\n\t\t\t\tapDisplay.printWarning(\"No particles aligned to template \"+str(templatenum))\n\t\t\t\tif numjunk == 0:\n\t\t\t\t\tapDisplay.printWarning(\"Using random particle as new template\")\n\t\t\t\telse:\n\t\t\t\t\tapDisplay.printWarning(\"Using worst 10% of particles as new template\")\n\t\t\t\tlasttemplate = \"templates/templateavg%02d-%02d.mrc\" % (iternum-1, templatenum)\n\t\t\t\tif not os.path.isfile(lasttemplate):\n\t\t\t\t\tlasttemplate = \"templates/scaledTemplate%d.mrc\" % (templatenum)\n\t\t\t\tlastdata = imagenorm.maxNormalizeImage(imagefile.mrcToArray(lasttemplate))\n\t\t\t\tjunkdata = imagenorm.maxNormalizeImage(imagefile.mrcToArray(junkmrcfile))\n\t\t\t\timagefile.arrayToMrc((lastdata+3.0*junkdata), \"temp.mrc\")\n\t\t\t\temancmd = (\"proc2d temp.mrc \"+mrcfile\n\t\t\t\t\t+\" addnoise=1.5 \"\n\t\t\t\t\t+\" edgenorm norm=0,1 \")\n\t\t\t\tapEMAN.executeEmanCmd(emancmd, 
showcmd=False)\n\t\t\tfilelist.append(mrcfile)\n\n\t\t### create new template stack\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d \"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(self.boxsize)+\",\"+str(self.boxsize)\n\t\t\t\t+\" edgenorm norm=0,1 spiderswap \")\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def compute_templates(filename, TDUR, filt, ratios, dt, ncor, window, \\\n winlength, nattempts, waittime, method='RMS'):\n # To transform latitude and longitude into kilometers\n a = 6378.136\n e = 0.006694470\n lat0 = 41.0\n lon0 = -123.0\n dx = (pi / 180.0) * a * cos(lat0 * pi / 180.0) / sqrt(1.0 - e * e * \\\n sin(lat0 * pi / 180.0) * sin(lat0 * pi / 180.0))\n dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(lat0 * \\\n pi / 180.0) * sin(lat0 * pi / 180.0)) ** 1.5)\n\n # Get the names of the stations which have a waveform for this LFE family\n file = open('../data/Plourde_2015/detections/' + filename + \\\n '_detect5_cull.txt')\n first_line = file.readline().strip()\n staNames = first_line.split()\n file.close()\n\n # Get the time of LFE detections\n LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \\\n '_detect5_cull.txt', \\\n dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \\\n 'formats': (np.float, '|S6', np.int, np.float, np.float)}, \\\n skiprows=2)\n\n # Get the network, channels, and location of the stations\n staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \\\n sep=r'\\s{1,}', header=None)\n staloc.columns = ['station', 'network', 'channels', 'location', \\\n 'server', 'latitude', 'longitude']\n\n # Get the location of the source of the LFE\n LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt', \\\n dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH', \\\n 'eZ', 'nb'), \\\n 'formats': ('S13', 'S3', np.float, np.float, np.float, \\\n np.float, np.float, np.int)}, \\\n skiprows=1)\n for ie in range(0, len(LFEloc)):\n if (filename == LFEloc[ie][0].decode('utf-8')):\n lats = LFEloc[ie][2]\n lons = LFEloc[ie][3]\n xs = dx * (lons - lon0)\n ys = dy * (lats - lat0)\n\n # Create directory to store the waveforms\n namedir = 'templates/' + filename\n if not os.path.exists(namedir):\n os.makedirs(namedir)\n\n # Read origin time and station slowness files\n origintime = pickle.load(open('timearrival/origintime.pkl', 'rb'))\n slowness = pickle.load(open('timearrival/slowness.pkl', 'rb'))\n\n # File to write error messages\n errorfile = 'error/' + filename + '.txt'\n\n # Loop over stations\n for station in staNames:\n # Create streams\n EW = Stream()\n NS = Stream()\n UD = Stream()\n # Get station metadata for downloading\n for ir in range(0, len(staloc)):\n if (station == staloc['station'][ir]):\n network = staloc['network'][ir]\n channels = staloc['channels'][ir]\n location = staloc['location'][ir]\n server = staloc['server'][ir]\n # Compute source-receiver distance\n latitude = staloc['latitude'][ir]\n longitude = staloc['longitude'][ir]\n xr = dx * (longitude - lon0)\n yr = dy * (latitude - lat0)\n distance = sqrt((xr - xs) ** 2.0 + (yr - ys) ** 2.0)\n # Loop on LFEs\n for i in range(0, np.shape(LFEtime)[0]):\n YMD = LFEtime[i][1]\n myYear = 2000 + int(YMD[0 : 2])\n myMonth = int(YMD[2 : 4])\n myDay = int(YMD[4 : 6])\n myHour = LFEtime[i][2] - 1\n myMinute = int(LFEtime[i][3] / 60.0)\n mySecond = int(LFEtime[i][3] - 60.0 * myMinute)\n myMicrosecond = int(1000000.0 * \\\n (LFEtime[i][3] - 60.0 * myMinute - mySecond))\n Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \\\n hour=myHour, minute=myMinute, second=mySecond, \\\n microsecond=myMicrosecond)\n Tstart = Tori - TDUR\n Tend = Tori + 60.0 + TDUR\n # First case: we can get the data from IRIS\n if (server == 'IRIS'):\n (D, orientation) = get_from_IRIS(station, network, channels, \\\n location, Tstart, Tend, filt, dt, nattempts, 
waittime, \\\n errorfile)\n # Second case: we get the data from NCEDC\n elif (server == 'NCEDC'):\n (D, orientation) = get_from_NCEDC(station, network, channels, \\\n location, Tstart, Tend, filt, dt, nattempts, waittime, \\\n errorfile)\n else:\n raise ValueError( \\\n 'You can only download data from IRIS and NCEDC')\n if (type(D) == obspy.core.stream.Stream):\n # Add to stream\n if (channels == 'EH1,EH2,EHZ'):\n EW.append(D.select(channel='EH1').slice(Tori, \\\n Tori + 60.0)[0])\n NS.append(D.select(channel='EH2').slice(Tori, \\\n Tori + 60.0)[0])\n UD.append(D.select(channel='EHZ').slice(Tori, \\\n Tori + 60.0)[0])\n else:\n EW.append(D.select(component='E').slice(Tori, \\\n Tori + 60.0)[0])\n NS.append(D.select(component='N').slice(Tori, \\\n Tori + 60.0)[0])\n UD.append(D.select(component='Z').slice(Tori, \\\n Tori + 60.0)[0])\n else:\n print('Failed at downloading data')\n # Stack\n if (len(EW) > 0 and len(NS) > 0 and len(UD) > 0):\n # Stack waveforms\n EWstack = linstack([EW], normalize=True, method=method) \n NSstack = linstack([NS], normalize=True, method=method)\n UDstack = linstack([UD], normalize=True, method=method)\n # Initializations\n maxCC = np.zeros(len(EW))\n cc0EW = np.zeros(len(EW))\n cc0NS = np.zeros(len(EW))\n cc0UD = np.zeros(len(EW))\n if (window == True):\n # Get time arrival\n arrivaltime = origintime[filename] + \\\n slowness[station] * distance\n Tmin = arrivaltime - winlength / 2.0\n Tmax = arrivaltime + winlength / 2.0\n if Tmin < 0.0:\n Tmin = 0.0\n if Tmax > EWstack[0].stats.delta * (EWstack[0].stats.npts - 1):\n Tmax = EWstack[0].stats.delta * (EWstack[0].stats.npts - 1)\n ibegin = int(Tmin / EWstack[0].stats.delta)\n iend = int(Tmax / EWstack[0].stats.delta) + 1\n # Cross correlation\n for i in range(0, len(EW)):\n ccEW = correlate(EWstack[0].data[ibegin : iend], \\\n EW[i].data[ibegin : iend], ncor)\n ccNS = correlate(NSstack[0].data[ibegin : iend], \\\n NS[i].data[ibegin : iend], ncor)\n ccUD = correlate(UDstack[0].data[ibegin : iend], \\\n UD[i].data[ibegin : iend], ncor)\n maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)\n cc0EW[i] = ccEW[ncor]\n cc0NS[i] = ccNS[ncor]\n cc0UD[i] = ccUD[ncor]\n else:\n # Cross correlation\n for i in range(0, len(EW)):\n ccEW = correlate(EWstack[0].data, EW[i].data, ncor)\n ccNS = correlate(NSstack[0].data, NS[i].data, ncor)\n ccUD = correlate(UDstack[0].data, UD[i].data, ncor)\n maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)\n cc0EW[i] = ccEW[ncor]\n cc0NS[i] = ccNS[ncor]\n cc0UD[i] = ccUD[ncor]\n # Sort cross correlations\n index = np.flip(np.argsort(maxCC), axis=0)\n EWbest = Stream()\n NSbest = Stream()\n UDbest = Stream()\n # Compute stack of best LFEs\n for j in range(0, len(ratios)):\n nLFE = int(ratios[j] * len(EW) / 100.0)\n EWselect = Stream()\n NSselect = Stream()\n UDselect = Stream()\n for i in range(0, nLFE):\n EWselect.append(EW[index[i]])\n NSselect.append(NS[index[i]])\n UDselect.append(UD[index[i]])\n # Stack best LFEs\n EWbest.append(linstack([EWselect], normalize=True, \\\n method=method)[0])\n NSbest.append(linstack([NSselect], normalize=True, \\\n method=method)[0])\n UDbest.append(linstack([UDselect], normalize=True, \\\n method=method)[0])\n # Plot figure\n plt.figure(1, figsize=(20, 15))\n params = {'xtick.labelsize':16,\n 'ytick.labelsize':16}\n pylab.rcParams.update(params) \n colors = cm.rainbow(np.linspace(0, 1, len(ratios)))\n # East - West component\n ax1 = plt.subplot(311)\n dt = EWstack[0].stats.delta\n nt = EWstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in 
range(0, len(ratios)):\n if (method == 'RMS'):\n norm = EWbest[j].data / np.sqrt(np.mean(np.square( \\\n EWbest[j].data)))\n elif (method == 'MAD'):\n norm = EWbest[j].data / np.median(np.abs(EWbest[j].data - \\\n np.median(EWbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = EWstack[0].data / np.sqrt(np.mean(np.square( \\\n EWstack[0].data)))\n elif (method == 'MAD'):\n norm = EWstack[0].data / np.median(np.abs(EWstack[0].data - \\\n np.median(EWstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('East - West component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # North - South component\n ax2 = plt.subplot(312)\n dt = NSstack[0].stats.delta\n nt = NSstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in range(0, len(ratios)):\n if (method == 'RMS'):\n norm = NSbest[j].data / np.sqrt(np.mean(np.square( \\\n NSbest[j].data)))\n elif (method == 'MAD'):\n norm = NSbest[j].data / np.median(np.abs(NSbest[j].data - \\\n np.median(NSbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = NSstack[0].data / np.sqrt(np.mean(np.square( \\\n NSstack[0].data)))\n elif (method == 'MAD'):\n norm = NSstack[0].data / np.median(np.abs(NSstack[0].data - \\\n np.median(NSstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('North - South component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # Vertical component\n ax3 = plt.subplot(313)\n dt = UDstack[0].stats.delta\n nt = UDstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in range(0, len(ratios)):\n if (method == 'RMS'):\n norm = UDbest[j].data / np.sqrt(np.mean(np.square( \\\n UDbest[j].data)))\n elif (method == 'MAD'):\n norm = UDbest[j].data / np.median(np.abs(UDbest[j].data - \\\n np.median(UDbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = UDstack[0].data / np.sqrt(np.mean(np.square( \\\n UDstack[0].data)))\n elif (method == 'MAD'):\n norm = UDstack[0].data / np.median(np.abs(UDstack[0].data - \\\n np.median(UDstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('Vertical component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # End figure\n plt.suptitle(station, fontsize=24)\n plt.savefig(namedir + '/' + station + '.eps', format='eps')\n ax1.clear()\n ax2.clear()\n ax3.clear()\n plt.close(1)\n # Save stacks into files\n savename = namedir 
+ '/' + station +'.pkl'\n pickle.dump([EWstack[0], NSstack[0], UDstack[0]], \\\n open(savename, 'wb'))\n for j in range(0, len(ratios)):\n savename = namedir + '/' + station + '_' + \\\n str(int(ratios[j])) + '.pkl'\n pickle.dump([EWbest[j], NSbest[j], UDbest[j]], \\\n open(savename, 'wb'))\n # Save cross correlations into files\n savename = namedir + '/' + station + '_cc.pkl'\n pickle.dump([cc0EW, cc0NS, cc0UD], \\\n open(savename, 'wb'))",
"def prepare_CvD16_for_M2L_calc(templates_lam_range, verbose=False):\n import glob\n import os\n template_glob=os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*.s100')\n\n vcj_models=sorted(glob.glob(template_glob))\n temp_lamdas, x35, x3, x23, kroupa, flat=np.genfromtxt(vcj_models[-1], unpack=True)\n\n n_ages=7\n n_zs=5\n n_imfs=5\n\n \n\n\n Zs=['m1.5', 'm1.0', 'm0.5', 'p0.0', 'p0.2']\n ages=[1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.5]\n model_imfs_order=['x35', 'x3', 'x23', 'kroupa', 'flat']\n\n t_mask = ((temp_lamdas > templates_lam_range[0]) & (temp_lamdas <templates_lam_range[1]))\n\n\n\n y=x35[t_mask]\n x=temp_lamdas[t_mask]\n\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x)\n\n templates=np.empty((len(out), n_ages, n_zs, n_imfs))\n\n\n\n for a, Z in enumerate(Zs): \n for b, age in enumerate(ages):\n model=glob.glob(os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*{}*{}.ssp.s100'.format(Z, age)))[0]\n if verbose:\n print 'Loading {}'.format(model)\n data=np.genfromtxt(model)\n\n for c, counter in enumerate(reversed(range(1, data.shape[-1]))):\n \n #Interpolate templates onto a uniform wavelength grid and then log-rebin\n y=data[:, counter][t_mask] \n x=temp_lamdas[t_mask]\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x) \n\n templates[:, b, a, c]=out\n\n return templates, new_x",
"def plume_location_multiple_realizations(t_target_array, folder, prefix, n_realization, attrib='x_array', start=0):\n #first stich together all the arrays\n #read in he first one\n\n file_name = prefix + '_' + str(start) + \".pkl\"\n file_address = os.path.join(folder, file_name)\n with open(file_address, 'rb') as input:\n dataHolder = pickle.load(input)\n x_big = getattr(dataHolder, attrib)\n t_big = dataHolder.t_array\n print 'making large array from realizations...'\n for i in range(start+1, start+n_realization):\n file_name = prefix + \"_\" + str(i) + \".pkl\"\n file_address = os.path.join(folder, file_name)\n with open(file_address, 'rb') as input:\n dataHolder = pickle.load(input)\n x_mat = getattr(dataHolder, attrib)\n t_mat = dataHolder.t_array\n x_big = np.vstack((x_big, x_mat))\n t_big = np.vstack((t_big, t_mat))\n print 'done'\n n_given_times = len(t_target_array)\n n_particles = t_big.shape[0]\n out_put_array = np.zeros((n_given_times, n_particles))\n for i in range(n_given_times):\n t = t_target_array[i]\n out_put_array[i, :] = plume_location_at_given_time(t, x_big, t_big)\n return out_put_array",
"def match_templates(image, templates, overlap=0.15):\n default_threshold = 80\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rectangles = np.empty([0, 2, 2], dtype=int)\n for template in templates:\n threshold = template.get('threshold', default_threshold)\n if threshold > 100:\n threshold = 100\n elif threshold < 0:\n threshold = 0\n threshold /= 100.0\n template_image = template.get('image')\n template_flip = template.get('flip')\n template_mask = template.get('mask')\n template_method = template.get('method', 'canny') # defaults to canny\n gray_template = cv2.cvtColor(template_image, cv2.COLOR_BGR2GRAY)\n transformations = [lambda im: im]\n if template_flip:\n if template_flip[0] in ('h', 'a'):\n transformations.append(lambda im: cv2.flip(im, 1))\n if template_flip[0] in ('v', 'a'):\n transformations.append(lambda im: cv2.flip(im, 0))\n if template_flip[0] in ('b', 'a'):\n transformations.append(lambda im: cv2.flip(cv2.flip(im, 1), 0))\n for transformation in transformations:\n transformed_template = transformation(gray_template)\n height, width = transformed_template.shape\n if template_mask is not None:\n transformed_mask = transformation(template_mask)\n else:\n transformed_mask = None\n results = match_template_mask(gray_image, transformed_template,\n transformed_mask, template_method)\n index = results >= threshold\n y1, x1 = np.where(index)\n y2, x2 = y1 + height, x1 + width\n coords = np.array([x1, y1, x2, y2], dtype=int).T\n probs = results[index]\n boxes = np.array(\n object_detection.non_max_suppression(coords, probs, overlap)\n )\n xyboxes = boxes.reshape(boxes.shape[0], 2, 2) # list of x,y points\n rectangles = np.vstack([rectangles, xyboxes])\n return rectangles.astype(int)",
"def prepare_templates(params, outfile, redo=False):\n if os.path.exists(outfile) and not redo:\n return\n emiles = EMILES()\n wmin = params[\"wmin\"] * u.micrometer\n wmax = params[\"wmax\"] * u.micrometer\n # Modify wmin to compensate for the recession velocity of the system\n zmax = (params[\"vsyst\"] + 3000) / const.c.to(\"km/s\").value\n wrest = wmin / (1 + zmax)\n grid = np.array(np.meshgrid(params[\"ages\"], params[\"metals\"],\n params[\"bis\"])).T.reshape(-1, 3)\n ssppars = Table(grid, names=[\"T\", \"Z\", \"imf\"])\n filenames = []\n for args in grid:\n filenames.append(os.path.join(emiles.data_dir,\n emiles.filename(*args)))\n wave, spec = misc.read_spec(filenames[0])\n wave = wave * u.angstrom\n idx = np.where((wave > wrest) & (wave <= wmax))\n wave = wave[idx]\n spec = spec[idx]\n wrange = [wave[0].to(\"angstrom\").value, wave[-1].to(\"angstrom\").value]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps = np.zeros((len(filenames), len(newflux)))\n print(\"Processing SSP files\")\n for i, fname in tqdm(enumerate(filenames)):\n spec = fits.getdata(fname)[idx]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps[i] = newflux\n norm = np.median(ssps)\n ssps /= norm\n hdu1 = fits.PrimaryHDU(ssps)\n hdu1.header[\"EXTNAME\"] = \"SSPS\"\n hdu1.header[\"BSCALE\"] = (norm, \"Scale to convert from ADU to flux.\")\n hdu2 = fits.BinTableHDU(ssppars)\n hdu2.header[\"EXTNAME\"] = \"PARAMS\"\n hdu1.header[\"CRVAL1\"] = logLam[0]\n hdu1.header[\"CD1_1\"] = logLam[1] - logLam[0]\n hdu1.header[\"CRPIX1\"] = 1.\n # Making wavelength array\n hdu3 = fits.BinTableHDU(Table([logLam], names=[\"loglam\"]))\n hdu3.header[\"EXTNAME\"] = \"LOGLAM\"\n hdulist = fits.HDUList([hdu1, hdu2, hdu3])\n hdulist.writeto(outfile, overwrite=True)\n return",
"def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template",
"def array_templates(templates, max_R=5000):\n from grizli.utils_c.interp import interp_conserve_c\n \n wave = np.unique(np.hstack([templates[t].wave for t in templates]))\n clipsum, iter = 1, 0\n while (clipsum > 0) & (iter < 10):\n clip = np.gradient(wave)/wave < 1/max_R\n idx = np.arange(len(wave))[clip]\n wave[idx[::2]] = np.nan\n wave = wave[np.isfinite(wave)]\n iter += 1\n clipsum = clip.sum()\n #print(iter, clipsum)\n \n NTEMP = len(templates)\n flux_arr = np.zeros((NTEMP, len(wave)))\n \n for i, t in enumerate(templates):\n flux_arr[i,:] = interp_conserve_c(wave, templates[t].wave,\n templates[t].flux)\n \n is_line = np.array([t.startswith('line ') for t in templates])\n \n return wave, flux_arr, is_line",
"def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n 
### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list",
"def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_):\n\n # thickness of rings for template match\n #commented out because this is passed now\n #rw = 8 #default 2 from DeepMoon project, we use 8 or 4\n\n # threshold target\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = [] # coordinates extracted from template matching\n corr = [] # correlation coefficient for coordinates set\n for r in radii:\n # template\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n\n # template match - result is nxn array of probabilities\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n\n # store x,y,r\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n\n # remove duplicates from template matching at neighboring radii/locations\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n\n dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n # replace current coord with max match probability coord in\n # duplicate list\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n\n return coords",
"def search_data(templates, pols, matched_pols=False, reverse_nesting=False, flatten=False):\n # type check\n if isinstance(templates, str):\n templates = [templates]\n if isinstance(pols, (str, int)):\n pols = [pols]\n # search for datafiles\n datafiles = []\n datapols = []\n for pol in pols:\n dps = []\n dfs = []\n for template in templates:\n _dfs = glob.glob(template.format(pol=pol))\n if len(_dfs) > 0:\n dfs.extend(_dfs)\n dps.extend([pol for df in _dfs])\n if len(dfs) > 0:\n datafiles.append(sorted(dfs))\n datapols.append(dps)\n # get unique files\n allfiles = [item for sublist in datafiles for item in sublist]\n allpols = [item for sublist in datapols for item in sublist]\n unique_files = set()\n for _file in allfiles:\n for pol in pols:\n if f\".{pol}.\" in _file:\n unique_files.update({_file.replace(f\".{pol}.\", \".{pol}.\")})\n break\n unique_files = sorted(unique_files)\n # check for unique files with all pols\n if matched_pols:\n Npols = len(pols)\n _templates = []\n for _file in unique_files:\n goodfile = True\n for pol in pols:\n if _file.format(pol=pol) not in allfiles:\n goodfile = False\n if goodfile:\n _templates.append(_file)\n\n # achieve goal by calling search_data with new _templates that are polarization matched\n datafiles, datapols = search_data(_templates, pols, matched_pols=False, reverse_nesting=False)\n # reverse nesting if desired\n if reverse_nesting:\n datafiles = []\n datapols = []\n for _file in unique_files:\n dfs = []\n dps = []\n for pol in pols:\n df = _file.format(pol=pol)\n if df in allfiles:\n dfs.append(df)\n dps.append(pol)\n datafiles.append(dfs)\n datapols.append(dps)\n # flatten\n if flatten:\n datafiles = [item for sublist in datafiles for item in sublist]\n datapols = [item for sublist in datapols for item in sublist]\n\n return datafiles, datapols",
"def applyPhotoZ (self,arr):\n print \"Applying Template SED PZs\"\n\n ztrue = arr['z']\n\n #select a template\n templates = ['El_B2004a.sed']+['Sbc_B2004a.sed','Scd_B2004a.sed']\n templates = templates +['Im_B2004a.sed','SB3_B2004a.sed','SB2_B2004a.sed','ssp_25Myr_z008.sed','ssp_5Myr_z008.sed']\n\n #read in f_mod files, interpolate, get values of f_mod_b\n ngals = len(ztrue)\n\n f_mod_o = np.zeros((self.nb, ngals))\n for z in range(ngals):\n #currently templates are randomly chosen but probably should be an input with true z\n templateno = np.random.choice(range(self.nt))\n for b in range(self.nb):\n spl = InterpolatedUnivariateSpline(self.z_grid, self.f_mod[:,templateno,b])\n f_mod_o[b][z] = spl(ztrue[z])\n\n #select sigma_b - 10% for now\n sigma = 0.1*f_mod_o\n #select observed fluxes f_obs_b = f_mod_b + sigma_b*rando\n f_obs = f_mod_o+ sigma * (np.random.normal(0.,1.,self.nb*ngals).reshape((self.nb,ngals)))\n # I don't seem to be able to find a more efficient way\n arrx=np.zeros(ngals,dtype=[('pz_f_obs',float,(self.nb,)),('pz_flux_sigma',float,(self.nb,))])\n arrx['pz_f_obs']=f_obs.T\n arrx['pz_flux_sigma']=sigma.T\n arr = recfunctions.merge_arrays((arr,arrx),flatten=True,usemask=False)\n return arr",
"def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n # print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # TODO - make the default configurable.\n# if src.crs == None:\n# src.crs = CRS.from_epsg(4326)\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n # print(type(vrt))\n # save the file as an enumerated tiff. reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs",
"def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)",
"def ts_method(signal, peaks, template_duration: float = 0.12, fs: int = processing.FS, window: int = 10, **kwargs):\n\n t_dur = round(template_duration * fs)\n if not t_dur % 2 == 0:\n t_dur += 1\n dims = signal.shape\n # if np.max(np.abs(signal[0, :])) < np.max(np.abs(signal[1, :])):\n # r_peaks = find_qrs(signal[1, :], peak_search=peak_search)\n # r_peaks = peak_enhance(signal[1, :], peaks=r_peaks, window=0.2)\n # else:\n # processing.scatter_beautiful(r_peaks * 1000 / fs, title='peaks')\n extracted_signal = np.copy(signal)\n # print(len(r_peaks))\n # Please, rework it...\n for n in range(dims[0]):\n for i in range(0, len(peaks), window):\n\n if i + window > len(peaks):\n r_peaks = peaks[i:]\n else:\n r_peaks = peaks[i:i + window]\n\n template = np.full((len(r_peaks), t_dur), np.nan)\n for num, r_ind in enumerate(r_peaks):\n if r_ind < t_dur // 2:\n template[num, t_dur // 2 - r_ind - 1:] = extracted_signal[n, 0:r_ind + t_dur // 2 + 1]\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n template[num, 0:dims[1] - r_ind + t_dur // 2] = extracted_signal[n, r_ind - t_dur // 2:]\n else:\n template[num] = extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2]\n template_mean = np.nanmean(template, axis=0) # None for edge cases\n for r_ind in r_peaks:\n if r_ind < t_dur // 2:\n extracted_signal[n, 0:r_ind + t_dur // 2 + 1] -= template_mean[t_dur // 2 - r_ind - 1:]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel start ' + str(n))\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2 + 1] -= template_mean[\n 0:dims[1] - r_ind + t_dur // 2]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel end ' + str(n))\n else:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2] -= template_mean\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel ' + str(n))\n return extracted_signal",
"def no_icp(source, templates, home=None, use_2d=False):\n if source.shape[0] == 0:\n return create_config(0,0,0, fitness=0)\n origorig = source.copy()\n source, xinit, yinit, theta = init_transform(source)\n\n remove_out = False\n if remove_out:\n iq_range=0.5\n pcnt = (1 - iq_range) / 2\n xqlow, xqhigh = np.quantile(source[:,0], [pcnt, 1-pcnt])\n yqlow, yqhigh = np.quantile(source[:,1], [pcnt, 1-pcnt])\n xiqr, yiqr = xqhigh-xqlow, yqhigh-yqlow\n no_out = (source[:,0]<xqhigh+1.5*xiqr) & (source[:,0]>xqlow-1.5*xiqr) &\\\n (source[:,1]<yqhigh+1.5*yiqr) & (source[:,1]>yqlow-1.5*yiqr)\n source = source[no_out,:]\n\n remove_low = False\n if remove_low:\n not_low = (source[:,-1]>0.15) & (source[:,-1]<1.4)\n source = source[not_low,:]\n\n if source.shape[0] == 0:\n return create_config(0,0,0,fitness=0)\n\n\n # Recenter the data independently from point density\n xmin, xmax = np.min(source[:,0]), np.max(source[:,0])\n ymin, ymax = np.min(source[:,1]), np.max(source[:,1])\n x_center, y_center = xmin+(xmax-xmin)/2, ymin+(ymax-ymin)/2\n center2 = rotate(-theta, np.array([[x_center, y_center, 0]]), axis=\"z\")\n xinit += center2[0][0]\n yinit += center2[0][1]\n source = source - [x_center, y_center, 0]\n\n angle_diffs = [-5.,-2.,-1.,0.,1.,2.,5.]\n center_diffs = [-0.2,-0.15,-0.10,-0.07,-0.05,-0.01,0,0.01,0.05,0.07,0.10,0.15,0.2]\n lengths = [2.,2.1]\n widths = [1.]\n trials = list(product(center_diffs, center_diffs, angle_diffs, lengths, widths))\n\n trialmap = np.array([np.array([xinit, yinit, np.degrees(theta)-90, 0.0, 0.0])+np.array(t) for t in trials])\n results = multi_grid_iou(origorig[:, :2], trialmap, 0.05)\n best = np.argmax(results)\n cfg = create_config(*trialmap[best])\n cfg[\"bed\"][\"fitness\"] = np.max(results)\n return cfg",
"def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs",
"def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)",
"def get_array_grid(self):\n print('Making array grid')\n grid_list = []\n or_list = [0, 0, 0]\n far_list = [0, 0, 0]\n\n for root, subdirs, files in os.walk(self.stem):\n for filename in files:\n if self.probe in filename and self.prot_name in filename and 'ccp4' in filename:\n if ('frequency' not in filename) and ('ranges' not in filename):\n grid_list.append(join(self.stem, filename))\n g = Grid.from_file(join(self.stem, filename))\n _or_list = [g.bounding_box[0][j] for j in range(3)]\n _far_list = [g.bounding_box[1][m] for m in range(3)]\n\n for i in range(3):\n or_list[i] = min(or_list[i], _or_list[i])\n far_list[i] = max(far_list[i], _far_list[i])\n\n self.grid_list = grid_list\n self.spacing = g.spacing\n self.tup_max_length = len(grid_list)\n self.array_grid_origin = (or_list[0], or_list[1], or_list[2])\n self.array_grid_far_corner = (far_list[0], far_list[1], far_list[2])",
"def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return",
"def processTemplates(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n ## Now fetch all the template paths from shotgun\r\n getTemplatePaths = tk.paths_from_template(templateFile, {'Step' : 'Light', 'id' : id, 'Shot' : shotNum})\r\n debug(app = self, method = 'processTemplates', message = 'getTemplatePaths: %s' % getTemplatePaths, verbose = False)\r\n \r\n ## Now look for each assets template path: \r\n xmlFile = max(getTemplatePaths) \r\n debug(app = self, method = 'processTemplates', message = 'Max Version xmlFile.... %s' % xmlFile, verbose = False)\r\n \r\n ## Now if versions has stuff in it..\r\n if not xmlFile:\r\n debug(app = self, method = 'processTemplates', message = 'Can not find any xml files for %s' % shotNum, verbose = False)\r\n pass\r\n else:\r\n \r\n debug(app = self, method = 'processTemplates', message = 'PathTo: %s' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n if os.path.isfile(xmlFile.replace(os.path.sep, \"/\")):## is this a valid xml file!?\r\n inprogressBar.updateProgress(percent = 10, doingWhat = 'createAll shaders...')\r\n self._createAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n \r\n inprogressBar.updateProgress(percent = 30, doingWhat = 'connectAll shaders...')\r\n self._connectAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n else:\r\n debug(app = self, method = 'processTemplates', message = 'FAILED Can not find a valid published xml file for %s ...' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n pass",
"def processTemplate(padPath,dateStart,dateStop,sensor,abbr='spg',whichAx='s',pm='+',tag='untitled',Nfft=None,No=None):\n\n showArgs(padPath,dateStart,dateStop,sensor,abbr,whichAx,pm,tag,Nfft,No)\n\n # get list of pad header files that cover span of interest\n padFiles,sampleRate,dataColumns = getPadHeaderFiles(padPath,dateStart,dateStop,sensor)\n if not(padFiles): return # no files?\n\n # get samples to skip and actualStart from first PAD file\n startOffsetSamples,actualStart = startOffset(padFiles[0],sampleRate,dateStart,dataColumns)\n\n # get header template to lead the way from first PAD file (and dataFile)\n headerTemplate,dataFile = pareHeader(padFiles[0])\n strFs = headerTemplate['SampleRate']\n\n # if Nfft or No not defined, then get defaults\n if not Nfft or not No:\n Nfft,No = cmi.getNfftNo(float(strFs))\n \n print 'B ' + dataFile # FIRST PAD FILE TO WORK ON\n #octaveCalcSpec(dataFile,startOffsetSamples,'inf',abbr,whichAx,pm,tag,strFs,Nfft,No)\n\n # work pad files list for loop & last processing below\n h1=padFiles[0] \n del(padFiles[0])\n if not(padFiles): return # only one file done\n lastFile = padFiles[-1]\n del(padFiles[-1])\n \n # now do all but last file\n padFiles.reverse()\n while padFiles:\n\theaderFile = padFiles.pop()\n\tthisHeader,dataFile = pareHeader(headerFile)\n\tif thisHeader == headerTemplate:\n\t print 'M ' + dataFile # ONE OF TWEEN PAD FILES TO WORK ON\n #octaveCalcSpec(dataFile,0,'inf',abbr,whichAx,pm,tag,strFs,Nfft,No)\n\telse:\n\t print 'X ' + dataFile # DOES NOT MATCH HEADER TEMPLATE\n\n # determine samples to skip in last pad file\n thisHeader,dataFile = pareHeader(lastFile)\n if thisHeader == headerTemplate:\n stopNumRecords,actualStop = endNum(lastFile,sampleRate,dateStop)\n print 'E ' + dataFile # LAST OF PAD FILES TO WORK ON\n #octaveCalcSpec(dataFile,0,stopNumRecords,abbr,whichAx,pm,tag,strFs,Nfft,No)\n else:\n print 'X ' + dataFile # DOES NOT MATCH HEADER TEMPLATE"
]
| [
"0.6328705",
"0.53906786",
"0.5349962",
"0.5301935",
"0.5298006",
"0.52869534",
"0.5275999",
"0.5120418",
"0.5108772",
"0.50137204",
"0.4991059",
"0.4970462",
"0.49680394",
"0.49202514",
"0.49110833",
"0.48883888",
"0.4870274",
"0.48590174",
"0.48584995",
"0.48383528",
"0.48140445",
"0.4791925",
"0.47820556",
"0.47628656",
"0.47610855",
"0.4755531",
"0.47414035",
"0.47115067",
"0.47048184",
"0.46867168"
]
| 0.839063 | 0 |
Plot the spike positions as a scatter plot on a probe marked with brain regions | def view_spike_positions(spike_positions, brain_regions, probe_dimensions, labels_offset=80, font_size=20):
fig = plt.figure()
ax = fig.add_axes([0.08, 0.05, 0.9, 0.9])
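    # scatter each spike at its (x, y) position on the probe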
ax.scatter(spike_positions[:, 0], spike_positions[:, 1], s=5)
ax.set_xlim(0, probe_dimensions[0])
ax.set_ylim(0, probe_dimensions[1])
ax.yaxis.set_ticks(np.arange(0, probe_dimensions[1], 100))
ax.tick_params(axis='y', direction='in', length=5, width=1, colors='b')
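    # label each brain region and draw a dashed boundary line at its depth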
for region in brain_regions:
ax.text(2, brain_regions[region] - labels_offset, region, fontsize=font_size)
ax.plot([0, probe_dimensions[0]], [brain_regions[region], brain_regions[region]], 'k--', linewidth=2)
return fig, ax | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_variant_positions(strains):\n if strains.lower() == 'all':\n strains = None\n strains = get_required_strains(strains)\n gd_data = []\n with database.make_connection() as connection:\n for strain in strains:\n hits = r.table(TABLE).filter(lambda row: row['StrainID'].match(\n strain)).pluck('Position', 'Class').run(connection)\n feat = []\n for hit in hits:\n cur = hit['Position']\n feat.append(misc.create_feature(cur, cur, hit['Class'], strand=None))\n gd_data.append(feat)\n imaging.plot_SNPs(gd_data, strains)",
"def plot_se_pan_positions(self):\n plt.figure(figsize=(10,10))\n plt.plot(self.pan['RAJ2000'],self.pan['DEJ2000'],'bo',markersize=10,mfc='None',label='PanSTARRS')\n \n plt.plot(self.secat['ALPHA_J2000'],self.secat['DELTA_J2000'],'r.',label='SE')\n plt.legend()\n plt.gca().invert_xaxis()\n #print(f\"number of matched sources = {np.sum(zp.matchflag)}\")\n\n # add circle\n\n circle1 = plt.Circle((self.centerRA, self.centerDEC), self.radius, color='c',alpha=.2)\n plt.gca().add_patch(circle1)\n\n plt.savefig('plots/'+self.plotprefix.replace('.fits','')+'se-pan-positions.png')",
"def plotFeatures(self):\n fl=np.array(self.xp)*0.0+0.25*self.farr.max()\n self.splines=self.axes.plot(self.xp, fl , ls='', marker='|', ms=20, color='#00FF00')\n #set up the text position\n tsize=0.83\n self.ymin, self.ymax = self.axes.get_ylim()\n ppp=(self.ymax-self.ymin)/(self.arcfigure.figure.get_figheight()*self.arcfigure.figure.get_dpi())\n f=self.ymax-10*tsize*ppp\n for x,w in zip(self.xp, self.wp):\n w='%6.2f' % float(w)\n self.axes.text(x, f, w, size='small', rotation='vertical', color='#00FF00')",
"def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot",
"def plot_stereomatic(name, database):\n fig, ax = plt.subplots()\n fig.set_size_inches(16,12)\n x = [ xx * 0.0005 for xx in range(-10000,10000)]\n y = []\n for xx in x:\n y.append(stereomatic_descriptor(name, xx, database))\n\n ax.scatter(x, y, label='%s'%name, color='k', s=10)\n plt.xlim((0,4))\n plt.ylim((0,5))\n x_new_ticks = np.linspace(0,4,21)\n y_new_ticks = np.linspace(0,5,11)\n plt.xticks(x_new_ticks, fontsize=10)\n plt.yticks(y_new_ticks, fontsize=10)\n plt.xlabel('x', fontsize=10)\n plt.ylabel('y', fontsize=10)\n plt.title('stereometic Function', fontsize=10, y=1.05)\n plt.legend(loc='best', fontsize=10)\n # plt.show()\n plt.savefig('%s.png'%name)\n plt.close(fig)",
"def plot_dots(\n self, neuron, std_thresh=2, pos_color=\"k\", transient_color=\"r\", ax=None\n ):\n # Define threshold.\n thresh = np.mean(self.data[\"neural\"][neuron]) + std_thresh * np.std(\n self.data[\"neural\"][neuron]\n )\n supra_thresh = self.data[\"neural\"][neuron] > thresh\n\n # Plot.\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.scatter(self.data[\"x\"], self.data[\"y\"], s=3, c=pos_color)\n ax.scatter(\n self.data[\"x\"][supra_thresh],\n self.data[\"y\"][supra_thresh],\n s=3,\n c=transient_color,\n )",
"def plot(sigma, strikes, dips):\n values, vectors = principal(sigma)\n sigma1, sigma2, sigma3 = vectors\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='stereonet')\n plt.hold(True)\n ax.density_contourf(strikes, dips)\n #ax.pole(strikes, dips, 'b.')\n ax.line(sigma1[0],sigma1[1], 'r^', label='sigma1', markersize=18)\n ax.line(sigma2[0],sigma2[1], 'g^', label='sigma2', markersize=18)\n ax.line(sigma3[0],sigma3[1], 'b^', label='sigma3', markersize=18)",
"def plot_scatter_points(self):\n self.plot(1)",
"def scatter_plot(self, speed=0.001):\n FPARTX, FPARTY, FPARTST = self.get_particles_props('x', 'y', 'state')\n plt.clf()\n plt.scatter(FPARTX, FPARTY, FPARTST*5 + 0.01, c=FPARTST, cmap=\"jet\")\n plt.clim(0, 1)\n plt.colorbar()\n plt.pause(speed)",
"def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()",
"def plot(self):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=Axes3D.name)\n\n # TODO Use numpy to rotate esp_points matrix for faster variable access.\n ax.scatter(\n xs=[i[0][0] for i in self.esp_points],\n ys=[i[0][1] for i in self.esp_points],\n zs=[i[0][2] for i in self.esp_points],\n c=[i[1] for i in self.esp_points],\n marker='o',\n s=2,\n alpha=0.5\n )\n\n ax.scatter(\n xs=[i[0][0] for i in self.atom_points],\n ys=[i[0][1] for i in self.atom_points],\n zs=[i[0][2] for i in self.atom_points],\n c=[i[1] for i in self.atom_points],\n marker='X',\n s=100\n )\n\n plt.show()",
"def main():\n # Initialize the Serpinski set\n print(\"==> Making serpinski set...\")\n my_serpinski = Serpinski(400, 400, 0)\n num = 8\n print(\"==> Generating\", num, \"levels of subsets :)\")\n for _ in range(9):\n my_serpinski.add_subset()\n # Draw Serpinski\n # print(\"==> Drawing the set. This might take quite some time!\\\n # Damn Inefficient!\")\n # my_serpinski.draw_me()\n\n # Initialize Coordinates\n length = 50000 # Number of random dots\n x_coord = []\n y_coord = []\n index = 0\n\n # try length particles in serp set\n print(\"==> Randomly choosing\", length, \"dots...\")\n while index < length:\n # Chech if dot in bound\n rand_y = np.random.uniform(low=400.0 - 200.0 * np.sqrt(3) / 2.0,\n high=400.0)\n # rand_x in triangle // condition //\n diff = 400.0 - rand_y\n x_diff = diff / np.sqrt(3)\n rand_x = np.random.uniform(low=400.0 - x_diff,\n high=400 + x_diff)\n\n if my_serpinski.is_bound(rand_x, rand_y):\n x_coord.append(rand_x)\n y_coord.append(rand_y)\n index += 1\n\n # Draw image using scatter\n print(\"Scattering the dots ;)\")\n plt.scatter(x_coord, y_coord, s=0.1)\n # Show image\n dpi = 600\n print(\"==> Saving to .jpg with dpi=\", dpi)\n plt.savefig(\"fractalstuff.jpg\", dpi=dpi, bbox_inches='tight')",
"def coords_plot(self):\n self.load_coords()\n x = []\n y = []\n px = [] \n for item in self.coords:\n if item[1] >52.10 and item[1] <52.4 and item[2]>20.8 and item [2] <21.4:\n x.append(item[1])\n y.append(item[2])\n px.append(item[3])\n plt.scatter(x,y,c=px,s=150,alpha=0.3)\n plt.show()",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def find_chart():\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n xs, ys = np.loadtxt(outtable, usecols=(1, 2)).T\r\n specs = np.loadtxt(outtable, usecols=(0,), dtype=str)\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_cut)))[0]\r\n bad = np.where((sn < sn_cut))[0]\r\n ###############################################\r\n # Filter arrays for S/N\r\n sn = sn[good]\r\n xs = xs[good]\r\n ys = ys[good]\r\n specs = specs[good].tolist()\r\n specs = [x.replace(\".fits\", \"\")[1:] for x in specs]\r\n ###############################################\r\n # Set limits for the plot\r\n norm = Normalize(0, 1)\r\n ###############################################\r\n # Set colormap\r\n # cmap = brewer2mpl.get_map('YlGnBu', 'sequential', 5).mpl_colormap\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = PolyCollection(polygons_bins[good], array=np.ones_like(sn),\r\n cmap=\"gray\", edgecolors='0.5', norm=norm)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig = plt.figure(figsize=(6.25, 6))\r\n gs = gridspec.GridSpec(1, 1)\r\n gs.update(left=0.08, right=0.985, bottom=0.08, top=0.985, hspace=0.05,\r\n wspace=0.06)\r\n ax = plt.subplot(gs[0])\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll)\r\n ###############################################\r\n # Add contours according to V-band image\r\n draw_contours(\"vband\", fig, ax)\r\n ###############################################\r\n for x, y, spec in zip(xs, ys, specs):\r\n ax.text(x, y, spec, fontsize=10)\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Save the figure\r\n plt.show()\r\n plt.savefig(\"figs/find_chart.pdf\")\r\n return",
"def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)",
"def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")",
"def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()",
"def plot_harris_points(image, filtered_coords):\n plt.figure()\n plt.imshow(image)\n plt.plot([p[1] for p in filtered_coords], [p[0] for p in filtered_coords], 'r*')\n plt.axis('off')\n plt.title('harris points')\n plt.show()",
"def plot_spikes(self, show=False, save_path=None, expand = False):\n spikes = np.array(self.spike_history)\n spike_time, e_idx = np.where(spikes)\n spike_time = spike_time.astype('float32')\n spike_time *= self.global_dt\n spike_time_pair = zip(e_idx,spike_time)\n spike_time_pair.sort()\n spike_time_pair = np.array(spike_time_pair)\n spike_time_pair = list(np.split(spike_time_pair, np.where(np.diff(spike_time_pair[:,0]))[0]+1))\n\n if self.enable_spike_dump:\n n = len(self.all_compartments)\n else:\n n = len(self.electrodes)\n\n s = []\n for i in xrange(n):\n s1 = [t[:,1] for t in spike_time_pair if t[0,0] == i]\n s.append(s1)\n\n fig = plt.figure()\n ax = self.raster(s)\n\n if n < 50 or expand:\n ax.set_yticks(np.arange(1, n + 1))\n if self.enable_spike_dump:\n ax.set_yticklabels(tuple(self.all_compartments))\n else:\n ax.set_yticklabels(tuple(self.electrodes))\n else:\n ax.set_yticklabels([])\n\n ax.set_ylabel('Electrode IDX')\n ax.set_xlabel('Time (msec)')\n ax.set_title('CSTMD Electrode Spikes for ' + str(n) + ' compartments')\n\n if not show and expand:\n if n > 40:\n w,h = fig.get_size_inches()\n h *= n / 40\n fig.set_size_inches(w,h)\n\n if save_path is not None:\n #fig.tight_layout()\n plt.savefig(save_path, bbox_inches='tight')\n print \"Saved Cstmd spike train to \" + save_path\n plt.gcf().clear()\n if show:\n plt.show()",
"def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)",
"def plot(model, center, extent, outname):\n # define model grid\n xg = np.linspace(-extent, extent, model.shape[0])\n yg = xg.copy()\n interp_func = RectBivariateSpline(xg, yg, model)\n\n x = np.array([-2, -1, 0, 1, 2]) + center[0]\n y = np.array([-2, -1, 0, 1, 2]) + center[1]\n psf = interp_func(x, y)\n\n x, y = np.meshgrid(x, y)\n f = pl.figure(figsize=(10, 5))\n\n pl.gray()\n ax1 = pl.subplot(121)\n ax1.imshow(model, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n ax1.plot(x, y, 's', mec='r', mfc='none', mew=2)\n\n pl.xlim(-2.5, 2.5)\n pl.ylim(-2.5, 2.5)\n ax2 = pl.subplot(122)\n ax2.imshow(psf, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n\n ax2.set_xticks([-2, -1, 0, 1, 2])\n ax2.set_yticks([-2, -1, 0, 1, 2])\n ax2.set_xticklabels(['%0.3f' % v for v in x[0]])\n ax2.set_yticklabels(['%0.3f' % v for v in y[:, 0]])\n\n coordsA, coordsB = \"data\", \"data\"\n pixels = np.array([[0.0, 0.0], [2., 2.], [-1., -1.]])\n locs = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5]])\n rads = [0.15, 0.25, -0.25]\n for i, p in enumerate(pixels):\n xy1 = p + center\n xy2 = p + locs[i]\n con = ConnectionPatch(xyA=xy2, xyB=xy1, coordsA=coordsA,\n coordsB=coordsB, axesA=ax2, axesB=ax1,\n arrowstyle=\"<-, head_length=1.2, head_width=0.8\", \n shrinkB=5,\n connectionstyle='arc3, rad=%s' % rads[i],\n color='r', lw=2)\n ax2.add_artist(con)\n ax2.plot(p[0], p[1], 's', mfc='none', mec='r', mew=2, ms=50)\n\n #pl.xlim(-2.5, 2.5)\n #pl.ylim(-2.5, 2.5)\n f.savefig(outname)",
"def make_spark(pricestack):\n _x = pricestack - np.mean(pricestack)\n fig, _ax = plt.subplots(1, 1, figsize=(10, 3))\n plt.plot(_x, color='k', linewidth=6)\n plt.plot(len(_x) - 1, _x[-1], color='r', marker='o')\n\n for _, i in _ax.spines.items():\n i.set_visible(False)\n _ax.set_xticks = ([])\n _ax.set_yticks = ([])\n _ax.axhline(c='k', linewidth=4, linestyle=(0, (5, 2, 1, 2)))\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=17)\n buf.seek(0)\n imgspk = Image.open(buf)\n\n plt.clf()\n _ax.cla()\n plt.close(fig)\n return imgspk",
"def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()",
"def plot(self):\n\t\tself.plotOfSpect()",
"def plot_visco_profiles(pointsh5, skip=slice(None,None,1), xscale=1e3, yscale=1e-2, tscale=3.1536e7, adjustRadial=False, benchmark=[], title=None):\n\tplt.figure()\n\n\tcoords,data,number,times = pu.load_h5_visco(pointsh5)\n\n\t#x = 1e3*np.loadtxt(points,usecols=[0]) # output_points2.txt\n\t#y = np.zeros_like(x)\n\tx = coords[:,0]\n\ty = np.zeros_like(x)\n\n\t# NOTE: plot elastic solution by passing dictionary as showelastic\n\t# Plot analytic elastic solution (t=0)\n\t#print(benchmark)\n\tif len(benchmark)>=1:\n\t\tur = zeros_like(x)\n\t\tuz = np.zeros_like(x)\n\t\tfor b in benchmark:\n\t\t\turi,uzi = m.calc_mogi_dp(x,y,**params)\n\t\t\tur += uri\n\t\t\tuz += uzi\n\t\tplt.plot(x*xscale,uz*yscale,'ko',label='benchmark')\n\n\t# Convert units\n\t#ur = np.hypot(data[:,:,0], data[:,:,1]) #assume progiles are along EW profile\n\tur = data[:,:,0]\n\tuz = data[:,:,2]\n\tx = x / xscale\n\tur = ur / yscale #cm\n\tuz = uz / yscale #cm\n\ttimes = times / tscale\n\t#times = times / 8.64e4 #days\n\t#times = times / 31536000 #years\n\n\t#plots = np.arange(0,times.size,skip)\n\t#print(plots.size)\n\t#way to cycle through markers if plotting many lines\n\t#marker = itertools.cycle(['o','^','s','D']) #plot(marker=marker.next() iterates list)\n\t#way to use gradually changing colors from a colormap\n\t#color = plt.cm.jet(1.0*i/plots.size)\n\tindplots = np.arange(times.size-1)\n\tprint(indplots)\n\tindplots = indplots[skip]\n\tprint(indplots)\n\tfor i in indplots:\n\t\tline, = plt.plot(x, uz[i], color=plt.cm.jet(1.0*i/indplots[-1]), label='{:.1f}'.format(times[i]))\n\t\tplt.plot(x, ur[i], ls='dashed', color=line.get_color())\n\t#print uz[i]\n\t#print uz[i-1]\n\n\tif title:\n\t\tplt.title(title)\n\telse:\n\t\tplt.title(pointsh5)\n\n\tplt.axhline(color='k',linestyle='dashed')\n\tplt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\tplt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.show()\n\tplt.legend(title='{}'.format(get_unit(tscale)))\n\tplt.grid()",
"def add_pseudo_experiments(self, xlabel, ylabel, injkey, fhkey):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n xdata = self.values[injkey][fhkey][xlabel]\n ydata = self.values[injkey][fhkey][ylabel]\n self.make_2d_scatter_plot(\n xdata=xdata['vals'],\n ydata=ydata['vals'],\n plot_cor=False,\n set_range=False\n )",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')",
"def visualize(title, particles):\n\n plt.figure(figsize=(10,10))\n plt.title(\"Best configuration for \" + str(len(particles)) + \" particles\", size=25)\n plt.xlabel(\"xcoordinate\", size=18)\n plt.ylabel(\"ycoordinate\", size=18)\n\n plt.xticks(size=13)\n plt.yticks(size=13)\n\n circle = plt.Circle((0, 0), 1)\n circle.set_edgecolor(\"red\")\n circle.set_facecolor(\"none\")\n fig = plt.gcf()\n ax = fig.gca()\n\n ax.add_artist(circle)\n plt.xlim(-1.1,1.1)\n plt.ylim(-1.1,1.1)\n\n # draw all the particles\n for particle in particles:\n plt.scatter(particle.x, particle.y)\n\n fig.savefig(title)",
"def mri_point_plot(self, vcol=1):\n img = self.voxels\n points = self.point_position \n ax = []\n fig = plt.figure(figsize=(9, 8))\n # TODO make this setable in the function call\n columns = 3\n rows = 2\n\n for i in range(points.shape[0]):\n im_slice = int(np.round(points[i, vcol]))\n if vcol == 0:\n im = img[im_slice, :, :]\n elif vcol == 1:\n im = img[:, im_slice, :]\n else:\n im = img[:, :, im_slice]\n ax.append( fig.add_subplot(rows, columns, i+1))\n ax[-1].set_title(\"Image depth: \"+str(im_slice)) # set title\n plt.imshow(im)\n plot_cols = np.array([0, 1, 2])\n plot_cols = plot_cols[plot_cols != vcol]\n plt.plot(points[i, min(plot_cols)], points[i, max(plot_cols)], 'ro')\n\n plt.show()"
]
| [
"0.6227864",
"0.62214625",
"0.6140126",
"0.6102602",
"0.6080271",
"0.6071637",
"0.602967",
"0.6007783",
"0.5980094",
"0.596806",
"0.5963319",
"0.59417874",
"0.5923047",
"0.5893003",
"0.58578354",
"0.5840446",
"0.5825529",
"0.5800904",
"0.5780873",
"0.5771907",
"0.5767062",
"0.5766869",
"0.5766142",
"0.5760967",
"0.57552826",
"0.5751434",
"0.574853",
"0.5745326",
"0.5731572",
"0.5724125"
]
| 0.70573217 | 0 |
Returns the Collatz function value of value. | def collatz(value):
assert value >= 1
if value % 2 == 0:
return value/2
else:
return 3 * value + 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]",
"def f_c(self, z, m):\n\t return (1. - self.f_o(z))*m/(self.ALPHA_1*self.MMAX + m)",
"def f_c(self, z, m):\n\t return (1. - self.f_o(z))*m/(self.ALPHA_1*self.mmax + m)",
"def zvalue(value, sigma, mu):\n return (value - mu) / sigma",
"def get_z(self):\n return self.coords[2]",
"def Get_CalOutZ_Value(self):\r\n z = self.Get_RawOutZ_Value()\r\n if(z >= self.minZ and z <= self.maxZ):\r\n return 0\r\n else:\r\n return z - self.meanZ",
"def get_value(self, x, y, z):\n\t\treturn self.data[ self.xyz_to_offset(x,y,z) ]",
"def get_value(self, func):\n value = func(self.position)\n if value < self.best_value: # minimisation option\n self.best_value = value\n self.best_position = self.position\n #check if value is in the space limits\n if value > z_end:\n self.value = z_end\n if value < z_begin:\n self.value = z_begin\n else:\n self.value = value",
"def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1",
"def get_coord_val(self, x, y, z):\n if self.is_4d():\n #return self._data[y, x, z, self._time_point]\n return self._data[self._y_shift - y, x, z, self._time_point]\n else:\n #return self._data[y, x, z]\n return self._data[self._y_shift - y, x, z]",
"def get_value(self):\n return complex(*self.points[0, :2])",
"def x_value(self, z):\n return z * self.p + self.n",
"def getScaledZ(self, z_value):\n assert False",
"def rescaleZ(self, z_value):\n return z_value",
"def get_lz(self):\r\n return self.dz * self.nz - self.oz",
"def get_z(self) -> int:\n return self.__z",
"def f(z,c):\n zz = z*z + c\n return zz",
"def getZ(self):\n\t\treturn self.coords.z",
"def z(self):\n return self.coords[2]",
"def __get_z__(self):\n return self.Direction['z']",
"def collatz(n, out=None):\n if out is None:\n out = []\n if n in out:\n return out+[n]\n else:\n out.append(n)\n if n%2 == 0:\n return collatz(n//2, out)\n else:\n return collatz(n*3+1, out)",
"def z2u(self, z):\n return norm.cdf(z)",
"def z(self):\n return self._coords[2]",
"def z ( self ) :\n return self.zvar",
"def FtoC(F):\n c = ((F-32)/9.)*5\n return c",
"def getZMax(self):\n return self.zmax",
"def stump_C(z) :\n\n if z > 0 :\n return (1 - cos(sqrt(z)))/z \n elif z < 0 :\n return (cosh(sqrt(-z)) - 1)/(-z)\n else :\n return 0.5",
"def c_s(self, z):\n R = self.R_bg(z)\n return const.c/np.sqrt(3.*(1.+R))",
"def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z",
"def colfct(self, x):\n for i in xrange(self.anz_seg):\n # find interval which contains x\n if self.xmin[i]<=x<=self.xmax[i]:\n # normalize to [0, 1]\n x = (x-self.xmin[i])/(self.xmax[i]-self.xmin[i])\n return self.colmap[i].colfct(x)\n print \"no interval found for x=%e - should not happen\" % x\n return 0.0"
]
| [
"0.6321339",
"0.6254899",
"0.62513334",
"0.6250638",
"0.5922092",
"0.5831883",
"0.5831861",
"0.5827949",
"0.58229166",
"0.58096135",
"0.5748661",
"0.57371926",
"0.57262725",
"0.5722124",
"0.57200646",
"0.57154316",
"0.5712435",
"0.5700306",
"0.5682831",
"0.5676214",
"0.566184",
"0.5642473",
"0.5620809",
"0.56127316",
"0.5608953",
"0.5600565",
"0.55899525",
"0.5548606",
"0.5537523",
"0.5535582"
]
| 0.7737109 | 0 |
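A minimal usage sketch for the Collatz step function in the record above: it assumes Python 3, so floor division (`//`) stands in for `value/2` to keep every term an integer, and the name `collatz_step` is illustrative rather than taken from the record.

def collatz_step(value):
    # One application of the Collatz map: n/2 for even n, 3n + 1 for odd n.
    assert value >= 1
    return value // 2 if value % 2 == 0 else 3 * value + 1

sequence = [6]
while sequence[-1] != 1:
    sequence.append(collatz_step(sequence[-1]))
print(sequence)  # [6, 3, 10, 5, 16, 8, 4, 2, 1]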
Returns the collatz length associated with val, building the collatz_len_dict as it goes. | def collatz_length(val):
assert val >= 1
# Seed the dictionary with collatz_length(1) = 1.
if val == 1:
collatz_len_dict[1] = 1
return collatz_len_dict[1]
# Return the collatz length if it exists in the dictionary.
if val in collatz_len_dict:
return collatz_len_dict[val]
# Make a recursive call to collatz_length() using mapped_val to find this
# val's length.
mapped_val = collatz(val)
collatz_len_dict[val] = 1 + collatz_length(mapped_val)
return collatz_len_dict[val] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\r\n \r\n TOP_VAL = 1000000\r\n for i in xrange(1, TOP_VAL):\r\n _ = collatz_length(i) # Seed each collatz length.\r\n \r\n # Find the key with largest value in the collatz length dictionary.\r\n value, collatz_len = max(collatz_len_dict.iteritems(),\r\n key=lambda x:x[1])\r\n \r\n print 'Value %d has max collatz length: %d' % (value, collatz_len)",
"def initalise_column_lengths(coldata):\n for key, _ in coldata.items():\n coldata[key]['collen'] = len(coldata[key]['head'])\n return coldata",
"def i2len(self, pkt, val):\n fld_len = self._fld.i2len(pkt, val)\n return fld_len + self.padlen(fld_len)",
"def length(self):\n return self.get_delta_value(self.Z_INDEX)",
"def _genLenScale(self):\n # TODO: this function is an interface to specify non-stationary length scale field\n #pdb.set_trace()\n lenXField = self.lenXYZ[0, 0] * np.ones(self.nCell_kl)\n lenYField = self.lenXYZ[0, 1] * np.ones(self.nCell_kl)\n lenZField = self.lenXYZ[0, 2] * np.ones(self.nCell_kl)\n\n return lenXField, lenYField, lenZField",
"def get_length(val):\n if isinstance(val, str):\n return len(val)\n if isinstance(val, int):\n return len('%8s' % val)\n if isinstance(val, float):\n return len('%15.4f' % val)\n if isinstance(val, bool):\n return 5",
"def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def calc_col_len(self): # Calculates length of each column\n print('\\nColumn Lengths\\n' +\n '--------------')\n results.append('\\nColumn Lengths\\n' +\n '--------------')\n for x in range(0, self.tot_col):\n blank = 0\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x] == '':\n blank += 1\n column_count = self.tot_rows - blank\n\n results.append('Column \\'' + self.file_list[0][x] + '\\' length: ' + str(column_count))\n print('Column \\'' + self.file_list[0][x] + '\\' length: ' + str(column_count))",
"def str_len():\n strlen_dict = {}\n # Length of ion name\n strlen_dict['ion'] = 6\n # Length of data file name for line source\n strlen_dict['Source'] = 30\n # Return\n return strlen_dict",
"def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens",
"def __len__(self):\r\n try:\r\n return self._len\r\n except:\r\n self._len = len(self._columns.keys())\r\n return self._len",
"def collatz(value):\r\n assert value >= 1\r\n if value % 2 == 0:\r\n return value/2\r\n else:\r\n return 3 * value + 1",
"def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()",
"def collatz(start):\n counter = 1\n n = start\n while n != 2:\n if n % 2 == 0:\n n /= 2\n else:\n n = (n * 3) + 1\n counter += 1\n\n counter += 1\n return counter",
"def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():",
"def _get_level_width_coord(cube, zg_cube, n_jobs=1):\n try:\n altitude_coord = cube.coord('altitude')\n except iris.exceptions.CoordinateNotFoundError:\n level_widths = _get_level_widths(cube, zg_cube, n_jobs=n_jobs)\n else:\n logger.info(\"Calculating level widths from 'altitude' coordinate\")\n if altitude_coord.bounds is None:\n raise ValueError(\n f\"Height coordinate 'altitude' of cube \"\n f\"{cube.summary(shorten=True)} does not have bounds\")\n level_widths = np.abs(altitude_coord.bounds[..., 1] -\n altitude_coord.bounds[..., 0])\n if level_widths.shape != cube.shape:\n level_widths = np.expand_dims(level_widths, 0)\n level_widths = np.broadcast_to(level_widths, cube.shape)\n\n # Create coordinate\n aux_coord = iris.coords.AuxCoord(level_widths,\n var_name='level_width',\n long_name='Width of vertical layer',\n units='m')\n return aux_coord",
"def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )",
"def length(self) -> ir.IntegerValue:\n return ops.MapLength(self).to_expr()",
"def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def _get_run_length_ac(self):\n self._run_length_ac = []\n for block in self.data:\n self._run_length_ac.extend(\n encode_run_length(tuple(iter_zig_zag(block))[1:])\n )",
"def lengths(self):\n\t\toutput_file = csv.writer(open('contigLengths.csv', 'wb'))\n\t\tfor i,x in enumerate(self.contigsInfo.keys()):\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tl = len(seq)\n\t\t\toutput_file.writerow([i,l])",
"def strlen(self, tuple_data, val):\r\n return len(val)",
"def strlen(self, tuple_data, val):\r\n return len(val)",
"def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength",
"def guess_key_length(self, min_len=1, max_len=9, display=False):\n\n res = {}\n max_ic = 0\n probable_key_length = 0\n # We try different key lengths\n for i in range(min_len, max_len+1):\n\n if self._len < i*2:\n continue\n ics = []\n for j in range(i):\n var = []\n for k in range(self._len//i):\n var.append(self._s[k*i + j])\n text = VigenereLikeCipher(''.join(var))\n ics.append(text.get_ic())\n total_ic = round(sum(ics)/len(ics),4)\n if total_ic > max_ic:\n max_ic = total_ic\n probable_key_length = i\n res[i] = total_ic\n if display:\n print \"\\n[+] Visual key length IC correspondance\"\n for k,v in res.items():\n v = int(round(v*1000,0))\n print str(k) + (int(math.floor(math.log10(len(res))))+1-len(str(k)))*\" \",\n print ''.join(['|' for i in range(v//2)])\n print \"\"\n return probable_key_length",
"def len_literal(self):\n if hasattr(self, '_m_len_literal'):\n return self._m_len_literal if hasattr(self, '_m_len_literal') else None\n\n self._m_len_literal = (self.len_literal_div2 * 2)\n return self._m_len_literal if hasattr(self, '_m_len_literal') else None",
"def _CalculateColumn(self, record):\n if not record:\n return 0\n if isinstance(record, Table):\n add_width = len(record.separator)\n if record.skip_empty:\n if not any(v for _, v in record):\n return 0\n ret = max(len(k) for k, v in record if v) + add_width\n ret = max(ret, 2 + max(self._CalculateColumn(v) for _, v in record))\n return min(ret, self.MAX_MAP_WIDTH)\n elif isinstance(record, Lines):\n return max(self._CalculateColumn(l) for l in record)\n else:\n return 0",
"def vector_length(self, x: float, y: float, z: float) -> float:\n A = 2.0 * (x * y * self.aga + x * z * self.bbe + y * z * self.cal)\n return sqrt(x ** 2 * self.asq + y ** 2 * self.bsq + z ** 2 * self.csq + A)",
"def collatz_sequence_len(n: int) -> int:\n result = 1\n while n != 1:\n if n % 2 == 0:\n n //= 2\n else:\n n = 3 * n + 1\n result += 1\n return result",
"def test_nids_super_res_width():\n f = Level3File(get_test_data('nids/KLZK_H0W_20200812_1305'))\n width = f.map_data(f.sym_block[0][0]['data'])\n assert np.nanmax(width) == 15"
]
| [
"0.63886285",
"0.5896573",
"0.5646013",
"0.56028664",
"0.5522218",
"0.5476281",
"0.54210943",
"0.5352304",
"0.53408134",
"0.52664757",
"0.52570415",
"0.52268237",
"0.5211673",
"0.52104354",
"0.52061903",
"0.5202915",
"0.5193688",
"0.5139088",
"0.5105716",
"0.5094535",
"0.50801796",
"0.50638694",
"0.50638694",
"0.5056354",
"0.5046544",
"0.5040279",
"0.50402516",
"0.5039738",
"0.50349236",
"0.5002971"
]
| 0.8624163 | 0 |
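The record above memoizes chain lengths in a module-level `collatz_len_dict`; a self-contained sketch of the same idea, assuming Python 3 and `functools.lru_cache` in place of the explicit dictionary, might look like this.

from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(val):
    # Memoized Collatz chain length, counting both val and the final 1.
    if val == 1:
        return 1
    nxt = val // 2 if val % 2 == 0 else 3 * val + 1
    return 1 + collatz_length(nxt)

print(collatz_length(27))  # 112: the chain from 27 reaches 1 after 111 steps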
Finds the number with the longest Collatz sequence under 1 million. | def main():
TOP_VAL = 1000000
for i in xrange(1, TOP_VAL):
_ = collatz_length(i) # Seed each collatz length.
# Find the key with largest value in the collatz length dictionary.
value, collatz_len = max(collatz_len_dict.iteritems(),
key=lambda x:x[1])
print 'Value %d has max collatz length: %d' % (value, collatz_len) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def longest_sequence_seed(ubound):\n max_seq_seed = 1\n max_seq_len = 1\n for seed in range(1, ubound):\n seq_len = sum(1 for t in collatz_sequence(seed))\n if seq_len > max_seq_len:\n max_seq_len = seq_len\n max_seq_seed = seed\n return max_seq_seed, max_seq_len",
"def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def no_math_solution(n: int):\n lookup = {1: 1}\n # Calculate the chain's length of all Collatz sequences started below n\n for i in range(2, n):\n cal_chain_length(i, lookup)\n # Find the longest chain\n longestChain = 1\n for i in range(2, n):\n if (lookup[i] > lookup[longestChain]):\n longestChain = i\n\n return longestChain",
"def find_longest_plateau(seq):\n\n start_longest_so_far = 0\n length_longest_so_far = 0\n i = 0\n\n # INVARIANT\n # The longest plateau in seq[0:i] starts at position\n # start_longest_so_far and has a length of\n # length_longest_so_far\n # VARIANT: len(seq) - i\n #\n while len(seq) - i > length_longest_so_far:\n\n length_current_plateau = length_plateau_at(seq, i)\n\n if length_current_plateau > length_longest_so_far:\n start_longest_so_far = i\n length_longest_so_far = length_current_plateau\n\n i += length_current_plateau\n\n return start_longest_so_far",
"def longest_seq(n):\n max_seq = 0\n for i in range(SEQ_LENGTH):\n max_seq = max(max_seq, longest_seq_of_1s(n, i))\n\n return max_seq",
"def find_longest(input):\r\n for thing in input:\r\n print thing\r\n dist_array = [[0 for x in range(rows)] for x in range(cols)] # rows and cols are static variables in main method\r\n for x in xrange(0, len(input), 1):\r\n for y in xrange(0, len(input[x]), 1):\r\n dist_array[x][y] = calculate_longest(dist_array, input, x, y)\r\n for item in dist_array:\r\n print item\r\n return max(max(dist_array))",
"def get_largest_cc(binary):\n cc, n_cc = measure.label(binary)\n max_n = -1\n max_area = 0\n for n in range(1, n_cc + 1):\n area = np.sum(cc == n)\n if area > max_area:\n max_area = area\n max_n = n\n largest_cc = (cc == max_n)\n return largest_cc",
"def longest_seq_of_1s(n, index_to_ignore):\n max_ = 0\n counter = 0\n for i in range(SEQ_LENGTH):\n if i == index_to_ignore or get_bit(n, i):\n counter += 1\n max_ = max(counter, max_)\n else:\n counter = 0\n return max_",
"def collatz_sequence_len(n: int) -> int:\n result = 1\n while n != 1:\n if n % 2 == 0:\n n //= 2\n else:\n n = 3 * n + 1\n result += 1\n return result",
"def probl4():\n\n largest_palindrome = 0\n for i in xrange(101, 1000):\n for j in xrange(101, 1000):\n output = i * j\n if str(output) == str(output)[::-1] and \\\n output > largest_palindrome:\n largest_palindrome = output\n return largest_palindrome",
"def main():\n greatest = 0\n for i in range(1000, 100, -1):\n for j in range(i, 100, -1):\n palindrome = str(j*i)\n if ((palindrome == palindrome[::-1]) and (j*i) > greatest):\n greatest = j*i\n\t\t\t\n return greatest",
"def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence",
"def find_largest_5_digit_number(digits):\r\n return max(int(digits[i:i + 5]) for i, v in enumerate(digits))",
"def euler39():\n\tcount = [0] * 1001\n\n\tfor a in range(1, 333):\n\t\tfor b in range(a+1, 500):\n\t\t\tc = (a**2 + b**2) ** 0.5\n\t\t\tp = a + b + int(c)\n\t\t\t\n\t\t\tif int(c) != c: continue\n\t\t\tif p > 1000: break\n\t\t\t\n\t\t\tcount[p] += 1\n\t\t\t\n\treturn count.index(max(count))",
"def longest_ORF(dna):\n\n # YOUR IMPLEMENTATION HERE",
"def calcMaxIDX(fls, noct):\n freq_l = fls[-1] / (2.0 ** (1 / (2.0 * noct)))\n max_idx = np.array(abs(fls - freq_l)).argmin()\n return max_idx",
"def longest_increasing_suffix(n):\n m, suffix, k = 10, 0, 1\n while n:\n n, last = n // 10, n % 10\n if remainder // 10 < last:\n m, suffix, k = _____________, last, 10 * k\n else:\n return suffix\n return suffix",
"def max_index_in_focal_zone(z, zone):\n _max = -1e32\n imax = None\n for i, zz in enumerate(z):\n if zone[i] == 1:\n if _max < zz:\n _max = zz\n imax = i\n return imax",
"def collatz_length(val):\r\n assert val >= 1\r\n \r\n # Seed the dictionary with collatz_length(1) = 1.\r\n if val == 1:\r\n collatz_len_dict[1] = 1\r\n return collatz_len_dict[1]\r\n \r\n # Return the collatz length if it exists in the dictionary.\r\n if val in collatz_len_dict:\r\n return collatz_len_dict[val]\r\n \r\n # Make a recursive call to collatz_length() using mapped_val to find this\r\n # val's length.\r\n mapped_val = collatz(val)\r\n collatz_len_dict[val] = 1 + collatz_length(mapped_val)\r\n return collatz_len_dict[val]",
"def four():\r\n \r\n i = 999\r\n j = i\r\n largest = 0\r\n \r\n while i > 0:\r\n while j > 0:\r\n number = str(i * j)\r\n forward = str(number)\r\n reverse = \"\"\r\n for char in number:\r\n reverse = char + reverse\r\n if forward == reverse:\r\n if largest < i * j:\r\n largest = i * j\r\n break\r\n else:\r\n j = j - 1\r\n i = i - 1\r\n j = i\r\n return largest",
"def max_known_number(self):\n return len(self.number_list)-1",
"def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length",
"def solution(N):\n # write your code in Python 3.6\n bin_number = str(bin(N))[2:]\n new_bin_gap = False\n longest_bin_gap = 0\n bin_gap_counter = 0\n for char in bin_number:\n if char == '1':\n if bin_gap_counter > longest_bin_gap:\n longest_bin_gap = bin_gap_counter\n new_bin_gap = True\n bin_gap_counter = 0\n elif new_bin_gap:\n bin_gap_counter += 1\n return longest_bin_gap",
"def longest_ORF_noncoding(dna, num_trials):\n longest=[]\n for i in range(0,num_trials):\n \tshuffled_str=shuffle_string(dna)\n \tlongest.append(longest_ORF(shuffled_str))\n long_ORF=max(longest,key=len)\n return len(long_ORF)",
"def lengthOfLongestSubstring(s):\n arr = [1] * len(s)\n i = 0\n j = 1\n while j < len(s):\n if s[j] not in s[i:j]:\n arr[i] += 1\n j = j + 1\n else:\n i = i + 1\n j = i + 1\n return max(arr)",
"def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))",
"def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])",
"def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max",
"def find_max_tidy_num(s_number):\n\n len_input = len(s_number) - 1\n\n if len_input == 0:\n return s_number\n\n for i in range(0, len_input):\n if int(s_number[i]) > int(s_number[i+1]):\n\n final_str = '9' * (len_input - i)\n s_number = s_number[:(i+1)]\n\n return ''.join([find_max_tidy_num(str(int(s_number)-1)), final_str])\n\n return s_number"
]
| [
"0.75266415",
"0.7198738",
"0.7196087",
"0.717631",
"0.64048016",
"0.63742423",
"0.63255733",
"0.63223904",
"0.6298933",
"0.6236006",
"0.61663425",
"0.60797447",
"0.60653627",
"0.60575485",
"0.60220593",
"0.60206336",
"0.60177255",
"0.6012676",
"0.60087687",
"0.5935851",
"0.5919431",
"0.59016895",
"0.5885273",
"0.5878571",
"0.5870549",
"0.5868833",
"0.58676755",
"0.58665514",
"0.58604574",
"0.58597034"
]
| 0.7428995 | 1 |
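The `main` above is Python 2 (`xrange`, `iteritems`, the `print` statement) and fills the cache recursively; a Python 3 sketch of the same search, written iteratively so deep chains cannot hit the recursion limit, is shown below. The function name and the cached-suffix bookkeeping are illustrative assumptions, not part of the record.

def longest_collatz_start(limit=1_000_000):
    lengths = {1: 1}
    for start in range(2, limit):
        chain, n = [], start
        while n not in lengths:          # walk until we reach a cached value
            chain.append(n)
            n = n // 2 if n % 2 == 0 else 3 * n + 1
        base = lengths[n]
        for offset, value in enumerate(reversed(chain), start=1):
            lengths[value] = base + offset   # back-fill the newly visited prefix
    return max(range(1, limit), key=lambda v: lengths[v])

print(longest_collatz_start())  # 837799 (chain length 525); takes a few seconds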
convert a matplotlib colormap into a PIL palette | def cmap_to_pil_palette(cmap):
# return (255.*np.array(
# map(lambda x: cmap(x)[0:3], np.linspace(0., 1.,256)))
# .ravel()).astype('int')
    return (255. * np.array(
        [cmap(x)[:3] for x in np.linspace(0,1,256)]).ravel()).astype('int') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def colormap(self):\n palette = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),\n (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232),\n (250, 170, 160), (230, 150, 140), (70, 70, 70),\n (102, 102, 156), (190, 153, 153), (180, 165, 180),\n (150, 100, 100), (150, 120, 90), (153, 153, 153),\n (153, 153, 153), (250, 170, 30), (220, 220, 0),\n (107, 142, 35), (152, 251, 152), (70, 130, 180),\n (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100),\n (0, 0, 230), (119, 11, 32), (0, 0, 142)]\n\n num_colors = self[0][1].shape[-1]\n colormap = np.zeros((num_colors, 3), dtype=int)\n for i in range(num_colors):\n colormap[i, ...] = palette[self._update_labels_dict[i]]\n return colormap",
"def get_mpl_colormap(self):\n return mpl.colors.ListedColormap(self.get_colors().astype(float) / 255.0)",
"def palette_from_mpl_name(name):\n if name in CMAPS:\n return CMAPS[name]\n\n rgba = plt.get_cmap(name)(np.linspace(0, 1, 256))\n palette = [to_hex(color) for color in rgba]\n return palette",
"def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap",
"def test_colormap_discrete():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['r', 'g', 'b'],\n interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_rgb.png\")",
"def plot_cmap(cmap):\n gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n pylab.imshow(gradient, aspect='auto', cmap=cmap)\n pylab.show()",
"def cmap(self):\n return self._palette",
"def cmap(self):\n return self.pixels.get_cmap()",
"def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap",
"def imagetopalette(image, palcolors):\n assert image.mode == 'L', \"Only grayscale images supported !\"\n pal = [(palcolors[i],palcolors[i+1]) for i in range(len(palcolors)-1)]\n image.putdata([colortopalette(c,pal) for c in list(image.getdata())])",
"def create_pascal_label_colormap():\r\n colormap = np.zeros((256, 3), dtype = int)\r\n ind = np.arange(256, dtype=int)\r\n\r\n for shift in reversed(range(8)):\r\n for channel in range(3):\r\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\r\n ind >>= 3\r\n\r\n return colormap",
"def create_colormap(seg_map):\n\tcolormap = np.zeros((256, 3), dtype=int)\n\tind = np.arange(256, dtype=int)\n\tfor shift in reversed(range(8)):\n\t\tfor channel in range(3):\n\t\t\tcolormap[:, channel] |= ((ind >> channel) & 1) << shift \n\t\tind >>= 3\n\treturn colormap[seg_map]",
"def get_colormap(self):\n return colors.colormaps[self.name]",
"def get_colormap(self):\n return colors.colormaps[self.name]",
"def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap",
"def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap",
"def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arrange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>=3\n\n return colormap",
"def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)",
"def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)",
"def get_bokeh_palette(cmap):\n from bokeh.colors import RGB\n from matplotlib import cm\n\n # Solution adapted from\n # https://stackoverflow.com/questions/31883097/elegant-way-to-match-a-string-to-a-random-color-matplotlib\n m_RGB = (255 * plt.get_cmap(cmap)(range(256))).astype(\"int\")\n return [RGB(*tuple(rgb)).to_hex() for rgb in m_RGB]",
"def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label.')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('Label value too large.')\n\n return colormap[label]",
"def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap",
"def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap",
"def labelP_to_color_image(label):\r\n if label.ndim != 2:\r\n raise ValueError('Expect 2-D input label')\r\n\r\n colormap = create_pascal_label_colormap()\r\n\r\n if np.max(label) >= len(colormap):\r\n raise ValueError('label value too large.')\r\n\r\n return colormap[label]",
"def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]",
"def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]",
"def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors",
"def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]",
"def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]",
"def make_colormap(colormap = 'rainbow_r', bins = 256, add_alpha = True, invert_alpha = False, cmap_name = 'costum',\n discrete = False, return_cmap = False):\n \n if isinstance(colormap, str): # if input is string (so existent colormap)\n\n # get colormap\n cmap = cm.get_cmap(colormap)\n\n else: # is list of strings\n cvals = np.arange(len(colormap))\n norm = plt.Normalize(min(cvals),max(cvals))\n tuples = list(zip(map(norm,cvals), colormap))\n cmap = colors.LinearSegmentedColormap.from_list(\"\", tuples)\n \n if discrete == True: # if we want a discrete colormap from list\n cmap = colors.ListedColormap(colormap)\n bins = int(len(colormap))\n\n # convert into array\n cmap_array = cmap(range(bins))\n\n # reshape array for map\n new_map = []\n for i in range(cmap_array.shape[-1]):\n new_map.append(np.tile(cmap_array[...,i],(bins,1)))\n\n new_map = np.moveaxis(np.array(new_map), 0, -1)\n \n if add_alpha: \n # make alpha array\n if invert_alpha == True: # in case we want to invert alpha (y from 1 to 0 instead pf 0 to 1)\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), 1-np.linspace(0, 1, bins))\n else:\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), np.linspace(0, 1, bins, endpoint=False))\n\n # add alpha channel\n new_map[...,-1] = alpha\n cmap_ext = (0,1,0,1)\n else:\n new_map = new_map[:1,...].copy() \n cmap_ext = (0,100,0,1)\n \n fig = plt.figure(figsize=(1,1))\n ax = fig.add_axes([0,0,1,1])\n # plot \n plt.imshow(new_map,\n extent = cmap_ext,\n origin = 'lower')\n ax.axis('off')\n\n if add_alpha: \n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_alpha_bins_%d.png'%bins)\n else:\n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_bins_%d.png'%bins)\n #misc.imsave(rgb_fn, new_map)\n plt.savefig(rgb_fn, dpi = 200,transparent=True)\n\n if return_cmap:\n return cmap\n else:\n return rgb_fn"
]
| [
"0.6967287",
"0.68760455",
"0.68467826",
"0.68011427",
"0.6650869",
"0.6646248",
"0.66290563",
"0.66052055",
"0.6603507",
"0.65802604",
"0.655248",
"0.6547776",
"0.6542165",
"0.6542165",
"0.65381736",
"0.65381736",
"0.6538096",
"0.6535332",
"0.6535332",
"0.6523931",
"0.6514596",
"0.6510193",
"0.6510193",
"0.65012795",
"0.64979666",
"0.64979666",
"0.6464819",
"0.6453473",
"0.6453473",
"0.64330167"
]
| 0.8015995 | 0 |
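A short round-trip sketch showing how a flat 768-entry palette like the one above is typically handed to Pillow; it assumes `numpy`, `matplotlib`, and `Pillow` are installed, picks `viridis` purely as an example, and uses the multiply-then-cast order so the entries land in 0-255.

import numpy as np
from matplotlib import cm
from PIL import Image

cmap = cm.viridis
# Flat [r, g, b, r, g, b, ...] palette: 256 * 3 ints in 0..255.
palette = (255. * np.array([cmap(x)[:3] for x in np.linspace(0, 1, 256)]).ravel()).astype(int)

# A 32x256 grayscale ramp; attaching a palette turns the "L" image into a "P" (palette) image.
ramp = Image.fromarray(np.tile(np.arange(256, dtype=np.uint8), (32, 1)))
ramp.putpalette(palette.tolist())
ramp.save("viridis_ramp.png")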
Save a png image of a colorbar. One can use this code directly, or use it as an example to modify. | def save_colorbar(img=None, vmin=None, vmax=None, cmap="jet",
filename=None, title="Colorbar", lab=""):
fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
if vmin is None: vmin = np.min(img)
if vmax is None: vmax = np.max(img)
cb = mpl.colorbar.ColorbarBase(
ax, cmap=cmap, norm=mpl.colors.Normalize(vmin=vmin, vmax=vmax))
cb.set_label(title, rotation=-90, color='k', labelpad=20)
if filename is None:
filename = 'colorbar_'+lab+'.png'
fig.savefig(filename, transparent=False, format='png')
return filename, cb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_colorbar(\n map_path: Path = typer.Option(...),\n colorbar_path: Path = typer.Option(...),\n label: str = typer.Option(...),\n output_path: Path = typer.Option(...),\n):\n img = plt.imread(map_path)\n w, h = 3779 / 1000, 3749 / 1000\n fig, ax = plt.subplots(dpi=100)\n assert isinstance(ax, Axes)\n ax.imshow(img)\n ax.axis(\"off\")\n\n colorbar_ax = fig.add_axes([0.040, 0.113, 0.3, 0.3])\n colorbar_ax.imshow(plt.imread(colorbar_path), alpha=0.9)\n colorbar_ax.axis(\"off\")\n\n # Add label as well\n ax.text(\n x=0.01,\n y=0.95,\n s=label,\n fontdict=dict(\n size=\"xx-large\",\n path_effects=[patheffects.withStroke(linewidth=1, foreground=\"white\")],\n ),\n transform=ax.transAxes,\n )\n\n fig.set_size_inches(w, h)\n fig.savefig(output_path, bbox_inches=\"tight\", dpi=1000)",
"def save_plot(data, title, fname, format='png', cb = True,vmax=None,vmin=None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(data,interpolation='nearest',vmax=vmax,vmin=vmin)\n if cb == True: \n plt.colorbar(im)\n ax.set_title(title)\n plt.savefig(fname+'.'+format,dpi=100)\n plt.close(fig)",
"def drawcolorbar(cmapname, out_file):\n ioff()\n figure(figsize = (1, 10))\n axis(\"off\")\n a = outer(arange(0, 1, 0.01), ones(10))\n imshow(a, aspect = 'auto', cmap = get_cmap(cmapname), origin = \"lower\")\n plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)\n savefig(out_file, dpi=100, bbox_inches='tight')\n subprocess.call(['/usr/bin/convert', '-trim', out_file, out_file ])",
"def cb_save(event):\n fig.savefig('sample.univariate_discrete.py.png', dpi=300, format='png', transparent=True)",
"def save(image, name):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.savefig(name)",
"def create_image(image_data, title, figure_name):\n #print(figure_name)\n #plt.figure()\n fig_ax = plt.gca()\n image_data = np.array(image_data)\n #image_data[image_data1800] = np.min(image_data)\n #image_data = np.abs(image_data)\n image = fig_ax.imshow(image_data[0:1028, :], cmap='nipy_spectral',\n origin='lower', interpolation='none')\n #image = fig_ax.imshow(np.array(image_data), cmap='nipy_spectral',\n #origin='lower', interpolation='none')\n plt.title(title)\n divider = make_axes_locatable(fig_ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(image, cax=cax)\n plt.grid(False)\n plt.savefig(figure_name, dpi=100, bbox_inches=\"tight\")\n #plt.show()\n #plt.pause(0.1)\n #plt.show()\n# plt.draw()\n# plt.pause(0.001)\n# print('OK, Move forward')\n #plt.show(block=False)\n plt.close('all')",
"def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()",
"def save_wavefield_plot(wavefield, kind: str, ext: list, clim: list, output_dir: str):\n fig, ax = plt.subplots(figsize=(32, 12))\n aa = plt.imshow(wavefield, interpolation='nearest', aspect='auto', cmap='seismic', clim=clim,\n extent=ext)\n\n title = kind + ' wavefield'\n ax.set(xlabel='Offset [km]', ylabel='Time [s]', title=title)\n ax.set_aspect('auto')\n # Make a colorbar for the ContourSet returned by the contourf call.\n cbar = fig.colorbar(aa)\n cbar.ax.set_ylabel('Amplitude')\n filepath = os.path.join(output_dir,'wavefield-' + kind + '.png')\n fig.savefig(filepath)",
"def save_to_png(self, tiles, output_dir, channel=None):\n plt.ioff()\n\n for idx, tile in enumerate(tiles):\n save_path = f\"{output_dir}/tile_{idx}\"\n fig = self.build_fig()\n img = np.moveaxis(tile, 0, 2)\n \n if channel is None: \n plt.imshow(img)\n plt.savefig(save_path)\n plt.close()\n else:\n plt.imshow(img[:, :, channel])\n plt.savefig(save_path)\n plt.close()\n\n print(\"done converting to png!\")",
"def save_pic(pic_tensor, title, filename):\n plt.close()\n plt.title(title, size=24)\n xyrgb = pic_tensor.data[0].numpy().transpose((1, 2, 0)) # convert from RGB x X x Y to X x Y x RGB\n if (xyrgb.shape)[-1] != 3: # no rgb image => Plot with color map\n xyrgb = xyrgb[:, :, 0]\n plt.imshow(xyrgb, cmap='gray', interpolation='nearest')\n if (xyrgb.shape)[-1] != 3: # no rgb image => show color bar legend\n cbar = plt.colorbar()\n cbar.ax.tick_params(labelsize=20)\n print(title, xyrgb.shape)\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n plt.savefig(\"./\" + filename + \".pdf\", dpi=300, bbox_inches='tight')",
"def save():\n pl.savefig('/home/filippini/Documents/plot/RUN55/compa'+INFO_RUN+'.png')",
"def save_image(self):\n self.compressed_image_id = str(uuid.uuid4().hex)\n plot.imsave(\n str(\n self.compressed_image_id + \"{}\").format(\n \".png\"), self.compressed_image)\n\n if self.verbose:\n print(\n \"Compressed image saved at \" + (\n str(self.compressed_image_id + \"{}\").format(\".png\")))",
"def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)",
"def save_gradient_plot(gradient, mesh, output_dir: str):\n clim = gradient.min(),gradient.max()\n fig, ax = plt.subplots(figsize=(32,12))\n aa=vis.plot(gradient, mesh, clim=clim)\n ax.set(xlabel='Offset [km]', ylabel='Depth [km]', title='Gradient - iter#1')\n ax.set_aspect('auto')\n # Make a colorbar for the ContourSet returned by the contourf call.\n cbar = fig.colorbar(aa)\n cbar.ax.set_ylabel('Velocity [km/s]')\n fig.savefig(os.path.join(output_dir, 'gradient.png'))",
"def save(self, fn):\n plt.imsave(fn, self.image)",
"def savefig(self, dir_path=\"/Users/thomasaref/Documents/TA_software/\", fig_name=\"test_colormap_plot.png\"):\n print \"saving figure\"\n self.figure.savefig(dir_path+fig_name, dpi=self.dpi,\n bbox_inches='tight',\n transparent=self.transparent)#, format=self.save_type)",
"def save_image(path, image, cmap='gist_earth_r'):\n n_cols = n_rows = 1\n n_pixels = 256\n dpi_of_monitor = 96 # HARDCODED DPI VALUE FROM MY OLD DELL LAPTOP...\n figsize = (n_pixels * n_cols / dpi_of_monitor,\n n_pixels * n_rows / dpi_of_monitor)\n f, ax = plt.subplots(n_rows, n_cols, figsize=figsize)\n f.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)\n ax.axis('off')\n ax.imshow(image, cmap=cmap, vmin=0, vmax=None)\n f.savefig(path, dpi=dpi_of_monitor)\n plt.close(f)",
"def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))",
"def saveImage(self, file_name='./out.jpg'):\n frame = self.camera.get_frame()\n color = frame.color_image[0]\n cv2.imwrite(file_name, color)\n cv2.imshow('frame', color)\n cv2.waitKey()\n cv2.destroyAllWindows()",
"def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")",
"def save_image(image, figsize, save_path, ticks=False, grey=True):\n fig = plt.figure(figsize=figsize)\n if grey:\n plt.imshow(image, cmap=plt.get_cmap('gray'))\n else:\n plt.imshow(image)\n if not ticks:\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n fig.savefig(save_path)\n plt.close(fig)\n return",
"def save_picture(canvas, file_name=''):\n _pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, canvas.width,\n canvas.height)\n _pixbuf.get_from_drawable(canvas.canvas.images[0],\n canvas.canvas.images[0].get_colormap(),\n 0, 0, 0, 0, canvas.width, canvas.height)\n if file_name != '':\n _pixbuf.save(file_name, 'png')\n return _pixbuf",
"def save(self):\n fname, _ = getSaveFileName(self, \"Save cluster plot to\", 'cluster_plot.png')\n if fname:\n fname = str(fname) # convert from QString\n image = self.grabFrameBuffer() # defaults to withAlpha=False, makes no difference\n try:\n image.save(fname)\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self.panel, \"Error saving file\", str(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n print('Cluster plot saved to %r' % fname)",
"def main():\n fout_png = 'color0.png'\n _, axis = plt.subplots(1, 1, figsize=(6, 6))\n colors = [\n '#0032ff',\n '#00ebff',\n '#fdfe02',\n '#ff0000',\n '#8500ff',\n ]\n plt_color_text(colors)\n plt.savefig(fout_png)",
"def save_image(image_numpy, image_path, aspect_ratio=1.0, color_map=False):\n if color_map:\n import matplotlib.pyplot as plt\n cm = plt.get_cmap('jet')\n colored_image = cm(image_numpy[:,:,0])[:,:,:3]\n# print_numpy(colored_image, val=True, shp=True) # max 1.0 min 0.0 shape (256,256,3)\n \n image_pil = Image.fromarray((colored_image*255.).astype(np.uint8))\n else:\n# print_numpy(image_numpy, val=True, shp=True)\n image_pil = Image.fromarray(image_numpy)\n h, w, _ = image_numpy.shape\n\n if aspect_ratio > 1.0:\n image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)\n if aspect_ratio < 1.0:\n image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)\n image_pil.save(image_path)",
"def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)",
"def graphical_output(self, block=False, save=False, display=True, folder_name='output', image_suffix=None):\n masks = [np.where(self.board == i, True, False) for i in range(self.number_of_colors)]\n output = np.zeros((self.height, self.width, 3), dtype=np.int)\n for mask, color in zip(masks, list(COLORS.values())[:self.number_of_colors]):\n output[mask] = color\n\n image = np.repeat(np.repeat(output, 10, axis=0), 10, axis=1)/255.0\n if save:\n plt.imsave(f'{folder_name}/image{image_suffix}.png', image)\n\n if display:\n plt.imshow(image)\n plt.show(block=block)\n\n return image",
"def save_image(self):\r\n filename = filedialog.asksaveasfilename(title='Save Image As...',\r\n filetypes=((\"Portable Network Graphics (.png)\", \"*.png\"), (\"Portable Document Format(.pdf)\", \"*.pdf\")))\r\n self.graph.savefig(filename, dpi=self.graph.dpi)",
"def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)",
"def write_png(self, output_name):\n self.fig.savefig(output_name)\n return"
]
| [
"0.6722815",
"0.66249746",
"0.6546425",
"0.64947253",
"0.6400522",
"0.62403846",
"0.6202291",
"0.61253536",
"0.61250126",
"0.61182654",
"0.61181164",
"0.60613644",
"0.601523",
"0.599384",
"0.5987088",
"0.5985315",
"0.59652644",
"0.5953611",
"0.5927927",
"0.59235275",
"0.59234643",
"0.58667976",
"0.58624476",
"0.58601934",
"0.58589906",
"0.58101064",
"0.57814664",
"0.5733626",
"0.5730347",
"0.57238954"
]
| 0.7792215 | 0 |
Test all successful scenarios for TOTP validation. | def test_successful_verification(self):
for i in (-2, -1, 0, 1, 2):
description = "TOTP not verified for `i={0}`".format(i)
calculated = self.algorithm.calculate(self.device.secret, drift=i)
confirmed = self.relate.verify(calculated, save=False)
self.assertTrue(confirmed, description)
self.relate.confirm = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_valid_otp(self, client, valid_otp_data):\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"OK\"",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False",
"def test_valid_credentials(self):\n self.tc_id = \"Ts_001\"\n self.tc_desc = \"Verify user is able to register into the application\"\n self.tc_step = \"TC Start\"\n\n registration = RegistrationPage(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname,self.lastname,self.vendorname,self.fnsnumber,self.primaryemail,self.primaryphno,self.psw,self.cpsw,self.continfo)\n registration.basicRegButton()\n self.tc_step = \"Enter the public registration details\"\n registration.enterPublicRegistrationDetails(self.publicemail,self.publicphno,self.publicwebsite,self.businessdesc,self.products)\n registration.vendorimg(self.vendorPublicImg)\n registration.publicRegButton()\n self.tc_step = \"Enter the location details\"\n registration.clickCSAButton()\n registration.clickFarmStandButton()\n registration.clickFarmersMarketBooth()\n registration.clickMobileMarket()\n registration.clickLocationYesButton()\n registration.csalocationTypeDetails(self.pickupsitename,self.adrs1,self.adrs2,self.zipcode,self.spzl_instruction)\n registration.chooseCity()\n registration.clickmonths()\n registration.clickorder()\n registration.starttimeMonday()\n registration.endtimeMonday()\n registration.saveCsa()\n registration.farmstandlocationTypeDetails(self.farmstand_adrs_1,self.farmstand_adrs_2,self.farmstand_zipcode_textbox_id)\n registration.chooseFarmStandCity()\n registration.saveFarmStand()\n registration.choosefarmersmarketboothCity()\n registration.saveFarmersMarketBooth()\n registration.mobileMarketlocationTypeDetails(self.mobileMarkettextboxid)\n registration.choosemobileMarketCity()\n registration.savemobileMarket()\n\n self.tc_step = \"Verification\"\n self.assertEqual(registration.verifyRegistration(),\"Healthy Incentives Program (HIP)\",\"Login Success\")",
"def test_invalid_otp(self, client, valid_otp_data):\n\n valid_otp_data[\"code\"] += \"1\"\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 401\n assert resp.json()[\"code\"] == \"invalid_otp\"",
"def test_valid_credentials(self):\n self.tc_id = \"Ts_009\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n registration = Registrationwithoutemailid(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname,self.lastname,self.vendorname,self.fnsnumber,self.primaryemail,self.primaryphno,self.psw,self.cpsw,self.continfo)\n registration.basicRegButton()\n # self.assertEqual(registration.verifyReg(), \"Public Info\", \"Registration Failed\")",
"def test_case3(self):\n\n email = \"[email protected]\"\n password = \"huehuehuehue\"\n first_name = \"name1\"\n family_name = \"name2\"\n gender = \"Male\"\n city = \"Testcity\"\n country = \"Testcountry\"\n\n bad_token = \"bad token\"\n\n # Sign up with valid data\n response = test_sign_up(email, password, first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], True, sign_up_json[\"message\"])\n\n # Sign in with valid data\n response = test_sign_in(email, password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], True, sign_in_json[\"message\"])\n token = sign_in_json[\"data\"]\n\n # Change password with invalid old password\n new_password = \"qweqweqwe\"\n response = test_change_pwd(token, \"invalid_password\", new_password)\n assert response.status_code == 200\n json_change_pwd = json.loads(response.text)\n self.assertEqual(json_change_pwd[\"success\"], False, json_change_pwd[\"message\"])\n self.assertEqual(json_change_pwd[\"message\"], WRONG_PWD_ERROR_MSG, \"Wrong error message.\")\n\n # Change password with too short new password\n new_password = \"short\"\n response = test_change_pwd(token, password, new_password)\n assert response.status_code == 200\n json_change_pwd = json.loads(response.text)\n self.assertEqual(json_change_pwd[\"success\"], False, json_change_pwd[\"message\"])\n self.assertEqual(json_change_pwd[\"message\"], SHORT_PWD_ERROR_MSG, \"Wrong error message.\")\n\n # Post message with invalid token\n message = \"Testmessage\"\n response = test_post_message(bad_token, email, message)\n assert response.status_code == 200\n json_post_message = json.loads(response.text)\n self.assertEqual(json_post_message[\"success\"], False, json_post_message[\"message\"])\n self.assertEqual(json_post_message[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Post message with invalid email\n unknown_email = \"[email protected]\"\n message = \"Testing test test\"\n response = test_post_message(token, unknown_email, message)\n assert response.status_code == 200\n json_post_message = json.loads(response.text)\n self.assertEqual(json_post_message[\"success\"], False, json_post_message[\"message\"])\n self.assertEqual(json_post_message[\"message\"], NO_USER_ERROR_MSG, \"Wrong error message.\")\n\n # get user message by token with invalid token\n response = test_user_messages_by_token(bad_token)\n assert response.status_code == 200\n messages_json = json.loads(response.text)\n self.assertEqual(messages_json[\"success\"], False, messages_json[\"message\"])\n self.assertEqual(messages_json[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Get user data with invalid token\n response = test_user_data_by_token(bad_token)\n assert response.status_code == 200\n json_user_data = json.loads(response.text)\n self.assertEqual(json_user_data[\"success\"], False, json_user_data[\"message\"])\n self.assertEqual(json_user_data[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n\n # Sign out with invalid token\n response = test_sign_out(bad_token)\n assert response.status_code == 200\n json_sign_out = json.loads(response.text)\n self.assertEqual(json_sign_out[\"success\"], False, json_sign_out[\"message\"])\n self.assertEqual(json_sign_out[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Sign out with valid token\n response = 
test_sign_out(token)\n assert response.status_code == 200\n json_sign_out = json.loads(response.text)\n self.assertEqual(json_sign_out[\"success\"], True, json_sign_out[\"message\"])",
"def test_verification_failed(self):\n pass",
"def test_all_asserts():\n \n test_remove_punctuation()\n test_prepare_text()\n test_string_concatenator()\n test_list_to_string()\n test_end_chat()\n test_check_link()\n test_check_region()\n test_check_area()\n test_check_city()\n test_check_industry()\n test_check_back()\n test_check_alumni_region()\n test_check_alumni_area()\n test_check_alumni_city()\n test_check_alumni_industry()",
"def test_teacher_check_homework_positive():\n assert opp_teacher.check_homework(result_1)",
"def test_fetch_otp(self):\n otp = self.api.fetch_otp()\n self.assertIn('code', otp)",
"def test_validate_ppsa(session, desc, valid, message_content):\n # setup\n for reg_type in PPSATypes:\n if reg_type.value != 'RL':\n json_data = copy.deepcopy(FINANCING)\n json_data['type'] = reg_type.value\n del json_data['trustIndenture']\n if desc == DESC_INCLUDES_OT_DESC:\n json_data['otherTypeDescription'] = 'TEST OTHER DESC'\n elif desc == DESC_INCLUDES_TI:\n json_data['trustIndenture'] = True\n if reg_type.value == 'SA':\n message_content = None\n if desc != DESC_ALL_LIFE:\n del json_data['lifeYears']\n else:\n if reg_type.value in ('FR', 'LT', 'MH'):\n message_content = validator.LY_NOT_ALLOWED\n else:\n message_content = validator.LIFE_INVALID\n json_data['lifeInfinite'] = True\n\n if reg_type.value in ('FL', 'FA', 'FS'):\n del json_data['vehicleCollateral']\n else:\n del json_data['generalCollateral']\n json_data['vehicleCollateral'][0]['type'] = 'MH'\n\n if desc == DESC_INCLUDES_LA:\n json_data['lienAmount'] = '1000'\n if desc == DESC_INCLUDES_SD:\n json_data['surrenderDate'] = '2030-06-15T00:00:00-07:00'\n\n # print('REG TYPE: ' + str(json_data['type']))\n error_msg = validator.validate(json_data)\n if valid:\n assert error_msg == ''\n elif message_content:\n # print(error_msg)\n assert error_msg != ''\n assert error_msg.find(message_content) != -1",
"def test_tenants_cardtoken(self):\n pass",
"def test_password_strength(self):\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'asdfasdf')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)",
"def test_76_task_settings_redundancy(self, mock):\r\n # Creat root user\r\n self.register()\r\n self.signout()\r\n # Create owner\r\n self.register(fullname=\"owner\", name=\"owner\")\r\n self.new_application()\r\n self.new_task(1)\r\n url = \"/app/sampleapp/tasks/redundancy\"\r\n form_id = 'task_redundancy'\r\n self.signout()\r\n\r\n # As owner and root\r\n for i in range(0, 1):\r\n if i == 0:\r\n # As owner\r\n self.signin(email=\"[email protected]\")\r\n n_answers = 20\r\n else:\r\n n_answers = 10\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n # Correct values\r\n err_msg = \"There should be a %s section\" % form_id\r\n assert dom.find(id=form_id) is not None, err_msg\r\n res = self.task_settings_redundancy(short_name=\"sampleapp\",\r\n n_answers=n_answers)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Task Redundancy should be updated\"\r\n assert dom.find(id='msg_success') is not None, err_msg\r\n app = db.session.query(App).get(1)\r\n for t in app.tasks:\r\n assert t.n_answers == n_answers, err_msg\r\n # Wrong values, triggering the validators\r\n res = self.task_settings_redundancy(short_name=\"sampleapp\",\r\n n_answers=0)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Task Redundancy should be a value between 0 and 1000\"\r\n assert dom.find(id='msg_error') is not None, err_msg\r\n res = self.task_settings_redundancy(short_name=\"sampleapp\",\r\n n_answers=10000000)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Task Redundancy should be a value between 0 and 1000\"\r\n assert dom.find(id='msg_error') is not None, err_msg\r\n\r\n\r\n self.signout()\r\n\r\n # As an authenticated user\r\n self.register(fullname=\"juan\", name=\"juan\")\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"User should not be allowed to access this page\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # As an anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"User should be redirected to sign in\"\r\n assert dom.find(id=\"signin\") is not None, err_msg\r\n\r\n # With hidden app\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n self.register(fullname=\"daniel\", name=\"daniel\")\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n self.signout()\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n # Correct values\r\n err_msg = \"There should be a %s section\" % form_id\r\n assert dom.find(id=form_id) is not None, err_msg",
"def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True",
"def _validate_otp(self, otp):\n try:\n if self.ykval_client.verify(otp):\n return True\n return False\n except Exception as err:\n logger.error('OTP Validation failed: %r', err)\n return False",
"def test_validate(self):\n pass",
"def test_case2(self):\n\n valid_email = \"[email protected]\"\n valid_password = \"asdasdasd\"\n valid_firstname = \"name1\"\n valid_familyname = \"name2\"\n valid_gender = \"Female\"\n valid_city = \"Testcity\"\n valid_country = \"Testcountry\"\n\n # Valid sign up\n response = test_sign_up(valid_email, valid_password, valid_firstname, valid_familyname, valid_gender, valid_city,\n valid_country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], True, sign_up_json[\"message\"])\n\n email = \"[email protected]\"\n password = \"asdasdasd\"\n first_name = \"name1\"\n family_name = \"name2\"\n gender = \"Male\"\n city = \"Testcity\"\n country = \"Testcountry\"\n\n invalid_password = \"invalid_password\"\n unknown_email = \"[email protected]\"\n\n\n # Sign up with missing input\n response = test_sign_up(email, password, \"\", family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], BAD_SIGNUP_ERROR_MSG, \"Wrong error message.\")\n\n # Sign up with too short password\n response = test_sign_up(email, \"short\", first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], BAD_SIGNUP_ERROR_MSG, \"Wrong error message.\")\n\n # Sign up with already existing email\n response = test_sign_up(valid_email, password, first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], False, sign_up_json[\"message\"])\n self.assertEqual(sign_up_json[\"message\"], EMAIL_IN_USE_ERROR_MSG, \"Wrong error message.\")\n\n\n # Sign in with unknown email\n response = test_sign_in(unknown_email, password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], False, sign_in_json[\"message\"])\n self.assertEqual(sign_in_json[\"message\"], BAD_LOGIN_ERROR_MSG, \"Wrong error message.\")\n\n # Sign in with invalid password\n response = test_sign_in(valid_email, invalid_password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], False, sign_in_json[\"message\"])\n self.assertEqual(sign_in_json[\"message\"], BAD_LOGIN_ERROR_MSG, \"Wrong error message.\")",
"def test_authflow(self):\n response = self.client.post('/auth/signup/', {\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'email': '[email protected]',\n 'password': self.password,\n 'gstin': '11AAAAA1111A1A1',\n 'mobile': self.mobile,\n 'business_name': 'busi_ness',\n 'address': {'address_name':'', 'address_line1': '', 'address_line2': '', 'state': '', 'pincode': '209801', 'country': 'INDIA'}\n })\n\n response_data = response.json()\n\n self.assertListEqual(list(response_data.keys()), ['id', 'otp'])\n\n response = self.client.post('/auth/verify-otp/', response_data)\n\n response_data = response.json()\n self.assertListEqual(list(response_data.keys()), ['token', 'refresh_token', 'session_key'])\n self.assertRegexpMatches(response_data['token'], r'[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+')\n self.assertRegexpMatches(response_data['refresh_token'], r'[0-9A-Za-z]{32}')\n self.assertRegexpMatches(response_data['session_key'], r'[0-9A-Za-z]{32}')\n\n response = self.client.post('/auth/signin/', {'id_field': self.mobile, 'password': self.password})\n auth_data = response.json()\n\n refresh_token = auth_data['refresh_token']\n session_key = auth_data['session_key']\n\n response = self.client.post('/auth/refresh/', {'refresh_token': refresh_token}, HTTP_AUTHORIZATION='JWT ' + auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n refreshed_auth_data = response.json() \n response = self.client.get('/auth/handle-sessions/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n active_sessions = response.json()\n self.assertListEqual(list(active_sessions.keys()), ['token_list'])\n\n acitve_sessions_token_list = active_sessions.get('token_list')\n\n # end all other sessions except your own\n for session_key_iter in acitve_sessions_token_list:\n if session_key_iter != session_key:\n self.client.post('/auth/handle-sessions/', {'session_key': session_key_iter}, HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n # log out from own session\n self.client.get('/auth/signout/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)",
"def verify():",
"def test_integration():\n input_values = read_in_range('day04/input.txt')\n n_valid = count_valid_passwords(input_values)\n assert n_valid == 511",
"def test_sucess(self):\n msg = self.user.registration(\"MrShort\",\n \"[email protected]\",\n \"notshort\",\n \"notshort\")\n self.assertEqual(msg, \"Your account is now registered please proceed to login\")",
"def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)",
"def test_case_01(self):\n if True:\n self.fail()",
"def test_success_form_validation(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': '12345678',\n 'password_confirmation': '12345678'\n }\n form = ResetPasswordForm(form_data)\n self.assertTrue(form.is_valid())",
"def test_cadastros_de_registros_no_site_rpa_challenge():",
"def test_case1(self):\n\n email = \"[email protected]\"\n password = \"testtesttest\"\n first_name = \"name1\"\n family_name = \"name2\"\n gender = \"Male\"\n city = \"Testcity\"\n country = \"Testcountry\"\n\n # test sign up\n response = test_sign_up(email, password, first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], True, sign_up_json[\"message\"])\n\n # test sign in\n response = test_sign_in(email, password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], True, sign_in_json[\"message\"])\n token1 = sign_in_json[\"data\"]\n\n # password change\n new_password = \"huehuehuehue\"\n response = test_change_pwd(token1, password, new_password)\n assert response.status_code == 200\n json_change_pwd = json.loads(response.text)\n self.assertEqual(json_change_pwd[\"success\"], True, json_change_pwd[\"message\"])\n\n # test sign in again\n response = test_sign_in(email, new_password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], True, sign_in_json[\"message\"])\n token2 = sign_in_json[\"data\"]\n # token should have been updated\n assert token1 != token2\n\n # Get user data by valid token\n response = test_user_data_by_token(token2)\n assert response.status_code == 200\n json_user_data = json.loads(response.text)\n self.assertEqual(json_user_data[\"success\"], True, json_user_data[\"message\"])\n assert json_user_data[\"data\"][\"email\"] == email\n assert json_user_data[\"data\"][\"firstname\"] == first_name\n assert json_user_data[\"data\"][\"familyname\"] == family_name\n assert json_user_data[\"data\"][\"gender\"] == gender\n assert json_user_data[\"data\"][\"city\"] == city\n assert json_user_data[\"data\"][\"country\"] == country\n\n # Post message on own wall\n message = \"Testmessage\"\n response = test_post_message(token2, email, message)\n assert response.status_code == 200\n json_post_message = json.loads(response.text)\n self.assertEqual(json_post_message[\"success\"], True, json_post_message[\"message\"])\n\n # Get messages\n response = test_user_messages_by_token(token2)\n assert response.status_code == 200\n messages_json = json.loads(response.text)\n self.assertEqual(messages_json[\"success\"], True, messages_json[\"message\"])\n messages = messages_json[\"data\"][0]\n assert messages[\"writer\"] == email\n assert messages[\"content\"] == message\n\n # Sign out\n response = test_sign_out(token2)\n assert response.status_code == 200\n json_sign_out = json.loads(response.text)\n self.assertEqual(json_sign_out[\"success\"], True, json_sign_out[\"message\"])",
"def test_validate_credentials(self):\n pass",
"def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')",
"def test_submit_form_using_valid_data():"
]
| [
"0.74338865",
"0.68149686",
"0.6745631",
"0.67049855",
"0.6556469",
"0.6342859",
"0.6192209",
"0.6178929",
"0.611805",
"0.60867935",
"0.60529476",
"0.5979635",
"0.5956218",
"0.59378344",
"0.5920723",
"0.5908492",
"0.58967924",
"0.5874718",
"0.58681774",
"0.5859935",
"0.5857947",
"0.58493257",
"0.5831382",
"0.5779421",
"0.57777977",
"0.57679147",
"0.5767647",
"0.5766971",
"0.576488",
"0.57647836"
]
| 0.7176029 | 1 |
Test all unsuccessful scenarios for TOTP validation. | def test_unsuccessful_verification(self):
for i in (-4, -3, 3, 4):
description = "TOTP verified for `i={0}`".format(i)
calculated = self.algorithm.calculate(self.device.secret, drift=i)
confirmed = self.relate.verify(calculated, save=False)
self.assertFalse(confirmed, description)
self.relate.confirm = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_otp(self, client, valid_otp_data):\n\n valid_otp_data[\"code\"] += \"1\"\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 401\n assert resp.json()[\"code\"] == \"invalid_otp\"",
"def test_successful_verification(self):\n for i in (-2, -1, 0, 1, 2):\n\n description = \"TOTP not verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertTrue(confirmed, description)\n\n self.relate.confirm = False",
"def test_valid_otp(self, client, valid_otp_data):\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"OK\"",
"def test_verification_failed(self):\n pass",
"def test_case_01(self):\n if True:\n self.fail()",
"def test_case3(self):\n\n email = \"[email protected]\"\n password = \"huehuehuehue\"\n first_name = \"name1\"\n family_name = \"name2\"\n gender = \"Male\"\n city = \"Testcity\"\n country = \"Testcountry\"\n\n bad_token = \"bad token\"\n\n # Sign up with valid data\n response = test_sign_up(email, password, first_name, family_name, gender, city, country)\n assert response.status_code == 200\n sign_up_json = json.loads(response.text)\n self.assertEqual(sign_up_json[\"success\"], True, sign_up_json[\"message\"])\n\n # Sign in with valid data\n response = test_sign_in(email, password)\n assert response.status_code == 200\n sign_in_json = json.loads(response.text)\n self.assertEqual(sign_in_json[\"success\"], True, sign_in_json[\"message\"])\n token = sign_in_json[\"data\"]\n\n # Change password with invalid old password\n new_password = \"qweqweqwe\"\n response = test_change_pwd(token, \"invalid_password\", new_password)\n assert response.status_code == 200\n json_change_pwd = json.loads(response.text)\n self.assertEqual(json_change_pwd[\"success\"], False, json_change_pwd[\"message\"])\n self.assertEqual(json_change_pwd[\"message\"], WRONG_PWD_ERROR_MSG, \"Wrong error message.\")\n\n # Change password with too short new password\n new_password = \"short\"\n response = test_change_pwd(token, password, new_password)\n assert response.status_code == 200\n json_change_pwd = json.loads(response.text)\n self.assertEqual(json_change_pwd[\"success\"], False, json_change_pwd[\"message\"])\n self.assertEqual(json_change_pwd[\"message\"], SHORT_PWD_ERROR_MSG, \"Wrong error message.\")\n\n # Post message with invalid token\n message = \"Testmessage\"\n response = test_post_message(bad_token, email, message)\n assert response.status_code == 200\n json_post_message = json.loads(response.text)\n self.assertEqual(json_post_message[\"success\"], False, json_post_message[\"message\"])\n self.assertEqual(json_post_message[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Post message with invalid email\n unknown_email = \"[email protected]\"\n message = \"Testing test test\"\n response = test_post_message(token, unknown_email, message)\n assert response.status_code == 200\n json_post_message = json.loads(response.text)\n self.assertEqual(json_post_message[\"success\"], False, json_post_message[\"message\"])\n self.assertEqual(json_post_message[\"message\"], NO_USER_ERROR_MSG, \"Wrong error message.\")\n\n # get user message by token with invalid token\n response = test_user_messages_by_token(bad_token)\n assert response.status_code == 200\n messages_json = json.loads(response.text)\n self.assertEqual(messages_json[\"success\"], False, messages_json[\"message\"])\n self.assertEqual(messages_json[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Get user data with invalid token\n response = test_user_data_by_token(bad_token)\n assert response.status_code == 200\n json_user_data = json.loads(response.text)\n self.assertEqual(json_user_data[\"success\"], False, json_user_data[\"message\"])\n self.assertEqual(json_user_data[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n\n # Sign out with invalid token\n response = test_sign_out(bad_token)\n assert response.status_code == 200\n json_sign_out = json.loads(response.text)\n self.assertEqual(json_sign_out[\"success\"], False, json_sign_out[\"message\"])\n self.assertEqual(json_sign_out[\"message\"], NOT_SIGNED_IN_ERROR_MSG, \"Wrong error message.\")\n\n # Sign out with valid token\n response = 
test_sign_out(token)\n assert response.status_code == 200\n json_sign_out = json.loads(response.text)\n self.assertEqual(json_sign_out[\"success\"], True, json_sign_out[\"message\"])",
"def test_invalid_password(self):\n pass",
"def test_bad_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n bad_activation_keys = (\n 'emailactivationtokengenerator',\n 'emailactivation-tokengenerator',\n '3rd-bademailactivationkey'\n )\n for key in bad_activation_keys:\n self.assertFalse(token_generator.check_token(user, key))",
"def test_valid_credentials(self):\n self.tc_id = \"Ts_001\"\n self.tc_desc = \"Verify user is able to register into the application\"\n self.tc_step = \"TC Start\"\n\n registration = RegistrationPage(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname,self.lastname,self.vendorname,self.fnsnumber,self.primaryemail,self.primaryphno,self.psw,self.cpsw,self.continfo)\n registration.basicRegButton()\n self.tc_step = \"Enter the public registration details\"\n registration.enterPublicRegistrationDetails(self.publicemail,self.publicphno,self.publicwebsite,self.businessdesc,self.products)\n registration.vendorimg(self.vendorPublicImg)\n registration.publicRegButton()\n self.tc_step = \"Enter the location details\"\n registration.clickCSAButton()\n registration.clickFarmStandButton()\n registration.clickFarmersMarketBooth()\n registration.clickMobileMarket()\n registration.clickLocationYesButton()\n registration.csalocationTypeDetails(self.pickupsitename,self.adrs1,self.adrs2,self.zipcode,self.spzl_instruction)\n registration.chooseCity()\n registration.clickmonths()\n registration.clickorder()\n registration.starttimeMonday()\n registration.endtimeMonday()\n registration.saveCsa()\n registration.farmstandlocationTypeDetails(self.farmstand_adrs_1,self.farmstand_adrs_2,self.farmstand_zipcode_textbox_id)\n registration.chooseFarmStandCity()\n registration.saveFarmStand()\n registration.choosefarmersmarketboothCity()\n registration.saveFarmersMarketBooth()\n registration.mobileMarketlocationTypeDetails(self.mobileMarkettextboxid)\n registration.choosemobileMarketCity()\n registration.savemobileMarket()\n\n self.tc_step = \"Verification\"\n self.assertEqual(registration.verifyRegistration(),\"Healthy Incentives Program (HIP)\",\"Login Success\")",
"def test_pass_times_error_server(self):\n with HTTMock(self.http_wrong):\n self.assertRaises(Exception, self.iss.pass_times, 15,20)",
"def _validate_otp(self, otp):\n try:\n if self.ykval_client.verify(otp):\n return True\n return False\n except Exception as err:\n logger.error('OTP Validation failed: %r', err)\n return False",
"def test_xfailed_but_passed():\n pass",
"def test_errors(self):\n response = self.client.post(\n reverse('users:perform_password_recovery'),\n data={'email': 'invalid@mail'},\n follow=True,\n )\n\n soup = BeautifulSoup(response.content, 'html.parser')\n err = soup.find('p', 'email-error').text\n\n self.assertEqual(err, 'Enter a valid email address.')\n\n response = self.client.post(\n reverse('users:perform_password_recovery'),\n data={'email': '[email protected]'},\n follow=True,\n )\n\n soup = BeautifulSoup(response.content, 'html.parser')\n err = soup.find('p', 'email-error').text\n\n self.assertEqual(err, 'User with this email doesn\\'t exist.')",
"def test_state_after_failure(self):\n pass",
"def test_all_asserts():\n \n test_remove_punctuation()\n test_prepare_text()\n test_string_concatenator()\n test_list_to_string()\n test_end_chat()\n test_check_link()\n test_check_region()\n test_check_area()\n test_check_city()\n test_check_industry()\n test_check_back()\n test_check_alumni_region()\n test_check_alumni_area()\n test_check_alumni_city()\n test_check_alumni_industry()",
"def test_for_html_form_errors(self):\n # PART 1\n response = self.client.post(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': self.confirmed_u.password_recovery.token\n }\n ),\n data={\n 'new_password1': '123',\n 'new_password2': '123',\n },\n follow=True,\n )\n self.assertEqual(response.status_code, 200)\n \n errors = BeautifulSoup(response.content, 'html.parser').find_all('p', 'field_error')\n \n self.assertEqual(errors[0].text, 'This password is too short. It must contain at least 8 characters.')\n self.assertEqual(errors[1].text, 'This password is too common.')\n self.assertEqual(errors[2].text, 'This password is entirely numeric.')\n\n # PART 2 \n response = self.client.post(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': self.confirmed_u.password_recovery.token,\n }\n ),\n data={\n 'new_password1': 'uiuiuiuiu',\n 'new_password2': 'iuiuiuiui',\n },\n follow=True,\n )\n self.assertEqual(response.status_code, 200)\n errors = BeautifulSoup(response.content, 'html.parser').find_all('p', 'field_error')\n\n self.assertEqual(errors[0].text, 'The two password fields didn’t match.')",
"def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))",
"def test_5_sign_in_functionality_invalid_email_password(self):\n self.log.debug(\"Validate email and password fields are present\")\n self.page.validate_sign_in_window_elements()\n email = \"[email protected]\" #Invalid email id\n password = \"123456\" #Invalid password\n self.log.debug(\"enter invalid email and password\")\n self.page.validate_sign_functionality(email, password)\n self.log.debug(\"Verify alert 'Invalid Email or password.' messsage \")\n self.page.validate_alert_message_if_entered_wrong_email_password()",
"def test_invalid(self):\n args = [SIMPLE_TEMPLATE, SIMPLE_CANDIDATE_INVALID]\n result = self.runner.invoke(main, args)\n self.assertEqual(-1, result.exit_code)",
"def test_default_unsuccessful_verify_request(self, cred):\n # make the initial request\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n assert len(resp.json()['request_id']) <= 32\n # now enter invalid verify code 3 times to terminate verification process\n # first invalid code check\n request_id = resp.json()['request_id']\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '16'\n assert resp.json()['request_id'] == request_id\n assert resp.json()['error_text'] == code_does_not_match_msg\n # second invalid check\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '16'\n assert resp.json()['request_id'] == request_id\n assert resp.json()['error_text'] == code_does_not_match_msg\n # third invalid check\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '17'\n assert 'request_id' not in resp.json().keys()\n assert resp.json()['error_text'] == workflow_terminated_msg",
"def test_teacher_check_homework_negative_if_solution_is_not_ok():\n assert not opp_teacher.check_homework(result_3)",
"def test_valid_login_form_but_failed_authentication(self):\n\n\n\t\tpass",
"def test_wrong_answers(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Incorrect'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')",
"def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! Please try again', rv.data)",
"def test_failed_submit(self):\n\n form_data = {}\n form = ResetPasswordForm(form_data)\n self.assertFalse(form.submit())",
"def test_task_add_invalid_form():\n pytest.fail('Not implemented yet.')",
"def on_fail(utterance):\n\n self.log.info(\"Utterance: {}\".format(utterance))\n\n user_response = self.ask_yesno('try.again')\n\n return 'try.again' if user_response is None else 'okay'",
"def test_password_strength(self):\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'asdfasdf')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)",
"def test_valid_credentials(self):\n self.tc_id = \"Ts_009\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n registration = Registrationwithoutemailid(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname,self.lastname,self.vendorname,self.fnsnumber,self.primaryemail,self.primaryphno,self.psw,self.cpsw,self.continfo)\n registration.basicRegButton()\n # self.assertEqual(registration.verifyReg(), \"Public Info\", \"Registration Failed\")",
"def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")"
]
| [
"0.70181113",
"0.68967783",
"0.6756452",
"0.65170574",
"0.6170026",
"0.61201423",
"0.6116117",
"0.6086188",
"0.6060815",
"0.6015356",
"0.6000249",
"0.5992706",
"0.5935468",
"0.5925306",
"0.59237534",
"0.5915284",
"0.5909458",
"0.5885173",
"0.5880219",
"0.58797127",
"0.587615",
"0.5864974",
"0.5824819",
"0.5823759",
"0.58149153",
"0.5813564",
"0.58107597",
"0.5804856",
"0.57954824",
"0.57933474"
]
| 0.7493148 | 0 |
changes hue of image | def change_hue(image, delta):
imHueChange = tf.image.adjust_hue(image, delta=delta, name=None)
return imHueChange | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def adjust_hue(image, delta):\r\n image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)\r\n return image",
"def adjustHue(img, fac):\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(fac * 255)\n img2 = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return img2",
"def adjust_hue(img, hue_factor):\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n check_type(img)\n\n input_mode = img.mode\n assert img.mode not in {'L', '1', 'I', 'F'}, \\\n \"Input image mode should not be {'L', '1', 'I', 'F'}\"\n\n h, s, v = img.convert('HSV').split()\n\n np_h = np.array(h, dtype=np.uint8)\n # uint8 addition take cares of rotation across boundaries\n with np.errstate(over='ignore'):\n np_h += np.uint8(hue_factor * 255)\n h = Image.fromarray(np_h, 'L')\n\n img = Image.merge('HSV', (h, s, v)).convert(input_mode)\n return img",
"def adjust_hue(img, hue_factor):\n _assert_image_tensor(img, 'CHW')\n assert (\n hue_factor >= -0.5 and hue_factor <= 0.5\n ), \"hue_factor should be in range [-0.5, 0.5]\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n dtype = img.dtype\n if dtype == paddle.uint8:\n img = img.astype(paddle.float32) / 255.0\n\n img_hsv = _rgb_to_hsv(img)\n h, s, v = img_hsv.unbind(axis=-3)\n h = h + hue_factor\n h = h - h.floor()\n img_adjusted = _hsv_to_rgb(paddle.stack([h, s, v], axis=-3))\n\n if dtype == paddle.uint8:\n img_adjusted = (img_adjusted * 255.0).astype(dtype)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return img_adjusted",
"def adjust_hue(img, hue_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if hue_factor < -255 or hue_factor > 255:\n raise ValueError(\n f'hue_factor({hue_factor}) is outside of the expected value range (-255 <= x <= 255)')\n\n aug = iaa.color.AddToHue(value=hue_factor, from_colorspace='RGB')\n img = aug.augment_image(img)\n return img",
"def adjust_hue(img, hue_factor):\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. Got {}'.format(type(img)))\n\n im = img.astype(np.uint8)\n hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(hue_factor * 255)\n\n im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return im.astype(img.dtype)",
"def applyHSV(img):\n\treturn applyColorMap(img, \"hsv\")",
"def filterToHue( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n hue = int(255*HSL[0]//360) # convert to 0-255 range\n bmp.pixels[h][w] = (hue,hue,hue)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp",
"def test_colormap_single_hue():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=get_colormap('single_hue', 255),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_hue.png\")",
"def hue_shift(input_image, degrees):\n\timage = input_image if 180 + degrees <= 255 else input_image.astype('uint16')\n\timage[:, :, 0] += degrees\n\timage[:, :, 0] %= 180\n\treturn image",
"def shift_hue_saturation(image, hue = -90, saturation = 0.65): \n\tcopy = image.copy()\n\tld = copy.load()\n\twidth, height = copy.size\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\tpixel = ld[x,y]\n\t\t\tr = pixel[0]\n\t\t\tg = pixel[1]\n\t\t\tb = pixel[2]\n\t\t\t\n\t\t\th,s,v = colorsys.rgb_to_hsv(r/255., g/255., b/255.)\n\t\t\th = (h + hue/360.0) % 1.0\n\t\t\ts = s**saturation\n\t\t\tr,g,b = colorsys.hsv_to_rgb(h, s, v)\n\t\t\tld[x,y] = (int(r * 255.9999), int(g * 255.9999), int(b * 255.9999))\n\treturn copy",
"def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img",
"def setHue ( self, newhue ):\n if isinstance( newhue, int ):\n newhue /= 360.0\n if newhue > 1.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n self.h = newhue\n self.hsl[0] = newhue\n self.hsla[0] = newhue\n self.updateFromHsl()",
"def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green",
"def compute_new_hsv(im):\n eps = 1e-10\n r,g,b = np.array(cv2.split(im)) + eps\n traditional_hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)\n numerator = np.log(r) - np.log(g)\n denominator = np.log(r) + np.log(g) - 2*np.log(b) + eps\n new_hue = np.clip(np.round(numerator/denominator).astype(np.uint8), 0, 180)\n new_hsv = np.zeros_like(traditional_hsv).astype(np.uint8)\n new_hsv[:, :, 0] = new_hue\n new_hsv[:, :, 1] = traditional_hsv[:, :, 1]\n new_hsv[:, :, 2] = traditional_hsv[:, :, 2]\n return new_hsv",
"def test_hue(self):\n thispath = os.path.dirname(__file__)\n impath = os.path.join(\"test\", \"737.jpg\")\n impath2 = os.path.join(\"test\", \"738.jpg\")\n \n img = cv2.imread(os.path.join(thispath, impath))\n img2 = cv2.imread(os.path.join(thispath, impath2))\n colorextr = ColorFeatureExtracter(img)\n colorextr2 = ColorFeatureExtracter(img2)\n print(colorextr.CompareFeatures(colorextr2.ComputeFeatures(),colorextr.ComputeFeatures()))\n # ... and then evaluate the output",
"def hue_mask(img, minHue, maxHue, minSaturation, maxSaturation, minValue, maxValue):\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n (h,s,v) = cv2.split(hsv)\n ret, h_minthresh = cv2.threshold(h, minHue, 255, cv2.THRESH_BINARY)\n ret, h_maxthresh = cv2.threshold(h, maxHue, 255, cv2.THRESH_BINARY_INV)\n ret, s_minthresh = cv2.threshold(s, minSaturation, 255, cv2.THRESH_BINARY)\n ret, s_maxthresh = cv2.threshold(s, maxSaturation, 255, cv2.THRESH_BINARY_INV)\n ret, v_minthresh = cv2.threshold(v, minValue, 255, cv2.THRESH_BINARY)\n ret, v_maxthresh = cv2.threshold(v, maxValue, 255, cv2.THRESH_BINARY_INV)\n #account for hue values wrapping around at red\n if minHue < maxHue:\n h_thresh = cv2.bitwise_and(h_minthresh, h_maxthresh)\n else:\n h_thresh = cv2.bitwise_or(h_minthresh, h_maxthresh)\n s_thresh = cv2.bitwise_and(s_minthresh, s_maxthresh)\n v_thresh = cv2.bitwise_and(v_minthresh, v_maxthresh)\n result = cv2.bitwise_and(h_thresh, s_thresh)\n result = cv2.bitwise_and(result, v_thresh)\n detector = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR) #display image result\n return result",
"def enhance_hue(self, delta, p=None):\n if self._max_aug_nums>0:\n if self._nums>self._max_aug_nums:\n return self\n self._nums += 1\n if p is None:\n p = self._p\n self.image = enhance_hue(self.image, delta, p)\n return self",
"def preprocess_image(img):\r\n\r\n hsvImg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n hsvImg[...,1] = hsvImg[...,1]*1.75 #increase saturation by 175%\r\n\r\n image_f =cv2.cvtColor(hsvImg,cv2.COLOR_HSV2BGR)\r\n\r\n return image_f",
"def grayscale(img):\n for pixel in img:\n x, y, col = pixel\n r, g, b = col\n \n r = (r + g + b)/3\n r = g = b\n \n new_color = create_color(r, g, b)\n set_color(img, x, y, new_color)",
"def reduce_color(image):\n\n # http://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv-in-python\n w, h, _ = image.shape\n for row in xrange(h-1):\n for col in xrange(w-1):\n #pi = row * w * 3 + col * 3\n pixel = image[col][row]\n pixel[0] = __reduceColorValue(pixel[0])\n pixel[1] = __reduceColorValue(pixel[1])\n pixel[2] = __reduceColorValue(pixel[2])\n return image",
"def augment_brightness(image):\n rand_brightness = .25 + np.random.uniform()\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n image[:, :, 2] = image[:, :, 2] * rand_brightness\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n return image",
"def enhance_contrast(img):\n # CLAHE (Contrast Limited Adaptive Histogram Equalization)\n clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8, 8))\n\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space\n l, a, b = cv2.split(lab) # split on 3 different channels\n\n l2 = clahe.apply(l) # apply CLAHE to the L-channel\n\n lab = cv2.merge((l2, a, b)) # merge channels\n img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR\n\n return img2",
"def read_img(img): #X\n im = plt.imread(img)\n im = im[:, :, :3]\n if im.max()>200:\n im = im/255.\n return rgb_to_hsv(im)-0.5",
"def adjust_saturation(img, saturation_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img",
"def _rgb_to_hsv(img):\n maxc = img.max(axis=-3)\n minc = img.min(axis=-3)\n\n is_equal = paddle.equal(maxc, minc)\n one_divisor = paddle.ones_like(maxc)\n c_delta = maxc - minc\n # s is 0 when maxc == minc, set the divisor to 1 to avoid zero divide.\n s = c_delta / paddle.where(is_equal, one_divisor, maxc)\n\n r, g, b = img.unbind(axis=-3)\n c_delta_divisor = paddle.where(is_equal, one_divisor, c_delta)\n # when maxc == minc, there is r == g == b, set the divisor to 1 to avoid zero divide.\n rc = (maxc - r) / c_delta_divisor\n gc = (maxc - g) / c_delta_divisor\n bc = (maxc - b) / c_delta_divisor\n\n hr = (maxc == r).astype(maxc.dtype) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)).astype(maxc.dtype) * (rc - bc + 2.0)\n hb = ((maxc != r) & (maxc != g)).astype(maxc.dtype) * (gc - rc + 4.0)\n h = (hr + hg + hb) / 6.0 + 1.0\n h = h - h.trunc()\n return paddle.stack([h, s, maxc], axis=-3)",
"def equalizeHist_color(img):\n image = np.empty(img.shape)\n for c in range(img.shape[2]):\n channel = img[:, :, c]\n channel = channel.astype(np.uint8)\n\n # CLAHE\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(2, 2))\n channel = clahe.apply(channel)\n\n # http://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html\n channel = cv2.equalizeHist(channel)\n try:\n image[:, :, c] = channel\n except Exception as e:\n print(str(e))\n return image",
"def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image",
"def adjust_hsv(image, delta_h = 0, delta_s = 0, delta_v = 0):\r\n\r\n\tassert image.shape[-1] == 3\r\n\tassert 0 <= delta_h <= 1 and 0 <= delta_s <= 1 and 0 <= delta_v <= 1\r\n\r\n\timage = rgb_to_hsv(image / 255.0)\r\n\r\n\timage[:, :, 0] += delta_h\r\n\timage[:, :, 1] += delta_s\r\n\timage[:, :, 2] += delta_v\r\n\r\n\timage = hsv_to_rgb(image) * 255\r\n\r\n\r\n\treturn image.astype(\"uint8\")",
"def change_light(image, value, channel=\"v\"):\n\n channelDic = {\"h\": 0, \"s\":1, \"v\":2}\n # \"translate\" image channel to channel index\n if not channel in channelDic:\n raise AttributeError(\"invalid channel value. Valid values are h, s, or v\")\n\n # which format (ConvNet (3, w, h) vs. Normal (w, h, 3)\n reshape = False\n prevShape = image.shape\n \n if image.shape[0] == 3 or image.shape[0] == 1:\n reshape = True\n if image.shape[0] == image.shape[1] or (image.shape[0] == 1 and image.shape[1] == 3): # grayscale 1L, 1L, h, w OR color 1L, 3L, h, w\n reshapeVector = (image.shape[2], image.shape[3], image.shape[1]) \n else: \n reshapeVector = (image.shape[1], image.shape[2], image.shape[0]) # single row color or grayscale 1L/3L, h, w\n image = image.reshape(reshapeVector)\n \n #print \"Shape\",image.shape\n #print \"dtype\",image.dtype\n # convert to hsv\n hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n # hsv[:,:,2] += value - would be way faster but this does not prevent overflow (a high value gets even higher and becomes 0)\n channels = cv.split(hsv)\n for row in xrange(len(channels[channelDic[channel]])):\n for col in xrange(len(channels[channelDic[channel]][0])):\n channels[channelDic[channel]][row][col] = max(min(255, channels[channelDic[channel]][row][col]*value),0)\n\n image = cv.cvtColor(cv.merge(channels), cv.COLOR_HSV2BGR)\n\n # reshape back\n if reshape: \n image = image.reshape(prevShape)\n return image"
]
| [
"0.7590641",
"0.7467261",
"0.74252427",
"0.7315436",
"0.72571546",
"0.7229215",
"0.70048535",
"0.6902539",
"0.6804992",
"0.6753325",
"0.6750485",
"0.67452693",
"0.6604627",
"0.6576694",
"0.6470038",
"0.6399022",
"0.6370358",
"0.6367779",
"0.6312419",
"0.6231982",
"0.6156993",
"0.6105176",
"0.60688156",
"0.6052902",
"0.6039261",
"0.5996892",
"0.5991632",
"0.5974214",
"0.59666693",
"0.59608954"
]
| 0.7747147 | 0 |
Removes all Tkinter widgets from a master window [master], skipping widgets of type [ignore] | def removeWidgets(master, ignore=None):
for w in master.winfo_children():
if w.winfo_class() != ignore:
w.destroy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_screen(self):\r\n lst_grid = self.root.grid_slaves()\r\n for widget in lst_grid:\r\n widget.destroy()\r\n lst_pack = self.root.pack_slaves()\r\n for widget in lst_pack:\r\n widget.destroy()",
"def removeWidgets(self): \n for widget in self.activeWidget:\n if widget in self.window.children:\n self.window.remove_child(widget)\n widget.destroy()\n self.activeWidget = []",
"def delete_widgets_from(layout):\n for i in reversed(range(layout.count())):\n widgetToRemove = layout.itemAt(i).widget()\n # remove it from the layout list\n layout.removeWidget(widgetToRemove)\n # remove it from the gui\n widgetToRemove.setParent(None)",
"def destroy_all(self):\n\n for k in self.widgets:\n self.widgets[k].destroy()\n self.widgets = {}\n self.window.destroy()\n self.window = tk.Frame(self.root)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)",
"def clearFrame(self, event=None):\n for widget in self.winfo_children():\n widget.destroy()\n del self.tiles[:]",
"def clearwin(event=None):\r\n # for child in mframe.winfo_children():\r\n # child.destroy()\r\n global mframe\r\n mframe.destroy()\r\n mframe = tkinter.Frame(main, width=800, height=600, background='pink')\r\n mframe.pack(fill=\"both\", expand=True, padx=20, pady=20)",
"def reset_inputs(self):\n for widget in self.frame.winfo_children():\n if isinstance(widget, tkinter.Entry):\n widget.delete(0, tkinter.END)\n widget.insert(0, \"\")\n elif isinstance(widget, tkinter.Checkbutton):\n widget.deselect()",
"def remove_old_graphs(self):\r\n widgets = self.winfo_children()\r\n graph_frames = []\r\n\r\n for widget in widgets:\r\n if type(widget) == tk.Frame:\r\n graph_frames.append(widget)\r\n\r\n for frame in range(len(graph_frames) - 1):\r\n graph_frames[frame].destroy()",
"def keep_widgets(self,builder,widgets):\n keep = {}\n for widget in widgets:\n w = builder.get_object(widget)\n if w != 0: keep[widget] = w\n return keep",
"def unblock_widgets(self):\n\n for element in self.widget_elements:\n element.setDisabled(False)\n\n self.listWidget.setFocus()",
"def reset_widgets(self):\n\n widgets = [\n self.test_input,\n self.results_input\n ]\n\n for widget in widgets:\n clear_text(widget)",
"def clean_gui():\n pass",
"def reset(self):\r\n self.abort_load = True\r\n for textbox in self.textboxes:\r\n textbox.config(state = tk.NORMAL)\r\n textbox.delete('1.0', tk.END)\r\n for tag in TAGS:\r\n textbox.tag_remove(tag, \"1.0\", tk.END)",
"def cleanWorkspace(self):\n self.window.labelMessage.setText(\"\")\n\n if self.inspectinoAnalyzer:\n del self.analyzerWidget\n self.inspectinoAnalyzer = False\n\n for index in reversed(range(self.window.layoutDepthermInpesction.count())):\n layoutItem = self.window.layoutDepthermInpesction.itemAt(index)\n widgetToRemove = layoutItem.widget()\n print(\"found widget: \" + str(widgetToRemove))\n widgetToRemove.setParent(None)\n self.window.layoutDepthermInpesction.removeWidget(widgetToRemove)",
"def _remove_buttons(self, gui):\n gui.greet_button.pack_forget()\n gui.close_button.pack_forget()\n gui.buttons_on.set(False)",
"def removeFrame(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n frame.pack_forget()",
"def destroy_widgets(self) -> None:\n for elem in self.widgets:\n try:\n for el in elem:\n el.destroy()\n except TypeError:\n elem.destroy()",
"def clear_frame(self, table):\n for widget in table.winfo_children():\n widget.destroy()",
"def clearButtons(self):\n for ch in self.cboxes:\n ch.hide()\n for tbx in self.tboxes:\n tbx.hide()\n for btnum in reversed(range(self.flowLayout.layout.count())):\n item = self.flowLayout.layout.itemAt(btnum)\n if item is not None:\n self.flowLayout.layout.removeItem(item)\n r, c = self.flowLayout.items[item.widget()]\n del self.flowLayout.items[item.widget()]\n del self.flowLayout.rows[r][c]\n item.widget().hide()\n self.flowLayout.update()",
"def clear_widgets(self):\n self.json_progress = None\n self.progress_message_bar = None\n self.json_progress_message_bar = None\n if self.progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.progress_message_bar_widget)\n self.progress_message_bar_widget = None\n if self.json_progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.json_progress_message_bar_widget)\n self.json_progress_message_bar_widget = None",
"def reset(self, event):\n #Resets the current puzzle\n self.w.delete('all') #Deletes all widgets/components \n self.resetnums() #Call restnums() to reset self.movelist\n\n #Destroys all buttons on GUI\n #self.buttonlist.append(self.lbl)\n for i in range(len(self.buttonlist)):\n self.buttonlist[i].destroy()\n\n self.create_widgets(self.counter) #Calls the create_widgets() to redisplay all widgets and buttons\n self.lbl2[\"text\"] = \"\" #Clears any text (e.g. instructions or check) if there is any.",
"def unloadAllFrames(self, event=None):\n for idx, frame in enumerate(self.frameList):\n frame.clearFrame()\n self.frameBtnList[idx].config(state=\"disabled\")",
"def clear_all(cls):\n del cls.buttons[:]",
"def clearScreen(self):\n self.removeFrame(self.frame1)\n self.removeFrame(self.frame2)\n self.separator.destroy()\n #Here, the app will lose the row and column configuration and does not\n #apply new configuration. Don't know why?. So that, I destroy the\n #parent (in this case, a frame), create a new frame and set it again.\n self.parent.destroy()\n mainFrame = tk.Frame(self.store[\"root\"], bg=\"#FFF\")\n self.parent = mainFrame\n self.parent.grid(column=0, row=0, sticky=\"nsew\")",
"def clean(self):\n for i in self.winfo_children():\n i.destroy()",
"def disable_tk(self):\n self.clear_inputhook()",
"def reset(self):\n for lane in self.lanes.values():\n lane.puck_area.clear_widgets()\n lane.patrons = list()\n lane.disabled = False\n lane.beers = list()\n\n self.message_holder.remove_widget(self.you_lose_label)\n self.message_holder.remove_widget(self.you_win_label)",
"def closeAllControlPanel():\n for node in nuke.allNodes():\n node.hideControlPanel()\n if node.Class() == 'Group':\n node.begin()\n for child in nuke.allNodes():\n child.hideControlPanel()\n child['selected'].setValue(False)\n node.end()",
"def removeExistWidget(self, layout):\n for index in range(layout.count()):\n if layout.itemAt(index).widget():\n layout.itemAt(index).widget().deleteLater()",
"def clear(self):\r\n\r\n # Clear the widgets list\r\n self.widgets_list = []\r\n\r\n # Refresh the scroll area\r\n self._refresh()"
]
| [
"0.6616854",
"0.65503854",
"0.63473904",
"0.6290107",
"0.6270392",
"0.6248559",
"0.6188864",
"0.6032273",
"0.60007054",
"0.59946984",
"0.5981343",
"0.5974938",
"0.59474486",
"0.59302163",
"0.5915207",
"0.5860621",
"0.5817283",
"0.5775139",
"0.57666093",
"0.574026",
"0.57241154",
"0.57197356",
"0.56485677",
"0.56379616",
"0.5614544",
"0.56138813",
"0.56049234",
"0.554978",
"0.54967713",
"0.5489488"
]
| 0.8474203 | 0 |
Create a new player and write data to the filename. | def create_player (self, username = None):
# Get unique username if needed
if (username == None):
username = "default_username" + str (time.time ())
self.username = username
r = requests.post (self.url_endpoint, data = {"new_player": self.username})
if (r.status_code != 201):
print ("Failed to create user:\n", r.text)
return r
play_data = json.loads (r.text)
self.secret = play_data['player_secret']
with open (self.filename, "w") as f:
f.write (f"username {self.username}\nsecret {self.secret}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_player(self,player_name, attr = None, team_file = None):\n player_first, player_last = player_name.split(\" \")\n player_file = player_name.replace(\" \", \"\") + '.json'\n if(os.path.exists(self.player_path + player_file)):\n return(False)\n else:\n with open(self.player_path + player_file, 'x') as new_file:\n with open(self.player_template_path, 'r') as template:\n data = json.load(template)\n data['player_name'] = player_first + ' ' + player_last\n json.dump(data, new_file)\n template.close()\n new_file.close()\n\n\n if attr: # If the user inputed new data, add the data, else use template\n try:\n self.update_player_attribute(player_file, attr)\n except:\n os.remove(player_file)\n\n if team_file: #if the user selected a team, add the player to the team\n self.add_team_player(team_file, player_file)\n\n return(True)",
"def savePlayerInfo(self):\n if self.__filename == \"\":\n self.__setPlayerFilename()\n try:\n #f = open(self.__filename, \"w\")\n pickle.dump(self, open(self.__filename, \"w\"))\n return True\n #f.close()\n except IOError:\n raise PlayerIOError(\"Unable to write player info to file.\")",
"def write_new_player(player_name, player_fifa_api_id, birthday, height, weight, player_api_id=None):\n print(\"Inserting new player\", player_name, player_api_id, player_fifa_api_id)\n player_diz = dict()\n\n player_diz[\"player_name\"]= player_name\n if not util.is_None(player_fifa_api_id):\n player_diz[\"player_fifa_api_id\"] = player_fifa_api_id\n if not util.is_None(birthday):\n player_diz[\"birthday\"] = birthday\n if not util.is_None(height):\n player_diz[\"height\"] = height\n if not util.is_None(weight):\n player_diz[\"weight\"] = weight\n if not util.is_None(player_api_id):\n player_diz[\"player_api_id\"] = player_api_id\n\n SQLLite.get_connection().insert(\"Player\", player_diz)\n return read_by_fifa_api_id(player_fifa_api_id)",
"def newPlayer():\r\n pass",
"def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata",
"def save_player(self, serialized_player):\n self.player_table.insert(serialized_player)",
"def __setPlayerFilename(self):\n if self.__playerName != \"???\":\n l=self.__playerName.rsplit(\" \")\n nameWithoutSpaces=\"_\".join(l)\n self.__filename = fileLocs.playerProfiles+\"\\\\\"+nameWithoutSpaces+r\".p\"",
"def save_player(user):\n with shelve.open('myfile') as savefile:\n savefile[user.name] = user",
"def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1",
"def store_new_character(player_character: dict):\r\n filename = 'username.json'\r\n with open(filename, 'w') as f_obj:\r\n json.dump(player_character, f_obj)\r\n print(\"Thanks for playing! We'll remember you when you come back, \" + player_character['Name'])",
"def write_to_file(file, name):\n with open(file, \"a\") as player_list:\n player_list.writelines(name)",
"def write_winner(self, player):\n try:\n with open(self.file_path, mode='a') as winner_file:\n winner_writer = csv.writer(winner_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n winner_writer.writerow([datetime.now(), player.name, player.score])\n except IOError as io:\n print('Failed to open file.\\n{}'.format(str(io)))\n self.sense.show_message(str(io), scroll_speed=0.04)",
"def save_game(player_location, filename):\n\tsave_data = {\"location\": player_location}\n\twith open(os.path.normpath(\"save_files/\" + filename + \".txt\"), \"w\") as outfile:\n\t\tjson.dump(save_data, outfile)\n\tprint(\"\\nYour cluster and player data has been saved.\")",
"def create(self):\n self.create_file()",
"def setup_by_filename(self, filename: str):\n self.filename = filename if filename is not None else \"tmp\"\n self.save_file = mcpython.common.world.SaveFile.SaveFile(self.filename)",
"def create_player(\n self, plr_id, last_name, first_name, position,\n alternate_last_names=[], alternate_first_names=[],\n alternate_positions=[], capfriendly_id=None):\n # initiliazing player object\n # TODO: remove alternate options (if necessary)\n plr = Player(\n plr_id, last_name, first_name, position,\n alternate_last_names=alternate_last_names,\n alternate_first_names=alternate_first_names,\n alternate_positions=alternate_positions)\n if capfriendly_id:\n plr.capfriendly_id = capfriendly_id\n\n commit_db_item(plr, True)\n\n return Player.find_by_id(plr_id)",
"def put(self, player_name, data):\n if isinstance(data, dict):\n player = self._get_player(player_name)\n if player == None:\n player = {'name': player_name}\n player['data'] = data\n self._collection.insert(player)\n #TODO: TypeError exception or something simmilar",
"def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def save_game(player, data):\n\n data = {\n \"rooms\": data[\"rooms\"],\n \"maze\": data[\"maze\"],\n }\n\n file_name = f\"./dork/saves/{player}.yml\"\n with open(file_name, \"w\") as save_file:\n yaml.safe_dump(\n data, save_file,\n indent=4, width=80,\n )\n\n return f\"Your game was successfully saved as {player}.yml!\"",
"def create_newfile():\n date = datetime.today().strftime('%d_%m_%Y').replace(\" \", \"_\")\n file_name = screen_name + '_' + date + \".json\"\n with io.FileIO(file_name, \"w\") as file:\n file.write(\"Json\")\n file.close()\n return file_name",
"def create_player(dct, player_name, place):\n dct.update({player_name: ['N/A'] * place})",
"def create_existing_player():\n logic_test = True\n data = \"\"\n while logic_test:\n try:\n player_choice = view.select_player_view(select_players())\n data = select_players()[player_choice]\n logic_test = False\n except IndexError as error:\n view.show(error)\n continue\n return data",
"def post(self):\n args = player_parser.parse_args()\n print(args)\n unique_player = DBPlayer.query.filter_by(nickname=args['nickname']).first()\n if unique_player:\n return get_response(409, 'player already existed!')\n try:\n new_player = DBPlayer(**args)\n db.session.add(new_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(201, 'done!')",
"def create_player(self, request):\n if request.player_name:\n if Player.query(Player.name == request.player_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n else:\n raise endpoints.BadRequestException('verify the name that you are sending in the request')\n if request.email:\n if gameutils.get_regex(request.email) == None:\n print(' ERROR - invalid email, please try again')\n raise endpoints.ConflictException(\n 'invalid email, please try again!')\n else:\n raise endpoints.BadRequestException('verify the email that you are sending in the request')\n\n player = Player(name=request.player_name, email=request.email)\n player.put()\n\n return StringMessage(message='Player created!'.format(request.player_name))",
"def save(self, game):\n try:\n with open(self.filename, mode='w+') as file:\n # First char in the file is the next player\n file.write(game.next_player)\n # Then the board as a string of 64 characters\n file.write(str(game.board))\n\n except IOError as err:\n print(f\"Error saving file: {err}\")",
"def write_player_names_to_outfile(self) -> None:\n if not self.outfile:\n return\n\n with open(self.outfile, \"w+\") as outfile:\n # write the player names to the outfile\n outfile.write(f\"{' '.join(self.player_names)}\\n\")",
"def make_player(self, page):\r\n player = Player()\r\n face = page.find(\"div\",id=\"info_content\").find_all(\"td\")\r\n player.name = face[0].get_text().strip()\r\n player.club = face[1].get_text().strip()\r\n player.nation = face[2].get_text().strip()\r\n player.league = face[3].get_text().strip()\r\n player.sf = int(face[4].get_text().strip())\r\n player.wf = int(face[5].get_text().strip())\r\n player.ir = int(face[6].get_text().strip())\r\n player.foot = face[7].get_text().strip()\r\n player.height = float(face[8].get_text().split(\"|\")[0].strip(\"cm \"))\r\n player.weight = float(face[9].get_text().strip(\"\"))\r\n player.version = face[10].get_text().strip()\r\n player.def_wr = face[11].get_text().strip()\r\n player.att_wr = face[12].get_text().strip()\r\n player.added_on = datetime.strptime(face[13].get_text().strip()[2:], \"%y-%m-%d\")\r\n player.real_face = face[15].get_text().strip()==\"icon-checkmark text-success\"\r\n player.body_type = face[16].get_text().strip()\r\n player.age = face[17].get_text().strip(\" years old \\n\\r\")\r\n player.rating = self.make_rating([sub for sub in page.find(\"div\",id=\"stats_box\").find(class_=\"stats-inner col-md-12\").find(class_=\"row\").children])\r\n player.href = \"/\"+page.find(id=\"share_player_link\")[\"value\"].strip(\"https://www.futbin.com/\")\r\n player.pid = int(page.find(id=\"page-info\")[\"data-player-resource\"])\r\n return player",
"def create_file(self):\n with open(self.get_path(), 'w', encoding='utf8') as file:\n print(\"- {}\".format(time2str(self.start)), file=file)"
]
| [
"0.7191583",
"0.67326915",
"0.6686919",
"0.6426288",
"0.62403584",
"0.6238124",
"0.62195104",
"0.6184011",
"0.6181962",
"0.61628854",
"0.61118764",
"0.607156",
"0.6037864",
"0.5991602",
"0.5968651",
"0.5889733",
"0.58838624",
"0.5861497",
"0.5849546",
"0.5849546",
"0.57562464",
"0.5752104",
"0.57507145",
"0.5750522",
"0.5735873",
"0.57296205",
"0.5728667",
"0.57276726",
"0.57264364",
"0.5708101"
]
| 0.765587 | 0 |
Create a new game, return the game_name for others to join. | def create_new_game (self, game_name = None, ai_game = False):
if (game_name == None):
game_name = "default_game_name" + str (time.time ())
self.cur_game_name = game_name
data =\
{
"new_game": True,
"player_secret": self.secret,
"game_name": self.cur_game_name
}
if (ai_game):
data['ai_game'] = True
r = requests.post (self.url_endpoint, data)
if (r.status_code != 201):
print ("Failed to create game:\n", r.text)
return r
# Not sure if there is any need for this--editing perhaps? Unimplemented.
game_data = json.loads (r.text)
self.cur_game_secret = game_data['game_secret']
return self.cur_game_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_game(self, request):\n player = Player.query(Player.name == request.player_name).get()\n if not player:\n raise endpoints.NotFoundException(\n 'A Player with that name does not exist!, '\n 'we need one player in order to create the game')\n try:\n game = Game.new_game(player)\n except ValueError:\n raise endpoints.BadRequestException('sarasa')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return game.to_form('Game created!, we only need one player '\n 'to join in order to start the game', player.name)",
"def new_game(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist.')\n game = Game.new_game(user.key)\n print game.target\n return game.to_form('Good luck playing Silicon Valley Hangman!!')",
"def game_created(self, pname, game):\n logging.debug('Game Created:')\n logging.debug(game)\n g = self.games.get(game['matchid'], None)\n if g:\n g.roomid = game['roomid']\n g.tableindex = game['tableindex']\n self.comm.game_ready(g)",
"def create_new_game(game_name, player_name, points_to_win=POINTS_TO_WIN,\n min_players=MIN_PLAYERS, max_players=MAX_PLAYERS):\n do_house_keeping()\n if not can_create_new_game():\n return {}\n game_name = game_name or generate_game_name()\n player_name = player_name or generate_player_name()\n points_to_win = points_to_win or POINTS_TO_WIN\n min_players = min_players or MIN_PLAYERS\n max_players = max_players or MAX_PLAYERS\n if min_players < 2:\n min_players = 2\n if max_players > 10:\n max_players = 10\n game_id = generate_id(GAME_ID_LENGTH)\n game_data = {\n 'id': game_id,\n 'name': game_name,\n 'deck': create_deck(),\n 'stack': [],\n 'created_at': serialize_datetime(datetime.utcnow()),\n 'started_at': None,\n 'ended_at': None,\n 'active': False,\n 'reverse': False,\n 'min_players': min_players,\n 'max_players': max_players,\n 'players': [],\n 'points_to_win': points_to_win\n }\n add_player_to_game(game_data, player_name, True)\n msg = make_info_message(\n 'Click \"Start\" after all player(s) have joined')\n flash_broadcast(game_data, msg)\n result = save_state(game_data)\n if result:\n return game_data\n return {}",
"def _get_new_game(self, name_one=\"good golly\", name_two=\"my mummy\"):\n first_user, second_user = self._get_two_players(name_one, name_two)\n \n #create a new game\n game = games.new_game(first_user.key, second_user.key)\n return (game, first_user, second_user)",
"def start_new_game(cls, name, max_players):\n new_game_id = str(uuid.uuid4())\n new_game = {\n \"game_id\": new_game_id,\n \"board\": [[cls.EMPTY for i in range(cls.BOARD_ROWS)]\n for j in range(cls.BOARD_COLS)],\n \"game_status\": cls.OPEN,\n \"players\": [name],\n \"turn\": name,\n \"max_players\": max_players,\n }\n db.save_game(new_game_id, new_game)\n return new_game_id",
"def create_game(game_ID):\n\n if r.exists(\"state:\" + game_ID) == 1:\n raise Exception(\"Game exists already\")\n\n new_game = {\n \"winner\": \"none\",\n \"turn\": \"blue\",\n \"action\": \"spymaster\",\n \"hint\": \"\",\n \"attemptsLeft\": 0,\n \"redPoints\": 0,\n \"bluePoints\": 0,\n }\n words = create_board()\n set_fields = r.hset(\"state:\" + game_ID, mapping=new_game)\n set_fields += r.hset(\"words:\" + game_ID, mapping=words)\n\n if set_fields == 32:\n return {\"playerState\": new_game, \"wordsState\": words}\n else:\n raise Exception(\"Could not make Game\")",
"def create_new(user, type):\n # make the game's name from the username and the number of\n # games they've created\n game_type = GameType.objects.get(type=type)\n hash = hashlib.sha1(\n ('U' + str(user.id) + 'T' + str(game_type.id) + 'D' + str(datetime.now())).encode(\"UTF-8\")).hexdigest()\n game_code = str(hash[:10]).upper()\n new_game = Game(creator=user, game_type=game_type, game_code=game_code)\n new_game.save()\n user_profile = UserProfile.objects.get(user=user)\n\n new_player = GamePlayer(\n game=new_game,\n player=user,\n life=new_game.game_type.starting_life,\n avatar_img=user_profile.avatar_img\n\n )\n\n new_player.save()\n # put first log into the GameLog\n new_game.add_log('Game created by {0}'.format(new_game.creator.username))\n\n return new_game",
"def new_game(blank_game, user_id=None):\n if user_id:\n g.db.remove({'_id': user_id}, justOne=True)\n new_id = g.db.insert({'game': blank_game.serialise()})\n flash('New user successfully created')\n return new_id",
"def create_new_game(self):\r\n global game_instance\r\n game_instance = game.Game()\r\n game_instance.set_word(db.get_random_word())\r\n print(\"\\n---------NEW GAME---------\")\r\n self.current_word = \"----\"",
"def newGame():\n result = cs411_game.newGame()\n return prepJSON(result)",
"def game_new():\n posted = request.get_json()\n\n # Parse out the settings from the post\n settings = {}\n possible_settings = Game.DEFAULT_SETTINGS.keys()\n for setting in possible_settings:\n if setting in posted:\n settings[setting] = int(posted[setting])\n\n # Create and start the game -- the start is called immediately for now\n # as we do not have a way to change settings or add players in the UI\n game = Game({\"settings\": settings})\n game.start()\n\n # Save the game to the database\n queries.insert_game(game)\n queries.insert_game_event(game.game_id, {\"type\": \"start\"})\n\n response = json.jsonify(game=game.get_game_state())\n return response",
"def create_new_game(request):\n\n if request.method == 'POST':\n form = NewGameForm(request.POST, request.FILES)\n if form.is_valid():\n # Save known fields\n game = Game()\n game.name = request.POST['game_name']\n\n # Check if there exist another game wuth that name\n games = Game.objects.filter(name=game.name)\n if len(games) > 0:\n error_msg = 'There already exists a game with that name!'\n return render_to_response('gaming/new_game.html',\n {\n 'form': form,\n 'error_msg': error_msg,\n },\n context_instance=RequestContext(request))\n\n game.rules_file = request.FILES['game_rules']\n game.judge_source_file = request.FILES['game_judge']\n game.max_players = request.POST['max_players']\n game.judge_lang = request.POST['judge_language']\n game.save()\n game.moderators.add(request.user)\n game.compile_judge()\n \n return HttpResponseRedirect('/game_details/' + str(game.id) + '/')\n else:\n form = NewGameForm()\n return render_to_response('gaming/new_game.html',\n {\n 'form': form,\n },\n context_instance=RequestContext(request))",
"def create_new(creator=None, opponent=None):\n # Create new game with user as creator\n new_game = Game(creator=creator, opponent=opponent, current_turn=creator)\n new_game.save()\n\n # Initialize cells\n for row in range(new_game.rows):\n for col in range(new_game.cols):\n new_cell = GameCell(\n game = new_game,\n row = row,\n col = col\n )\n new_cell.save()\n\n # Put first log into GameLog\n new_game.add_log(f'Game created by {new_game.creator}')\n return new_game",
"def create_game(current_user, data):\n\n game = Game(user_id=current_user,\n last_saving=data,\n status=data[\"status\"],\n timing=data[\"timing\"],\n score=data[\"score\"])\n\n db.session.add(game)\n db.session.commit()\n\n return game",
"def do_create_game(self):\n\t\tself.nickname = self.e_nickname.text\n\n\t\tself.hide_all()\n\t\tself.show_create()\n\t\tself.renderer.color = (255, 255, 255, 0)",
"async def new(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n host = ctx.message.author\n if room not in tod_games:\n tod_games[room] = {'host': host.name, 'host_id': host.name, 'participants': {}, 'last': None}\n tod_games[room]['current'] = host.name\n tod_games[room]['last'] = host.name\n tod_games[room]['participants'][host.name.lower()] = {'spins': 0}\n await amor_manager.say(\"New Game of Truth Or Dare started in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Truth or Dare already in progress in {}. Game host: {}\".format(room, host))",
"def new_game(self):\n dialog = CreateGameDialog(self.root, \"New Game\")\n self.create_player_grid(dialog.grid_size)\n players_dialog = PlaceShipsDialog(\n self.root, title=\"Add Player\", game=self.game, grid_size=dialog.grid_size)",
"def create_table(self, game_name_part):\n # Try to close any logged-in session gracefully\n lower_game_name = re.sub(r\"[^a-z0-9]\", \"\", game_name_part.lower())\n self.quit_table()\n self.quit_playing_with_friends()\n games, err_msg = get_game_list()\n if len(err_msg) > 0:\n return -1, err_msg\n lower_games = {}\n for game in games:\n lower_name = re.sub(r\"[^a-z0-9]\", \"\", game.lower())\n lower_games[lower_name] = games[game]\n # If name is unique like \"race\" for \"raceforthegalaxy\", use that\n games_found = []\n game_name = \"\"\n for game_i in list(lower_games.keys()):\n if game_i == lower_game_name: # if there's an exact match, take it!\n game_name = lower_game_name\n elif game_i.startswith(lower_game_name):\n games_found.append(game_i)\n if len(game_name) == 0:\n if len(games_found) == 0:\n err = (\n f\"`{lower_game_name}` is not available on BGA. Check your spelling \"\n f\"(capitalization and special characters do not matter).\"\n )\n return -1, err\n elif len(games_found) > 1:\n err = f\"`{lower_game_name}` matches [{','.join(games_found)}]. Use more letters to match.\"\n return -1, err\n game_name = games_found[0]\n game_id = lower_games[game_name]\n url = self.base_url + \"/table/table/createnew.html\"\n params = {\n \"game\": game_id,\n \"forceManual\": \"true\",\n \"is_meeting\": \"false\",\n \"dojo.preventCache\": str(int(time.time())),\n }\n url += \"?\" + urllib.parse.urlencode(params)\n resp = self.fetch(url)\n try:\n resp_json = json.loads(resp)\n except json.decoder.JSONDecodeError:\n logger.error(\"Unable to decode response json:\" + resp)\n return -1, \"Unable to parse JSON from Board Game Arena.\"\n if resp_json[\"status\"] == \"0\":\n err = resp_json[\"error\"]\n if err.startswith(\"You have a game in progress\"):\n matches = re.match(r\"(^[\\w !]*)[^\\/]*([^\\\"]*)\", err)\n err = matches[1] + \"Quit this game first (1 realtime game at a time): \" + self.base_url + matches[2]\n return -1, err\n table_id = resp_json[\"data\"][\"table\"]\n return table_id, \"\"",
"def join_game (self, game_name):\n r = requests.post (self.url_endpoint,\n data = {\"join_game\": True, \"player_secret\": self.secret, \"game_name\": game_name})\n if (r.status_code != 201):\n print (f\"ERROR: Failed to join game <{game_name}>:\\n\", r.text)\n return r\n\n join_data = json.loads (r.text)\n self.cur_game_name = game_name\n self.cur_game_secret = join_data ['game_name']",
"def add_game(request):\n\treturn add_handler(request, GameCreationForm, 'game')",
"def new_game(sid):\n games = Game.objects.filter(full=False)\n if (len(games) > 0):\n game = games[0]\n game.full = True\n game.save()\n sio.emit('join_game', {'data': serializers.serialize(\n 'json', [game], fields=('created', 'uuid'))}, room=sid)\n join_room(sid, game.uuid)\n else:\n game = Game(created=datetime.now())\n board = Board()\n\n game.game_json = json.dumps(board.__dict__, cls=BoardEncoder)\n game.save()\n\n sio.emit('create_game', {'data': serializers.serialize(\n 'json', [game], fields=('created', 'uuid'))}, room=sid)\n join_room(sid, game.uuid)",
"def Network_sendNameOfGame(self, data):\n self.Send({\"action\": \"defineGame\", \"ruleset\": self._server.ruleset})",
"def received_CREATE(self, message):\n\n\t\t_, rival_name, order = message.split(' ')\n\n\t\tplayer_token = \"X\" if order == \"first\" else \"O\"\n\t\trival_token = \"O\" if order == \"first\" else \"X\"\n\n\t\tself.player_model.player.token = player_token\n\n\t\t#Initializing rival player\n\t\tself.player_model.rival_player.name = rival_name\n\t\tself.player_model.rival_player.token = rival_token\n\t\tself.player_model.current_player = self.player_model.player if order == \"first\" else self.player_model.rival_player\n\n\t\tself.create_game()\n\n\t\tif order == \"first\":\n\t\t\tself.play()\n\n\t\telse:\n\t\t\tself.wait_to_play()\n\n\t\tself.player_frame.message_screen.write(f\"New game against {rival_name}\")\n\t\tself.player_frame.message_screen.write(f\"You go {order}\")",
"def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game",
"def new_game(self, req):\n return models.BattleShip.create(req.left, req.right)",
"def testCreateGame(self):\n game_name = 'test_game' \n c = Client()\n response = c.post('/create_game', {'name': game_name})\n self.assertEquals(200, response.status_code)\n\n game_qs = Game.objects.filter(name=game_name)\n\n # get the current game object\n self.assertEquals(1, game_qs.count())\n\n # get teams\n\n teams_qs = Team.objects.filter(game__name=game_name)\n\n self.assertEquals(4, teams_qs.count())",
"def create_game_record(context):\n\n assert isinstance(context, dict)\n assert 'games' in context\n\n game = dict(\n result=[None] * 4,\n hands=[],\n players=[None] * 4,)\n context['games'].append(game)\n\n assert context['games'][-1] == game\n return game",
"def create_game(sid):\n game = Game(created=datetime.now())\n board = Board()\n\n game.game_json = json.dumps(board.__dict__, cls=BoardEncoder)\n game.save()\n\n print(game.game_json)\n\n sio.emit('create_game', {'data': serializers.serialize(\n 'json', [game], fields=('created', 'uuid'))}, room=sid)\n join_room(sid, game.uuid)",
"def create(self, user, game):\n _id = rest.database.db.games.insert_one(game).inserted_id\n\n LOGGER.debug(\"Creating meta game reference.\")\n meta_game_id = self.meta_game_dao.create({\n 'game_id': _id,\n 'turn': game['turn'],\n 'game_name': game['name'],\n 'num_hints': game['num_hints'],\n 'num_errors': game['num_errors'],\n 'owner': user,\n 'num_players': len(game['players']),\n 'players': [user]\n })\n\n LOGGER.debug(\"Adding game to users list of owned games.\")\n\n try:\n self.user_dao.update(\n _id=user, as_model=True).owns(\n own_data={\n 'game': ObjectId(_id),\n 'player_id': 0,\n 'meta_game': ObjectId(meta_game_id)})\n except exceptions.UserNotFound as unf:\n LOGGER.debug(\"User could not be found. Deleting the game.\")\n self.delete(user, _id=_id)\n raise unf\n\n return str(_id)"
]
| [
"0.74982727",
"0.7224595",
"0.71563494",
"0.7064729",
"0.7029593",
"0.6942317",
"0.6867906",
"0.6801306",
"0.67754155",
"0.67094946",
"0.6696803",
"0.66419804",
"0.6577166",
"0.65620756",
"0.65488946",
"0.65466297",
"0.63818413",
"0.63808215",
"0.63724446",
"0.6359087",
"0.63202274",
"0.6286768",
"0.6275276",
"0.62359333",
"0.62099576",
"0.617718",
"0.6140935",
"0.61089164",
"0.60947555",
"0.60546017"
]
| 0.7971833 | 0 |
Join the game given by game_name. | def join_game (self, game_name):
r = requests.post (self.url_endpoint,
data = {"join_game": True, "player_secret": self.secret, "game_name": game_name})
if (r.status_code != 201):
print (f"ERROR: Failed to join game <{game_name}>:\n", r.text)
return r
join_data = json.loads (r.text)
self.cur_game_name = game_name
self.cur_game_secret = join_data ['game_name'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def join(self, game):\n self.game = game\n self.game.join(self)\n return self.game",
"def join_game(game_id, name):\n name = name or generate_player_name()\n game_data = load_state(game_id)\n if not game_data:\n return None\n if game_data['active']:\n return None\n if game_data['ended_at']:\n return None\n player = add_player_to_game(game_data, name)\n if player:\n msg = make_info_message('You have joined the game')\n alt_msg = make_info_message(\n '{} has joined the game'.format(player['name']))\n flash_player(game_data, player, msg, alt_msg)\n save_state(game_data)\n return player",
"def join(self, game):\n self.game = game\n self.game.dealer_join(self)\n return self.game",
"def join(self, name):\n \n if name in self.roomList:\n pass\n else:\n self.sendCommand(\"global /join\",name)",
"def join_game(game):\n game = int(game)\n if 0 > game or game > len(games):\n return \"Not a valid gameBike\"\n if games.join_game(game):\n return \"Registration done\"\n else:\n return \"Not valid registration\"",
"def join_game(self, request):\n player = Player.query(Player.name == request.player_name).get()\n print player\n if not player:\n raise endpoints.NotFoundException(\n 'A Player with that name does not exist!, '\n 'we need a second player in order to join the game')\n try:\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n game.player2 = player.key\n game.put()\n except ValueError:\n raise endpoints.BadRequestException('please verify the information '\n 'of the second player')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return game.to_form('Second Player Joined the Game, we are ready to start the game!', player.name)",
"def do_join_game(self):\n\t\titem = self.li_servers.get_selected()[0]\n\n\t\tself.nickname = self.e_nickname.text\n\t\tself.server_uuid = item.server.uuid\n\t\tself.game_name = item.server.name\n\t\tself.num_players = item.server.num_players\n\t\tself.boardsize = item.server.boardsize\n\n\t\td = {\"state\": be.S_JOIN,\n\t\t\t\t\"uuid\": self.server_uuid,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)",
"def join_game(gameid,hostplayer):\n\tuser_db = auth.current_user_db()\n\tname = user_db.name\n\ttoken = channel.create_channel(name + gameid) \n\ttemplate_values = {\n\t\t\t\t\t\t\"gameid\":gameid,\n\t\t\t\t\t\t\"token\": channel.create_channel(name + gameid),\n\t\t\t\t\t\t\"yourname\": name,\n\t\t\t\t\t\t\"hostplayer\":hostplayer\n\t\t\t\t\t\t}\n\treturn render_template(\"player.html\", values=template_values)",
"def _join(self, req):\n orig_game = None\n if self.game:\n orig_game = self.game\n game_id = req.pop(0)\n self.game, self.player = self.server.join_game(game_id, self)\n if orig_game:\n orig_game.leave(self)",
"def join_game(players_cursor, states_cursor, user, room_id):\n # Make sure player isn't already in the game\n joined_query = '''SELECT * FROM players_table WHERE user = ? AND room_id = ?;'''\n joined = players_cursor.execute(joined_query, (user, room_id)).fetchall()\n if len(joined) > 0:\n # TODO: Return proper message for already in game\n raise KeyError\n\n # Check if the game is already full\n players_query = '''SELECT * FROM players_table WHERE room_id = ?;'''\n players = players_cursor.execute(players_query, (room_id,)).fetchall()\n if len(players) == MAX_PLAYERS:\n # TODO: Return proper message for joining full game\n raise ValueError\n\n # Since the game is not full, add the player to the game\n insert_player = '''INSERT into players_table VALUES (?,?,?,?,?,?,?);'''\n players_cursor.execute(insert_player,\n (user, STARTING_STACK, 0, 0, \"\", len(players), room_id))\n \n FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))",
"def join_game(sid, msg):\n if (msg != None and 'uuid' not in msg):\n games = Game.objects.filter(full=False)\n elif (msg != None and 'uuid' in msg):\n games = Game.objects.filter(uuid=msg['uuid'])\n if (len(games) > 0):\n game = games[0]\n game.full = True\n game.save()\n sio.emit('join_game', {'data': serializers.serialize(\n 'json', [game], fields=('created', 'uuid'))}, room=sid)\n join_room(sid, game.uuid)\n else:\n sio.emit('error', {\n 'data': 'No currently joinable game'\n }, room=sid)",
"def join_room(self, room_name): \r\n logging.debug('Joining room {ro}'.format(ro=room_name))\r\n\r\n for room in self.rooms:\r\n if room.name == room_name:\r\n room.add_user(self)\r\n self._rooms[room_name] = room\r\n room.welcome(self)\r\n break\r\n else:\r\n room = Room(room_name)\r\n self.rooms.append(room)\r\n self._rooms[room_name] = room\r\n room.add_user(self)",
"async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))",
"def player_join(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tplayer_ID = args[0] # IndexError\r\n\t\t\tteam_name = args[1] # IndexError\r\n\t\t\tteam_type = self.team_get_type_by_name(team_name) # ValueError\r\n\t\texcept IndexError:\t# Invaild arguments\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"The arguments for join the game are invaild.\")\r\n\t\texcept ValueError:\t# Invaild team name\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"Specified team name {0} is not found.\".format(team_name))\r\n\t\telse:\r\n\t\t\t# If the player has already joined\r\n\t\t\tif self._teammates.get(player_ip) is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"IP {0} has already joined the game.\".format(player_ip))\r\n\t\t\t\treturn\r\n\r\n\t\t\t# Check if the player ID is used in the team\r\n\t\t\tplayer_info = self._teams[team_type].get_player_info_by_ID(player_ID)\r\n\t\t\tif player_info is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"Player \\\"{0}\\\" is already in the team.\".format(player_ID))\r\n\t\t\t\treturn\r\n\r\n\t\t\tplayer_info = self._teams[team_type] \\\r\n\t\t\t\t.add_player_info(player_ip, player_ID, team_name)\r\n\r\n\t\t\tself._teammates[player_ip] = team_type\r\n\t\t\tself._handlers[\"player-join\"].invoke(player_info, team_type)\r\n\r\n\t\t\tself._comm_server.send_message(player_ip, \"join ok\")\r\n\r\n\t\t\t_logger.info(\"Player \\\"{0}\\\" from {1} joins the team \\\"{2}\\\".\" \\\r\n\t\t\t\t.format(player_info.ID, player_info.IP, player_info.team_name))",
"def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass",
"async def join(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\tif interaction.user.id == self.ctx.author.id:\n\t\t\tawait interaction.response.send_message(\n\t\t\t\tcontent='You have already joined the game. You can add AI players or start the game early with the other two buttons.',\n\t\t\t\tephemeral=True,\n\t\t\t)\n\t\t\treturn\n\t\tself.players.append(interaction.user)\n\t\tself.start.disabled = False\n\t\tif len(self.players) >= self.max_players:\n\t\t\tview = None\n\t\t\tself.stop()\n\t\telse:\n\t\t\tview = self\n\t\tawait interaction.response.edit_message(content=self.generate_message(), view=view)",
"async def join(self, ctx):\n if lobby.count(f\"{ctx.author.mention}\") == 0:\n add(lobby, ctx.author.mention)\n await ctx.channel.send(\"You've been added to the queue!\")\n else:\n await ctx.channel.send(\"You're already queued for a match!\")\n await ctx.channel.send(embed=lobby_list())\n if len(lobby) == teamSizeMax:\n if roster:\n await ctx.channel.send(\n \"There is currently a match being picked right now, please try again after picking is finished\")\n else:\n assign_captains()",
"def Network_sendNameOfGame(self, data):\n self.Send({\"action\": \"defineGame\", \"ruleset\": self._server.ruleset})",
"def join(data):\n username, room = data['username'], data['room']\n join_room(room)",
"def game_created(self, pname, game):\n logging.debug('Game Created:')\n logging.debug(game)\n g = self.games.get(game['matchid'], None)\n if g:\n g.roomid = game['roomid']\n g.tableindex = game['tableindex']\n self.comm.game_ready(g)",
"async def join(self, gid):\n\t\tif self.group != None:\n\t\t\tif self.group.gid == gid:\n\t\t\t\traise exceptions.ClientError('IN_GROUP')\n\n\t\tif gid and not utilities.validate_string(gid):\n\t\t\traise exceptions.ClientError('INVALID_STRING')\n\n\t\tif gid:\n\t\t\tgroup = Group.register(gid)\n\t\telse:\n\t\t\ttries = 0\n\t\t\twhile 1:\n\t\t\t\tif tries >= 5:\n\t\t\t\t\traise exceptions.ClientError('INVALID_GROUP')\n\t\t\t\tgid = utilities.random_string(16)\n\t\t\t\tgroup = Group.register(gid)\n\t\t\t\tif len(group.members) == 0:\n\t\t\t\t\tbreak\n\t\t\t\ttries += 1\n\n\t\tif group.in_game:\n\t\t\traise exceptions.ClientError('IN_GAME')\n\n\t\tawait group.add(self)",
"def join_in_play(self, join_in_play):\n\n self._join_in_play = join_in_play",
"def create_new_game (self, game_name = None, ai_game = False):\n if (game_name == None):\n game_name = \"default_game_name\" + str (time.time ())\n self.cur_game_name = game_name\n data =\\\n {\n \"new_game\": True,\n \"player_secret\": self.secret,\n \"game_name\": self.cur_game_name\n }\n if (ai_game):\n data['ai_game'] = True\n r = requests.post (self.url_endpoint, data)\n if (r.status_code != 201):\n print (\"Failed to create game:\\n\", r.text)\n return r\n # Not sure if there is any need for this--editing perhaps? Unimplemented.\n game_data = json.loads (r.text)\n self.cur_game_secret = game_data['game_secret']\n return self.cur_game_name",
"def do_add_to_game(game):\n if not game:\n raise ValueError(\"Tried to do_add_to_game without game\")\n # looking to add themselves to this game\n # check whether this is allowed.\n c = common_db.Common_DB()\n this_session = c.common_Sessionmaker()\n \n\n action_result, message = game.add_players_to_game(game.state.this_player_id)\n if action_result:\n # added to the game. Check if the game is ready\n if game.ready_to_start:\n # do the deal\n game.deal()\n action_result, message = game.save(this_session)\n else:\n action_result, message = game.save(this_session)\n if action_result:\n message = \"Added you to the game. Now sit tight and wait for enough other players to join.\"\n if action_result:\n this_session.commit()\n else:\n this_session.rollback()\n this_session.close()\n\n return action_result, message",
"async def join(self, room_id, *, delay=0, lifespan=math.inf):\n assert type(room_id) is str, \"Paramater room_id should be a string.\"\n await self.add_output(\n \"|/join {}\".format(room_id), delay=delay, lifespan=lifespan\n )",
"def do_start_joined(self):\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": False,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)",
"def join(self, username=None, password=None):\n if username is not None:\n logging.debug(\"Ignored username parameter on join(), it is unsupported on this back-end.\")\n if password is None:\n password = \"\"\n room = str(self)\n\n self.connection.join(room, key=password)\n holder.bot.callback_room_joined(self)\n logging.info(\"Joined room {}\".format(room))",
"def joinGame(self, playerID, startFreshP):\n\n # Log the join attempt\n logStrF = \"joinGame called w/ playerID %d (fresh game requested?: %s)\"\n TournamentSystem._logger.debug(logStrF, playerID, str(startFreshP))\n\n # Add the player to a pending game if one exists\n for gameID, game in self.games.iteritems():\n if game.status == ChessMatch.STATUS_PENDING:\n color = game.join(playerID, p2ReqFreshStart=startFreshP)\n if color:\n logStrF = \"Added player %d to existing game %d (sfP=%s)\"\n TournamentSystem._logger.debug(logStrF,\n playerID,\n gameID,\n str(startFreshP))\n return (True, {\"gameID\": gameID,\n \"startFreshP\": startFreshP})\n\n # Add a player to a new game otherwise\n newMatch = ChessMatch(firstPlayerID=playerID,\n p1ReqFreshStart=startFreshP)\n newID = _getUniqueInt(self.games.keys())\n self.games[newID] = newMatch\n TournamentSystem._logger.debug(\"Added player %d to new game %d\",\n playerID, newID)\n return (True, {\"gameID\": newID})",
"def join(var, wrapper, message):\n # keep this and the event in fjoin() in sync\n evt = Event(\"join\", {\n \"join_player\": join_player,\n \"join_deadchat\": join_deadchat,\n \"vote_gamemode\": vote_gamemode\n })\n if not evt.dispatch(var, wrapper, message, forced=False):\n return\n if var.PHASE in (\"none\", \"join\"):\n if wrapper.private:\n return\n if var.ACCOUNTS_ONLY:\n if wrapper.source.account is None:\n wrapper.pm(messages[\"not_logged_in\"])\n return\n if evt.data[\"join_player\"](var, wrapper) and message:\n evt.data[\"vote_gamemode\"](var, wrapper, message.lower().split()[0], doreply=False)\n\n else: # join deadchat\n if wrapper.private and wrapper.source is not wrapper.target:\n evt.data[\"join_deadchat\"](var, wrapper.source)",
"async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)"
]
| [
"0.7661626",
"0.74558544",
"0.7195359",
"0.7126481",
"0.68551546",
"0.679203",
"0.67057157",
"0.66459024",
"0.6494704",
"0.64837074",
"0.6400737",
"0.633987",
"0.6150461",
"0.57934856",
"0.5774604",
"0.5773208",
"0.5761066",
"0.5674457",
"0.56051105",
"0.5584419",
"0.5577556",
"0.5560805",
"0.5522376",
"0.5522252",
"0.5515444",
"0.5508026",
"0.5481486",
"0.5448042",
"0.54345304",
"0.5433183"
]
| 0.8584529 | 0 |
Play a random number in the current game | def play_random_number (self):
if (self.cur_game_secret == ""):
print ("ERROR: No current game, join a game.")
return 1
play_value = self.hand.pop (random.randint (0, len (self.hand) - 1))
r = requests.post (self.url_endpoint, data = {"play_game": True, "game_name": self.cur_game_name,
"player_secret": self.secret, "play_value": play_value})
# Check if play was accepted
if (r.status_code != 201):
return [1, r]
else:
return [0, r] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_game():\n pass",
"def play_card(self, rnd: PlayerRound) -> int:\n # play random\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # select a random card\n return np.random.choice(np.flatnonzero(valid_cards))",
"def play_against_random(self, color, game_count):\n\n q_player = tournament.QNetPlayer(self.target_network)\n random_player = tournament.RandomPlayer()\n score = tournament.play_one_color(game_count, q_player, color, random_player)\n return score",
"def play_once(human_plays_first):\n # This is all dummy scaffolding code right at the moment\n import random\n rng =random.Random()\n # Pick a random result -1 and 1\n result = rng.randrange(-1,2)\n print(\"Human plays first ={0}, winner={1} \". format(human_plays_first,result))\n return result",
"def play_game():\n pass",
"def play_game(starting_num, n):\n g = Game(starting_num)\n last_spoken = starting_num[-1]\n while g.turn <= n:\n last_spoken = g.speak_number(last_spoken)\n g.turn += 1\n\n return last_spoken",
"def demo_a_number(random_number):",
"def oneGame():\n playOneGame()",
"def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")",
"def make_music_rand():\n pass",
"def play(self):\n print('Playing game...')",
"def tellGameNumber(self):\n t = time.time() - self.start_time\n d = self.start_duration\n if t < d:\n c = int(255 * (1 - (t / d)))\n self.window.alert(\"Starting game number \" + str(self.game_number))",
"def random_play(state, player):\n import random\n actions = YoteRules.get_player_actions(state, player, reward_move=state.rewarding_move)\n choice = random.choice(actions)\n return choice",
"def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()",
"def game_play(self):",
"def play(self):\n b = int(input(\"玩几把:\"))\n flag = 0\n money = []\n while True:\n flag += 1\n histroy_, bet = self.draw()\n money.append(histroy_)\n if histroy_ < self.bet:\n print('您只能玩个 %s 把...' % flag)\n break\n\n if flag == b:\n print('次数已用完!!!')\n break\n\n self.plot(flag, money)",
"def auto_play_random(self, player=None):\r\n if player is None:\r\n player = self.get_player()\r\n legal_list = self.get_legal_list()\r\n next_move = legal_list.rand_obj()\r\n self.new_edge(next_move)",
"def select(self, *args):\n self.cur_pl = self.rightwin\n if self.cur == Win.right:\n self.cur_pl.cur_song = self.rightwin.highlighted()\n else:\n self.cur_pl.cur_song = random.choice(self.cur_pl.data)\n\n next_song = self.cur_pl.cur_song.data\n self.player.play(next_song)\n self.cur_pl.remake_gen()",
"def choose_first():\n rand = random.randint(1, 2)\n print(f\"The first is Player-{rand}\")\n return rand",
"def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans",
"async def random(self, ctx):\n response = await self.api.random()\n await ctx.send(embed=self._build_embed(response))",
"def play(self):\n # log.debug(\"{0} is playing...\".format(self.label))\n legal_cards = []\n for c in self.hand:\n if self.is_legal_play(c):\n legal_cards.append(c)\n chosen_card_pos = random.randint(0, len(legal_cards)-1)\n # log.debug(str(legal_cards))\n chosen_card = legal_cards[chosen_card_pos]\n self.send_play(chosen_card)",
"def new_game():\n global secret_number, turn_count\n import random\n print (\"\\n\" + \"New game!\")\n if r1000:\n secret_number = random.randrange(0, 1000)\n turn_count = 10\n print (\"Guess a number between 0 and 1000.\")\n print (\"Number of guesses left: \" + str(turn_count))\n else:\n secret_number = random.randrange(0, 100)\n turn_count = 7\n print (\"Guess a number between 0 and 100.\")\n print (\"Number of guesses left: \" + str(turn_count))",
"def react_positively(self) -> None:\n positive_reactions = [\n Triggers.MajorWin,\n Triggers.CodeLabHappy,\n Triggers.CodeLabYes,\n Triggers.CodeLabAmazed,\n Triggers.CodeLabCelebrate\n ]\n\n num = randint(0, 4)\n if num == 0:\n self.speak(\"That is Perfect!\")\n self.__play_animation(positive_reactions[num])\n elif num == 1:\n self.__play_animation(positive_reactions[num])\n self.speak(\"Thank you!\")\n elif num == 2:\n self.__play_animation(Triggers.CodeLabCurious)\n self.__play_animation(positive_reactions[num])\n elif num == 3:\n self.__play_animation(positive_reactions[num])\n else:\n self.speak(\"Yes, you got it!\")\n self.__play_animation(positive_reactions[num])",
"def play_game(self):\n # print(\"Playing a random game!\")\n for round_num in range(1, self.rounds_to_play + 1):\n # print(\"Play Round No. {}\".format(round_num))\n round = Round(round_num, self.players)\n score = round.play_round()\n # print(len(round.played_cards))\n for i in range(self.num_players):\n self.scores[i] += score[i]\n # print(\"Scores: {}\".format(self.scores))\n # print(\"Final scores: {}\".format(self.scores))\n for player in self.players:\n player.reset_score()\n return self.scores",
"def play(self, player, game): \n super().play(player, game)\n game.set_action(\"SLEEP_CODER\")",
"def play(self, n, echo=False):\n\n # reset bandit\n self.bandit.reset()\n\n # play the game n times by choosing a random arm\n for i in range(n):\n arm_number = self._select_arm()\n score = self.bandit.pull(arm_number)\n # print score of the game if echo is asked\n if echo:\n print(\"Pull \" + str(self.bandit.total_pulls) +\n \": arm \" + str(arm_number) +\n \" gives score \" + str(score))\n\n # print total score if echo is asked\n if echo:\n print(\"TOTAL SCORE: \" + str(self.bandit.total_score))\n\n # return total score\n return self.bandit.total_score",
"def start_game(self, num_atom):\r\n\r\n # Reset the game statistics\r\n self._stats.set_status(\"playing\")\r\n self.update_screen()\r\n if type(num_atom) == str:\r\n atom_list = self.manual_input()\r\n self.update_board_atoms(atom_list)\r\n else:\r\n atom_list = []\r\n while len(atom_list) < num_atom:\r\n atom_tup = randint(1, 8), randint(1, 8)\r\n if atom_tup not in atom_list:\r\n atom_list.append(atom_tup)\r\n self.update_board_atoms(atom_list)",
"def play_random_video(self):\n num_videos = len(self._video_library.get_all_videos())\n videos = self._video_library.get_all_videos()\n random_index = randint(0, num_videos-1)\n self.play_video(videos[random_index].video_id)\n # print(\"play_random_video needs implementation\")",
"def i_random():\n global randrsl, randcnt\n\n r = randrsl[randcnt]\n randcnt += 1\n if (randcnt > 255):\n isaac_()\n randcnt = 0\n\n return r"
]
| [
"0.67719114",
"0.6715166",
"0.65946126",
"0.657753",
"0.6477629",
"0.64705646",
"0.64638734",
"0.6418923",
"0.63751155",
"0.63736075",
"0.6312471",
"0.6293785",
"0.62644374",
"0.62609655",
"0.6227182",
"0.62094754",
"0.61644715",
"0.6161215",
"0.6155693",
"0.6143119",
"0.6083825",
"0.60772216",
"0.6074516",
"0.6073064",
"0.60650176",
"0.60620254",
"0.60462075",
"0.6033155",
"0.60326725",
"0.60302156"
]
| 0.73658437 | 0 |
Create the file if it does not exist. The file name or directory after the main_work_directory is needed | def createFile(file):
file_ = os.path.join(os.getcwd(),file)
if not(os.path.isfile(file_)):
with open(file_,"a") as f:
f.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_file_if_needed(self):\n if not os.path.exists(self._file.filename()):\n old_umask = os.umask(0o177)\n try:\n open(self._file.filename(), 'a+b').close()\n finally:\n os.umask(old_umask)",
"def create_file(self, content=\"\"):\n if (self.exists()):\n raise IOError(\"A file at '{}' already exists.\".format(self.location))\n with open(self.location, 'w') as f:\n f.write(content)",
"def create_file(self, key=None):\n self.make_directory()\n open(self.file_path(key), 'w').close()",
"def create(self):\n self.create_file()",
"def mkfile(path):\n if os.path.exists(path):\n print(\"{} already exists.\".format(path))\n else:\n try:\n parent = os.path.abspath(os.path.join(path, os.pardir))\n os.makedirs(parent, exist_ok=True)\n open(path, 'a').close()\n except OSError:\n print(\"Uh oh - something went awry!\")\n else:\n print(\"Successfully created {}\".format(path))",
"def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp",
"def _write_file(template, localcontext, output_path, name):\n output = template.render(localcontext)\n filename = os.sep.join((output_path, name))\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(output)\n print u' [ok] writing %s' % filename",
"def create(self, fn: str) -> None:\n # Make the directories as needed.\n theDir = g.os_path_dirname(fn)\n if theDir:\n ok = g.makeAllNonExistentDirectories(theDir)\n # #1453: Don't assume the directory exists.\n if not ok:\n g.error(f\"did not create directory: {theDir}\")\n return\n # Create the file.\n try:\n f = open(fn, mode='wb')\n f.close()\n g.note(f\"created: {fn}\")\n except IOError:\n g.error(f\"can not create: {fn}\")\n except Exception:\n g.error(f\"unexpected error creating: {fn}\")\n g.es_exception()",
"def create_file(self, name, content=u'', folder=None):\n if folder is None:\n folder = self.rootdir\n\n path = os.path.join(folder, name)\n\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n\n with open(path, 'w', encoding='utf-8') as fhd:\n fhd.write(content)\n\n return path",
"def output_file(path):\n path = os.path.abspath(path)\n dirname = os.path.dirname(path)\n\n if not os.access(dirname, os.W_OK):\n raise IOError('File %s cannot be created (check your permissions).'\n % path)\n return path",
"def create_file(self, value=None):\n if not path.isdir(\"Project\"):\n system(\"mkdir Project\")\n string_to_systemize = \"echo \\\"#!/usr/bin/python\\n\" + \\\n \"# Please use fp = open(\\'Project/yourfile.*\\') \" + \\\n \"when opening YOUR files\\n\" + \\\n \"# to not lose YOUR file in the jumble of OTHER files.\\n\" + \\\n \"# Also, do NOT delete the very first comment line.\\n\" + \\\n \"# \\'logs.txt\\' is your friend for your error logs.\\\"\" + \\\n \"> Project/myfile.py\"\n system(string_to_systemize)\n system(\"chmod +x Project/myfile.py\")\n self.open_file()",
"def _create_file(self, filepath):\n folder, _filename = os.path.split(filepath)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n file = h5py.File(filepath, 'a')\n return file",
"def createFile(self, fileName):\n\n with open(fileName, \"w+\") as f:\n return f",
"def output_file(path):\n\n path = os.path.abspath(path)\n dirname = os.path.dirname(path)\n\n if not os.access(dirname, os.W_OK):\n raise IOError('File %s cannot be created (check your permissions).'\n % path)\n return path",
"def create_file(file_name: str, startup_text: str) -> None:\n with open(file_name, 'w') as f:\n f.write(startup_text)",
"def create_file(path):\n open(path, \"w\").close()",
"def create_file(path):\n command = ['touch', TEST_FILE]\n file_operation(path, command)",
"def create_file_directory():\n\n # Verify if directory exist.\n # If yes, delete it and every thing inside and create it again.\n # If not, just create it.\n\n if os.path.isdir('./file'):\n\n shutil.rmtree('./file')\n\n os.mkdir('./file')",
"def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)",
"def _create_file(self, rel_path, text):\n # FIXME: There are better/more secure APIs for creating tmp file paths.\n file_path = self.filesystem.join(self._temp_dir, rel_path)\n self.filesystem.write_text_file(file_path, text)\n return file_path",
"def create_file(self, name: str, content: str) -> None:\n file_path = self.path + os.path.sep + name\n with open(file_path, \"w+\") as file:\n file.write(content)\n file.close()",
"def create_file():\r\n with open(fileName.strftime(\"%Y-%m-%d-%H-%M\")+\".txt\",\"w\") as file:\r\n file.write(\"\")",
"def create_file(dir, path, contents):\n\n fullpath = os.path.join(dir, path)\n fulldir = os.path.dirname(fullpath)\n\n if fulldir:\n try:\n os.makedirs(fulldir)\n except OSError:\n pass\n\n with open(fullpath, 'w') as file:\n file.write(contents)",
"def create_file(cls, relpath, contents='', mode='w'):\r\n with safe_open(os.path.join(cls.build_root, relpath), mode=mode) as fp:\r\n fp.write(contents)",
"def make_test_file(path: Path, name: str = None):\n if not name:\n name = \"test.txt\"\n\n path.mkdir(parents=True, exist_ok=True)\n test_file = path.joinpath(name)\n test_file.touch()\n mtime = (datetime.today() - timedelta(2)).timestamp()\n os.utime(test_file, (mtime, mtime))\n return test_file",
"def create_file():\n with open(\"example.txt\", \"w\") as file:\n file.write(\"\")",
"def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)",
"def makeFile(self, path=None, content=b''):\n if path is None:\n path = self.mktemp()\n with open(path, 'wb') as file:\n file.write(content)\n return path",
"def createFile():\n with open(filename.strftime(\"%Y-%m-%d-%H\") + \".txt\", \"w\") as file:\n file.write(\"\")",
"def check_file(path):\n if not os.path.exists(path):\n os.makedirs(path)"
]
| [
"0.73009753",
"0.7079987",
"0.68976206",
"0.68703544",
"0.6857016",
"0.68501914",
"0.6773079",
"0.6750972",
"0.67404485",
"0.6667307",
"0.66664904",
"0.66058165",
"0.65952826",
"0.6595119",
"0.6497435",
"0.64764035",
"0.6461944",
"0.64312977",
"0.6428579",
"0.6413308",
"0.6391475",
"0.6388815",
"0.63687044",
"0.6344464",
"0.6343802",
"0.63209933",
"0.63189197",
"0.63188654",
"0.6299359",
"0.62947476"
]
| 0.7875238 | 0 |
This function removes duplicates from the CSV file that stores all links | def unqiueList(csvfile):
links_array = genfromtxt(getFulldirAddress(csvfile), delimiter='\n', dtype="unicode")
links_array_2 = np.unique(links_array)
np.savetxt(getFulldirAddress(csvfile), links_array_2, fmt='%s') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_dupes(infile):\n filename = infile.replace('.csv', '-unique.csv')\n s = set()\n with open(filename, 'w') as outfile:\n for line in open(infile):\n if line not in s:\n outfile.write(line)\n s.add(line)",
"def removeDuplicateUrl(inputfile, outputfile):\n\t\n\tlines_seen = set()\n\toutfile = open(outputfile, \"w\")\n\tfor line in open(inputfile, \"r\"):\n \t\tif line not in lines_seen:\n\t\t\toutfileput.write(line)\n\t\t\tlines_seen.add(line)\n\n\toutputfile.close()",
"def reduce_data(old_file, new_file):\n links_list = list()\n\n with open(old_file, \"r\") as file:\n for line in file:\n link = line.replace('\\n', '')\n links_list.append(link)\n\n result_list = list(set(links_list)) # eliminate duplicate links\n\n with open(new_file, \"w\") as file:\n for link in result_list:\n file.write(link + \"\\n\")",
"def remove_duplicated_lines():\n\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n unique_lines = []\n # compare line be line\n with open(os.path.join(work_folder, \"tempfile.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\")) as infile:\n for line in infile:\n if line not in unique_lines:\n outfile.write(line)\n unique_lines.append(line)\n # replace files\n shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(\n work_folder, \"filtered_merged_history_KMDW.csv\"))\n # remove temp file\n os.remove(os.path.join(work_folder, \"tempfile.csv\"))",
"def dataDedup_csv(infile, outfile=None):\n if fpath.isfile(infile):\n \n dataset = pd.read_csv(infile, sep=',', dtype='unicode')\n dedup_dataset = dataset.drop_duplicates()\n \n if outfile!=None:\n dedup_dataset.to_csv(outfile, \n encoding='utf-8', index=False,\n header=False)\n \n return dedup_dataset\n \n else:\n print(\"file \\\"%s\\\" does not exist... or is not a file...\" %(infile))",
"def urls(self):\n header = \"URL,Linked From,Discovery Date\"\n gcsv = self.read()\n if gcsv[0] != header:\n raise Exception(\"Unexpected CSV format\")\n urls = set()\n for line in gcsv[1:]:\n # Get everything before the first commar (just the URL)\n line = line[:line.find(\",\")]\n urls.add(line)\n return urls",
"def remove_duplicates(file):\n file_tmp = 'tmp'\n with open(file) as f, open(file_tmp, 'w') as o:\n for line in unique_everseen(f):\n o.write(line)\n # rename file_tmp to file\n os.remove(file)\n os.rename(file_tmp, file)",
"def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_False=np.where(dup==False)\n\t\n no_dup=data.loc[dup_False]\n\n len_no_dup=no_dup.shape[0]\n len_dup_False_indx=len(dup_False[0])\n\n try:\n assert len_no_dup == len_dup_False_indx\n except AssertionError:\n print(\"Removal of duplicates and creation of new output failed.\")\n print(\"Length of no duplicated indices does not match the subsampled main dataframe... function failiure :(\")\n\n\t\n if header_rows !=0: \n frames = [header, no_dup]\n no_dup = pd.concat(frames)\n\n if sep_type==\"\":\n no_dup.to_csv(out_file, sep=\"\\t\", header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))\n else:\n no_dup.to_csv(out_file, sep=sep_type, header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))",
"def process_links():\n from pymongo import Connection\n conn = Connection()\n db = conn['mchs']\n# db.drop_collection('svodki')\n coll = db['svodki']\n coll.ensure_index(\"url\")\n f = open('alllinks.csv', 'r')\n for l in f:\n parts = l.strip().split('\\t')\n if len(parts) < 4: continue\n year, month, day, url = parts\n o = coll.find_one({'url' : url})\n if o is not None: \n print url, 'passed'\n continue\n u = urllib2.urlopen(url)\n data = u.read()\n u.close()\n data = data.decode('cp1251')\n record = {'year' : int(year), 'month' : int(month), 'day' : int(day), 'url' : url, 'text' : data.encode('utf8')}\n coll.save(record)\n # MCHS site is badly designed and it could block us if we will download pages too often\n time.sleep(5)\n print url, 'processed'",
"def readLinkoCSV(file):\n # define the linkograph that will be returned.\n linkograph = Linkograph()\n\n # define a variable to collect the labels.\n labels = set()\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n\n # Define a cache for backlinks found in the forelink list.\n backlinks = {}\n\n # The current line count.\n count = 0\n for line in reader:\n # Check for backlinks that are already present.\n currentBacklinks = backlinks.get(count)\n\n if not currentBacklinks:\n currentBacklinks = set()\n\n currentLabels = set(line[0].strip().split(' '))\n labels = labels.union(currentLabels)\n\n forelinks = set()\n\n # Loop through the for link list extracting the links.\n for n in [n for n in line[1:] if n != '']:\n forelinks.add(int(n))\n\n # Every forelink corresponds to a backlink\n # For example, if line 0 has a forelink to line 4\n # then line 4 has a backlink to line 0. So cache\n # the fact that line 4 has this backlink.\n if backlinks.get(int(n)):\n # Backlinks have already been added for line int(n)\n # so just add the current backlink.\n backlinks.get(int(n)).add(count)\n else:\n # No prior backlinks have been cached, so create\n # a backlink set containing the current backlink.\n backlinks[int(n)] = {count}\n\n # Added the new entry to the linkograph.\n linkograph.append((currentLabels, currentBacklinks, forelinks))\n count = count + 1\n\n # Set the labels for the Linkograph.\n linkograph.labels.extend(labels)\n linkograph.labels.sort()\n\n return linkograph",
"def removeDuplicates():\n da = open(file_da, 'r').read().splitlines()\n sv = open(file_sv, 'r').read().splitlines()\n no = open(file_no, 'r').read().splitlines()\n tweets = (da + sv + no)\n original_length = len(tweets)\n print \"Loaded %d tweets from file\" % original_length\n print \"Removing duplicates ...\"\n tweets = list(set(tweets))\n\n # Remove tweets without location data\n remainingTweets = []\n for tweet in tweets:\n try:\n js = json.loads(tweet)\n if (not ('place' in js)) or js['place'] == None:\n continue\n elif (not ('full_name' in js['place'])):\n continue\n elif (not ('geo' in js)) or js['geo'] == None:\n continue\n elif (not ('coordinates' in js['geo'])):\n continue\n remainingTweets.append(tweet)\n except ValueError:\n pass\n tweets = remainingTweets\n print \"%d duplicates removed\" % (original_length - len(tweets))\n open(noDuplicatesFilename, 'w').write('\\n'.join(tweets))\n print \"Wrote %d tweets to file\" % len(tweets)",
"def remove_duplicated_url_entries(self):\n\n # based on the data in the WebSite table create a data frame with all the kvk which\n # we have already included. These can be removed from the data we have just read\n nr = self.url_df.index.size\n self.logger.info(\"Removing duplicated kvk/url combinies. Data read at start: {}\".format(nr))\n self.logger.debug(\"Getting all sql websides from database\")\n kvk_list = list()\n url_list = list()\n name_list = list()\n query = (self.CompanyTbl\n .select()\n .prefetch(self.WebsiteTbl)\n )\n for cnt, company in enumerate(query):\n kvk_nr = company.kvk_nummer\n naam = company.naam\n for web in company.websites:\n kvk_list.append(kvk_nr)\n url_list.append(web.url)\n name_list.append(naam)\n\n kvk_in_db = pd.DataFrame(\n data=list(zip(kvk_list, url_list, name_list)),\n columns=[KVK_KEY, URL_KEY, NAME_KEY])\n kvk_in_db.set_index([KVK_KEY, URL_KEY], drop=True, inplace=True)\n\n # drop all the kvk number which we already have loaded in the database\n self.logger.debug(\"Dropping all duplicated web sides\")\n kvk_to_remove = self.url_df.set_index([KVK_KEY, URL_KEY])\n kvk_to_remove = kvk_to_remove.reindex(kvk_in_db.index)\n kvk_to_remove = kvk_to_remove[~kvk_to_remove[NAME_KEY].isnull()]\n try:\n self.url_df = self.url_df.set_index([KVK_KEY, URL_KEY]).drop(index=kvk_to_remove.index)\n except KeyError:\n self.logger.debug(\"Nothing to drop\")\n else:\n self.url_df.reset_index(inplace=True)\n\n self.logger.debug(\"Getting all companies in Company table\")\n kvk_list = list()\n name_list = list()\n for company in self.CompanyTbl.select():\n kvk_list.append(int(company.kvk_nummer))\n name_list.append(company.naam)\n companies_in_db = pd.DataFrame(data=list(zip(kvk_list, name_list)),\n columns=[KVK_KEY, NAME_KEY])\n companies_in_db.set_index([KVK_KEY], drop=True, inplace=True)\n\n self.logger.debug(\"Dropping all duplicated companies\")\n comp_df = self.url_df.set_index([KVK_KEY, URL_KEY])\n comp_df.drop(index=companies_in_db.index, level=0, inplace=True)\n self.url_df = comp_df.reset_index()\n\n nr = self.url_df.index.size\n self.logger.debug(\"Removed duplicated kvk/url combies. Data at end: {}\".format(nr))",
"def remove_duplicates_phase_data():\n print(\"Removing any duplicates...\")\n merged_phases_data = pd.read_csv(results_folder + 'phases/raw/merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n df = pd.DataFrame(merged_phases_data)\n clean_df = df.drop_duplicates()\n clean_df.to_csv(results_folder + 'phases/processed/clean_merged_phases.csv', sep=',', index=False)\n print(\"Duplicates removed!\")",
"def process_links(conn: Connection, path: Path) -> None:\n sql = \"INSERT OR IGNORE INTO Links (src, dest, annotation) VALUES (?, ?, ?)\"\n run_sql_on_csv(conn, path, sql, (int, int, str))",
"def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")",
"def filter_unique_ticker(state: State):\n if state.events.extract_company_list + state.events.load_company_list == 200:\n try:\n state.files.combined_exchanges.columns = map(str.lower, state.files.combined_exchanges.columns)\n\n # Following line is dropping duplicates but there's not?\n state.output = state.files.combined_exchanges[[\"symbol\", 'name', 'lastsale', 'marketcap', 'ipoyear', 'sector', 'industry']].drop_duplicates()\n state.output.to_csv(f\"{PATH}/data/combined_exchanges.csv\")\n state.events.transform_company_list = 100\n except Exception as e:\n state.output = None\n LOGGER.warning(f\"Could not transform company data , error: {e}\")\n\n else:\n state.output = pd.read_csv(f\"{PATH}/data/combined_exchanges_sample.csv\")\n LOGGER.warning(f\"Using old company ticker file\")",
"def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True",
"def del_results_csv(request):\n if request.method == \"POST\":\n try:\n sources = set()\n dataset = request.FILES['dataset']\n handle_uploaded_file(dataset, 'temp/del_rels_csv.csv')\n df = pd.read_csv('temp/del_rels_csv.csv')\n for i, row in df.iterrows():\n rel_id = row['rel_id']\n objs = ExtractedRelation.objects.filter(rel_id=rel_id)\n for o in objs:\n sources.add(o.source)\n objs.delete()\n for s in sources:\n if len(ExtractedRelation.objects.filter(source=s)) == 0:\n Source.objects.filter(source_id=s.source_id).delete()\n except Exception as e:\n print(str(e))\n tb = traceback.format_exc()\n print(tb)\n \n return HttpResponse(\n json.dumps({\"status\": \"error\"}),\n content_type=\"application/json\"\n )\n \n return HttpResponse(\n json.dumps({\"status\": \"success\"}),\n content_type=\"application/json\"\n )",
"def test_duplicate_URLS(self):\n url = [\"https://docs.travis-ci.com/user/languages/python\",\n \"https://github.com/pydata/pandas/issues/10153\",\n \"https://github.com/pydata/pandas/issues/10153\"]\n with self.assertRaises(AssertionError):\n requester.batch_url_to_csv(url,fnames=['travis','travis2','travis3'])",
"def make_urls(csvfile):\n result = []\n with open(csvfile, 'rU') as infile: \n reader = csv.DictReader(infile, dialect=csv.excel,\n fieldnames=['ID','URL','Latitude','Longitude'])\n for row in reader:\n idnum = row['ID']\n url = row['URL']\n lat = row['Latitude']\n lon = row['Longitude']\n result.append((url, idnum, lat, lon))\n return result",
"def _filter_duplicate_urls(urls: list) -> set:\n clean_urls = set()\n for url in urls:\n cleaned_url = url.split(\"&sa=U\")[0]\n clean_urls.add(cleaned_url)\n return clean_urls",
"def save_csv(csv_path: str, duplicates: pd.DataFrame) -> None:\n csv_file = os.path.join(csv_path, 'duplicates.csv')\n duplicates.to_csv(csv_file, index=False)",
"def purify(self, csv_list):\n for row in csv_list:\n if row[0].strip(\" \") == \"ID\":\n return csv_list[csv_list.index(row) + 1:]\n return csv_list",
"def check_errors(csv_file):\n\n logger.info(\"Checking %s.\", csv_file)\n\n errors_found = False\n errors_file = f\"{os.path.splitext(csv_file)[0]}_errors.csv\"\n deduplicated_file = f\"{os.path.splitext(csv_file)[0]}_deduplicated.csv\"\n\n with open(csv_file, 'r', encoding=\"UTF-8\") as input_file,\\\n open(deduplicated_file, 'w', encoding=\"UTF-8\") as dedup,\\\n open(errors_file, 'w', encoding=\"UTF-8\") as errors:\n\n reader = csv.reader(input_file, delimiter=',')\n dedup_writer = csv.writer(dedup)\n error_writer = csv.writer(errors)\n line = 1\n entries = set()\n for row in reader:\n\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n # Record any incorrect classifications.\n if not row[1].lower() == \"normal\" and not row[1].lower() == \"anomaly\":\n error_writer.writerow(\n [line, row[0], row[1], \"INVALID_CLASSIFICATION\"])\n errors_found = True\n\n # Write first image entry to dedup file and record duplicates.\n key = row[0]\n if key not in entries:\n dedup_writer.writerow(row)\n entries.add(key)\n else:\n error_writer.writerow([line, row[0], row[1], \"DUPLICATE\"])\n errors_found = True\n line += 1\n\n if errors_found:\n logger.info(\"Errors found check %s.\", errors_file)\n else:\n os.remove(errors_file)\n os.remove(deduplicated_file)\n\n return errors_found",
"def unique(list_of_links):\n return list(set(list_of_links))",
"def output_dupimgs(duplicate_img_fd, duplicate_images_urls):\n cs = csv.writer(duplicate_img_fd)\n cs.writerow([\"URL\", \"md5\"])\n dp_imgs = defaultdict(lambda: [])\n for (h, u) in duplicate_images_urls:\n dp_imgs[h].append(u)\n\n for h, urls in dp_imgs.items():\n if len(urls) > 1:\n for u in urls:\n cs.writerow([u, h])",
"def clean_duplicated_identifiers(rows):\n\n logger.info('Cleaning duplicates')\n unique_identifiers = []\n c = 0\n for row in rows:\n c += 1\n idf = row['identifier']\n logger.info(f'Searching duplicates {c} {idf}')\n if idf not in unique_identifiers:\n unique_identifiers.append(idf)\n yield row\n else:\n row['is_duplicate'] = True\n logger.info(f'{idf} is duplicated')\n yield row",
"def unique(fname):\n addresses = []\n with gzip.open(fname, \"rb\") as f:\n lines = f.readlines()\n for line in lines:\n #print(\"[\"+line.split()[1]+\"]\")\n if line.split()[0] not in addresses:\n addresses.append(line.split()[0])\n return addresses",
"def dupCheck(doc, col):\n\tdocList = list()\n\twith open(doc) as f:\n\t\tfor l in f.readlines():\n\t\t\tname = l.split(',')\n\t\t\tdocList.append(name[col])\n\t\tif len(docList) != len(set(docList)):\n\t\t\tprint(\"Duplicates Detected\")",
"def isolate_subreddit(csv_location, subreddit):\r\n\r\n individual_subreddit_csvs = csv_location + \"_\" + subreddit + '.*.csv'\r\n\r\n df = dd.read_csv(csv_location + \".csv\", header=0, sep='\\t')\r\n sub_df = df.loc[df['subreddit'] == subreddit]\r\n\r\n sub_df.to_csv(individual_subreddit_csvs)\r\n filenames = glob(individual_subreddit_csvs)\r\n with open(csv_location + \"_\" + subreddit + '.csv', 'w') as out:\r\n for fn in filenames:\r\n with open(fn) as f:\r\n out.write(f.read())\r\n os.remove(fn)"
]
| [
"0.7300982",
"0.7081345",
"0.67904526",
"0.65356153",
"0.63604176",
"0.6315184",
"0.62329036",
"0.6186843",
"0.61564404",
"0.61082506",
"0.6049242",
"0.5962678",
"0.5925597",
"0.5874987",
"0.58611417",
"0.58346677",
"0.5809679",
"0.57887757",
"0.5729091",
"0.5712264",
"0.56842864",
"0.56773764",
"0.5664989",
"0.56582046",
"0.56544024",
"0.56292844",
"0.5608113",
"0.5592044",
"0.5556537",
"0.5542492"
]
| 0.72341794 | 1 |
Ingests temperature and precipitation values for nClimDiv datasets. Uses a matching soil constants file from the open-source indices_python GitHub repository. | def _ingest_netcdf(output_netcdf, # pragma: no cover
release_date,
temp_var_name,
precip_var_name,
awc_var_name):
try:
# parse the soil constant (available water capacity)
soil_url = 'https://raw.githubusercontent.com/monocongo/indices_python/master/example_inputs/pdinew.soilconst'
# use a temporary file that we'll remove once no longer necessary
tmp_file = "tmp_soil_for_ingest_nclimdiv.txt"
urllib.request.urlretrieve(soil_url, tmp_file)
soil_file = open(tmp_file, 'r')
# parse the soil constant (available water capacity)
divs_to_awc, divs_to_lats, divs_to_bs, divs_to_hs = _parse_soil_constants(soil_file, awc_var_name)
# remove the soil file
soil_file.close()
os.remove(tmp_file)
# parse both the precipitation and the temperature datasets
p_divs_to_arrays, p_divs_to_minmax_years, p_min_year, p_max_year = _parse_climatology(release_date, p_or_t='P')
t_divs_to_arrays, t_divs_to_minmax_years, t_min_year, t_max_year = _parse_climatology(release_date, p_or_t='T')
# determine the number of times and divisions for each (should match?)
total_months = (p_max_year - p_min_year + 1) * 12
if total_months == ((t_max_year - t_min_year + 1) * 12):
# use the intersection set of division IDs (all IDs in both temperature and precipitation)
division_ids = list(set(list(set(list(p_divs_to_arrays)).intersection(t_divs_to_arrays))).intersection(divs_to_awc))
else:
raise ValueError("Unequal number of time steps between the two climatological datasets")
# for each climatology or indicator we'll parse out dictionaries to contain a) divisional arrays (full time series
# arrays for all divisions), b) divisional min/max year ranges, and c) overall minimum/maximum years
divisional_arrays = {temp_var_name: t_divs_to_arrays,
precip_var_name: p_divs_to_arrays}
divisional_minmax_years = {temp_var_name: t_divs_to_minmax_years,
precip_var_name: p_divs_to_minmax_years}
variable_minmax_years = {temp_var_name: [t_min_year, t_max_year],
precip_var_name: [p_min_year, p_max_year]}
# parse the indicator datasets
for variable in ['zndx', 'sp01', 'sp02', 'sp03', 'sp06', 'sp12', 'sp24', 'pdsi', 'phdi', 'pmdi']:
# get the relevant US climate divisions ASCII file from NCEI
#TODO replace this hard coded path with a function parameter, taken from command line pylint: disable=fixme
file_url = 'ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/climdiv-{0}dv-v1.0.0-{1}'.format(variable, release_date)
# use a temporary file that we'll remove once no longer necessary
tmp_file = "tmp_climatology_for_ingest_nclimdiv.txt"
urllib.request.urlretrieve(file_url, tmp_file)
div_file = open(tmp_file, 'r')
var_name = 'cmb_' + variable
# parse the index values into corresponding dictionaries, arrays, etc.
divisional_array, minmax_years, min_year, max_year = _parse_results(div_file)
divisional_arrays[var_name] = divisional_array
divisional_minmax_years[var_name] = minmax_years
variable_minmax_years[var_name] = [min_year, max_year]
# remove the climatology file
div_file.close()
os.remove(tmp_file)
# write the values as NetCDF
_create_netcdf(output_netcdf,
division_ids,
divisional_arrays,
divisional_minmax_years,
variable_minmax_years,
divs_to_awc,
divs_to_lats,
divs_to_bs,
divs_to_hs,
total_months,
temp_var_name,
precip_var_name,
awc_var_name,
p_min_year)
print('\nMonthly nClimDiv NetCDF file: {0}'.format(output_netcdf))
except:
_logger.exception('Failed to complete', exc_info=True)
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readFiles(opt, path, pathCopyData,minlat, maxlat, minlon, maxlon , variables, estaciones):\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n dirr = pathCopyData\n patron2 = re.compile(date)\n print(dirr + 'tfile.txt')\n tempfile = df.read_csv(dirr + 'tfile.txt')\n tempbase = df.read_csv(dirr + 'tbase.txt')\n tfile = list(tempfile.values.flatten())\n tbase = list(tempbase.values.flatten())\n tfileCopy = list(tempfile.values.flatten())\n tbaseCopy = list(tempbase.values.flatten())\n l = len(tfile)\n for i in range(l):\n tfil = tfile[i]\n tbas = tbase[i]\n ls = tbas + '/' + tfil\n f = patron2.findall(tfil)\n cadena = clearString(tfil)\n print(cadena)\n try:\n #net = open_netcdf(ls, tfil, cadena, pathCopyData)\n net = Dataset(ls)\n for xs in range(len(estaciones)):\n minlat1 = minlat[xs]\n maxlat1 = maxlat[xs]\n minlon1 = minlon[xs]\n maxlon1 = maxlon[xs]\n estacion = estaciones[xs]\n #checkFile(net, tfil, f[0], opt, path, minlat1, maxlat1, minlon1, maxlon1, variables, estacion)\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat1):int(maxlat1),int(minlon1):int(maxlon1)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], f[0], opt, path, estacion)\n tfileCopy.remove(tfil)\n tbaseCopy.remove(tbas)\n except (OSError, EOFError) as e:\n print(e)\n fdata = df.DataFrame(tfileCopy, columns=['nameFile'])\n fbas = df.DataFrame(tbaseCopy, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbas.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)\n if os.path.exists(pathCopyData + cadena):\n os.remove(pathCopyData + cadena)\n sys.exit()\n # readFiles(1);\n except tarfile.ReadError:\n print('error2')\n # fdata = df.DataFrame(tfile,columns=['nameFile']);\n # fbas = df.DataFrame(tbase,columns=['nameBase']);\n # fdata.to_csv(dirr+'tfile.txt',encoding='utf-8',index=False);\n # fbas.to_csv(dirr+'tbase.txt',encoding='utf-8',index=False);\n # readFiles(1);\n except (KeyError, FileNotFoundError):\n print('ERROR DE LECTURA')",
"def inputs_netCDF(ID, fname, data):\n\n from netCDF4 import Dataset #, date2num, num2date\n from datetime import datetime\n\n print('**** creating SpaFHy input netCDF4 file: ' + fname + ' ****')\n \n # create dataset & dimensions\n ncf = Dataset(fname, 'w')\n ncf.description = 'SpatialData from : ' + str(ID)\n ncf.history = 'created ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n ncf.source = 'SpaFHy v.1.0 inputs'\n \n dlat, dlon = np.shape(data['cmask'])\n\n ncf.createDimension('dlon', int(dlon))\n ncf.createDimension('dlat', int(dlat))\n ncf.createDimension('scalar', 1)\n\n # create variables \n # call as createVariable(varname,type,(dimensions))\n cellsize = ncf.createVariable('cellsize', 'f4', ('scalar',))\n cellsize.units = 'm'\n lat = ncf.createVariable('lat', 'f4', ('dlat',))\n lat.units = 'ETRS-TM35FIN'\n lon = ncf.createVariable('lon', 'f4', ('dlon',))\n lon.units = 'ETRS-TM35FIN'\n\n cellsize[0] = data['cellsize']\n lon[:] = data['lon0']\n lat[:] = data['lat0']\n \n # required inputs\n cmask = ncf.createVariable('cmask', 'i4', ('dlat','dlon',))\n cmask.units = 'integer inside catchment, Nan outside'\n LAI_conif = ncf.createVariable('LAI_conif', 'f4', ('dlat','dlon',))\n LAI_conif.units = 'conifer LAI (m2m-2)'\n LAI_decid = ncf.createVariable('LAI_decid', 'f4', ('dlat','dlon',))\n LAI_decid.units = 'deciduous annual max LAI (m2m-2)' \n hc = ncf.createVariable('hc', 'f4', ('dlat','dlon',))\n hc.units = 'canopy height m' \n cf = ncf.createVariable('cf', 'f4', ('dlat','dlon',))\n cf.units = 'canopy closure (-)' \n \n soilclass = ncf.createVariable('soilclass', 'i4', ('dlat','dlon',))\n soilclass.units = 'soil class (1 - 5)'\n \n flowacc = ncf.createVariable('flowacc', 'f4', ('dlat','dlon',))\n flowacc.units = 'flow accumualtion area m2'\n slope = ncf.createVariable('slope', 'f4', ('dlat','dlon',))\n slope.units = 'local slope (deg)' \n \n for k in ['LAI_conif', 'LAI_decid', 'hc', 'cf', 'soilclass', 'flowacc', 'slope']:\n ncf[k][:,:] = data[k]\n \n print('**** done ****')",
"def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC",
"def set_constants(self, data=[1.4493e+00,3.8070e-01,9.9000e-03, \\\n 1.0420e-01,7.9000e-03,1.6920e-01, \\\n 1.5100e-02] ):\n self.D1 = data[0]\n self.D2 = data[1]\n self.A1 = data[2]\n self.A2 = data[3] \n self.F1 = data[4]\n self.F2 = data[5] \n self.S12 = data[6]\n self.R1 = self.A1 + self.S12\n self.KINF = (self.F1 + self.F2 * self.S12 / self.A2) / self.R1 \n self.M2 = self.D1/self.R1 + self.D2/self.A2",
"def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' % datemo, grid\n \n return lat,lon,thk",
"def Flux_init(self, flns, oversample=None, sigma=None, tophat=None, thin=None, wave_cut=None, temp_cut=None, logg_cut=None, convert=None, linlog=False, verbose=False):\n ## Reading the parameter information about the spectra\n lst = []\n for i in np.arange(len(flns)):\n print(flns[i])\n ## Get the logg and temp value from the filename\n hdr = pyfits.getheader(flns[i], ext=0)\n temp = hdr['PHXTEFF']\n logg = hdr['PHXLOGG']\n if temp_cut is None or (temp >= temp_cut[0] and temp <= temp_cut[1]):\n print(' temp_cut')\n if logg_cut is None or (logg >= logg_cut[0] and logg <= logg_cut[1]):\n print(' logg_cut')\n lst.append( [i, logg, temp] )\n\n ## Reading the mu values\n self.mu = np.array(pyfits.getdata(flns[0], ext=1), dtype=float)\n n_mu = self.mu.size\n\n ## Sorting the grid by temperature and then logg\n print(lst)\n Utils.Misc.Sort_list(lst, [2,1])\n lst = np.array(lst)\n print(lst)\n\n ## Extracting the temperature values\n self.logtemp = np.log(np.unique(lst[:,2]))\n self.logtemp.sort()\n n_teff = self.logtemp.size\n\n ## Extracting the logg values\n self.logg = np.unique(lst[:,1])\n self.logg.sort()\n n_logg = self.logg.size\n\n ## If there is a mismatch and the grid is not rectangular, then the function aborts\n if n_teff*n_logg != lst.shape[0]:\n print( \"Number of temperature points: {}\".format(n_teff) )\n print( \"Number of logg points: {}\".format(n_logg) )\n print( \"Number of grid points: {}\".format(lst.shape[0]) )\n for teff in self.logtemp:\n for logg in self.logg:\n missing = True\n for l in lst:\n if np.log(l[2]) == teff and l[1] == logg:\n missing = False\n if missing:\n print(\"Missing -> logg: {:3.1f}, temp: {:5.0f}\".format(logg,np.exp(teff)))\n raise Exception( \"There is a mismatch in the number of log(g) and teff grid points!\" )\n return\n\n ## Extracting the data\n grid = []\n wav = []\n if verbose: print( \"Starting to read atmosphere grid files\" )\n for i,l in enumerate(lst[:,0]):\n if verbose: sys.stdout.write( \"Reading {} ({}/{})\\r\".format(flns[int(l)], i+1, lst.shape[0]) ); sys.stdout.flush()\n tmp = Read_AGSS(flns[int(l)], oversample=oversample, sigma=sigma, tophat=tophat, thin=thin, wave_cut=wave_cut, convert=convert, linlog=linlog)\n grid.append(tmp[0])\n wav.append(tmp[1])\n self.z0 = tmp[2]\n logger.log(8, \"Number of wavelength points: {}, range: [{}, {}]\".format(tmp[1].size, tmp[1][0], tmp[1][-1]) )\n if verbose: print( \"\\nFinished reading atmosphere grid files\" )\n try:\n wav = np.array(wav)\n if wav.std(0).max() > 1.e-6:\n raise Exception( \"The wavelength grid is not uniform!\" )\n return\n else:\n wav = wav[0]\n except:\n raise Exception( \"The wavelength grid has an inconsistent number of elements!\" )\n return\n if verbose: print( \"Transforming grid data to array\" )\n grid = np.asarray(grid)\n if verbose: print( \"Addressing the grid data shape\" )\n grid.shape = n_teff, n_logg, n_mu, wav.size\n self.wav = wav\n if verbose: print( \"Making the grid a class attribute\" )\n self.grid = grid\n\n ## Calculating the grid log-to-linear weights\n if linlog:\n self.wav_linear = Utils.Series.Resample_loglin(self.wav)\n self.wav_delta = self.wav_linear[1] - self.wav_linear[0]\n self.wav_frac, self.wav_inds = Utils.Series.Getaxispos_vector(self.wav, self.wav_linear)\n return",
"def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))",
"def run(self):\r\n #print 'WriteFITS_IDI.run'\r\n\r\n # construct the name of the file\r\n readfits = self.previous_results['readfits']\r\n obs_date = readfits['obs date']\r\n idifitsfile = '%s.idi.fits' % obs_date\r\n\r\n configxml = 'firi.xml'\r\n\r\n # midnight on date to Julian day\r\n obs_date_midnight = astro_time.Time('%s-%s-%sT00:00:00' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:8]), format='isot')\r\n obs_date_midnight = obs_date_midnight.jd\r\n\r\n rdate = astro_time.Time(obs_date_midnight, format='jd',\r\n out_subfmt='date')\r\n rdate = rdate.iso\r\n\r\n # number of days after midnight at obs start\r\n obs_date_time = astro_time.Time('%s-%s-%s:%s:%s' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:11], obs_date[11:13],\r\n obs_date[13:]), format='isot')\r\n obs_date_time = obs_date_time.jd - obs_date_midnight\r\n\r\n # get specific items from the results that will be need in\r\n # the reduction\r\n reduce_interferogram = self.previous_results['reduceinterferogram']\r\n data_quality = reduce_interferogram['data_quality']\r\n scan_uvspectra = reduce_interferogram['scan_uvspectra']\r\n\r\n wavenumber = scan_uvspectra[0].wavenumber\r\n\r\n # construct lists of the values to be stored in each Table column\r\n n_uvspectra = max(scan_uvspectra.keys()) + 1\r\n mcomplex = 3\r\n mstokes = 1\r\n mfreq = len(wavenumber)\r\n mra = 1\r\n mdec = 1\r\n\r\n uv_data = np.zeros([n_uvspectra, mdec, mra, mfreq, mstokes, mcomplex])\r\n u = np.zeros([n_uvspectra])\r\n v = np.zeros([n_uvspectra])\r\n w = np.zeros([n_uvspectra])\r\n dates = np.zeros([n_uvspectra])\r\n times = np.zeros([n_uvspectra])\r\n baselines = np.zeros([n_uvspectra], dtype=np.int)\r\n freqid = np.ones([n_uvspectra], dtype=np.int)\r\n\r\n for k,val in scan_uvspectra.items():\r\n uv_data[k,0,0,:,0,0] = val.spectrum.real\r\n uv_data[k,0,0,:,0,1] = val.spectrum.imag\r\n uv_data[k,0,0,:,0,2] = np.ones(val.spectrum.real.shape)\r\n u[k] = np.mean(val.baseline_x)\r\n v[k] = np.mean(val.baseline_y)\r\n w[k] = np.mean(val.baseline_z)\r\n dates[k] = obs_date_midnight\r\n times[k] = obs_date_time + (np.mean(val.time) / (3600 * 24))\r\n baselines[k] = 258\r\n\r\n # external_params is referred to inside config.xml and can be\r\n # used to set parameters there\r\n light_speed = constants.c.to('m/s').value\r\n external_params = {'NCHAN':len(wavenumber),\r\n 'RDATE':rdate,\r\n 'REF_FREQ':0.0 * 100 * light_speed,\r\n 'CHAN_BW':np.abs(wavenumber[1] - wavenumber[0]) * \\\r\n 100 * light_speed}\r\n\r\n print \"Out: %s\\nConfig: %s\"%(idifitsfile, configxml)\r\n\r\n print('\\nConfiguring Array geography')\r\n print('--------------------------')\r\n # Meaningless numbers, hopefully not needed by any CASA method \r\n # that we want to use\r\n (latitude, longitude, elevation) = ('00:00:00.00', '00:00:00.00', 0)\r\n now = datetime.datetime.now()\r\n\r\n # Make ourselves an Array (pyEphem observer)\r\n array_geometry_m = np.array([\r\n [0.0, 0.0, 0.0],\r\n [0.0, 80.0, 0.0]], dtype = 'float32')\r\n beach = Array(lat=latitude, long=longitude, elev=elevation, date=now,\r\n antennas=array_geometry_m)\r\n\r\n print('\\nConfiguring phase source')\r\n print('--------------------------')\r\n # The source is our phase centre for UVW coordinates\r\n line = \"%s,f,%s,%s,%s,%d\" % ('Deep Space', '00:00:00',\r\n '00:00:00', '1', 2000)\r\n source = ephem.readdb(line)\r\n source.compute(beach)\r\n print \"Name: %s \\nRA: %s \\nDEC: %s\"%(source.name, source.ra, source.dec)\r\n\r\n # Make a new blank FITS HDU\r\n print('\\nCreating PRIMARY HDU')\r\n 
print('------------------------------------')\r\n hdu = make_primary(config=configxml, external_params=external_params)\r\n print repr(hdu.header)\r\n\r\n # Go through and generate required tables\r\n print('\\nCreating ARRAY_GEOMETRY')\r\n print('------------------------------------')\r\n tbl_array_geometry = make_array_geometry(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_array_geometry = config_array_geometry(tbl_array_geometry,\r\n array_geometry_m)\r\n print repr(tbl_array_geometry.header)\r\n\r\n print('\\nCreating FREQUENCY')\r\n print('------------------------------------')\r\n tbl_frequency = make_frequency(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_frequency = config_frequency(tbl_frequency,\r\n external_params=external_params)\r\n print repr(tbl_frequency.header)\r\n\r\n print('\\nCreating SOURCE')\r\n print('------------------------------------')\r\n tbl_source = make_source(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_source = config_source(tbl_source, source)\r\n print repr(tbl_source.header)\r\n\r\n print('\\nCreating ANTENNA')\r\n print('------------------------------------')\r\n tbl_antenna = make_antenna(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_antenna = config_antenna(tbl_antenna)\r\n print repr(tbl_antenna.header)\r\n\r\n print('\\nCreating UV_DATA')\r\n print('------------------------------------')\r\n\r\n print 'Data dimensions: %i dumps, %i chans, %i pols, %i data' % (\r\n n_uvspectra, mfreq, mstokes, mcomplex)\r\n\r\n print('Generating blank UV_DATA rows...')\r\n tbl_uv_data = make_uv_data(config=configxml, num_rows=n_uvspectra,\r\n external_params=external_params)\r\n\r\n timesorted = np.argsort(times)\r\n\r\n for k in timesorted:\r\n tbl_uv_data.data[k]['FLUX'] = uv_data[k,0,0,:,0,:].ravel()\r\n tbl_uv_data.data[k]['UU'] = u[k] / light_speed\r\n tbl_uv_data.data[k]['VV'] = v[k] / light_speed\r\n tbl_uv_data.data[k]['WW'] = w[k] / light_speed\r\n tbl_uv_data.data[k]['BASELINE'] = baselines[k]\r\n tbl_uv_data.data[k]['DATE'] = dates[k]\r\n tbl_uv_data.data[k]['TIME'] = times[k]\r\n tbl_uv_data.data[k]['SOURCE'] = 1\r\n tbl_uv_data.data[k]['FREQID'] = 1\r\n tbl_uv_data.data[k]['INTTIM'] = 3\r\n\r\n print repr(tbl_uv_data.header)\r\n \r\n hdulist = pyfits.HDUList(hdus=\r\n [hdu,\r\n tbl_array_geometry,\r\n tbl_source, \r\n tbl_frequency,\r\n tbl_antenna,\r\n tbl_uv_data])\r\n\r\n print('Verifying integrity...') \r\n hdulist.verify()\r\n \r\n if(os.path.isfile(idifitsfile)):\r\n print('Removing existing file...')\r\n os.remove(idifitsfile)\r\n print('Writing to file...')\r\n hdulist.writeto(idifitsfile)\r\n\r\n print('Done.')\r\n\r\n self.result['idifitsfile'] = idifitsfile\r\n\r\n return self.result",
"def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps",
"def main(config_file, pre_nfiles=None, post_nfiles=None,\n cutoffs=[12.0, 60.0]):\n imu_complement_fig = (config_file[:-4] +\n \"_IMU_complementary_spectra.pdf\")\n imu_spectra_fig = (config_file[:-4] + \"_IMU_spectra.pdf\")\n nperseg1 = 60 * 5 * 10\n nperseg2 = 60 * 10\n fig, axs, leg = check.plot_IMU_euler_angles_spectra(config_file,\n nperseg=nperseg2,\n nfiles=pre_nfiles)\n fig.savefig(imu_complement_fig, bbox_extra_artists=(leg,),\n bbox_inches=\"tight\")\n plt.close(fig)\n fig_rate, _, fig_acc, _ = check.plot_IMU_spectra(config_file,\n nperseg=nperseg2,\n nfiles=pre_nfiles)\n with PdfPages(imu_spectra_fig) as pdf:\n pdf.savefig(fig_rate, bbox_inches=\"tight\")\n plt.close(fig_rate)\n pdf.savefig(fig_acc, bbox_inches=\"tight\")\n plt.close(fig_acc)\n\n # Look at individual periods\n config = db_flux.parse_config(config_file)\n input_files = config[\"EC Inputs\"][\"input_files\"]\n if post_nfiles is not None and post_nfiles < len(input_files):\n input_files = np.random.choice(input_files, post_nfiles,\n replace=False)\n imu2anem_pos = config[\"EC Motion Correction\"][\"imu2anemometer_pos\"]\n sample_freq_hz = config[\"EC Inputs\"][\"sample_frequency\"]\n Tcf, Ta = cutoffs\n for ifile in input_files:\n file_idx = ifile.index(ifile)\n ec_prep, _ = db_flux.prepare_period(ifile, config)\n wind = ec_prep[[\"wind_speed_u\", \"wind_speed_v\", \"wind_speed_w\"]]\n acceleration = ec_prep[[\"acceleration_x\",\n \"acceleration_y\",\n \"acceleration_z\"]]\n rate = ec_prep[[\"rate_x\", \"rate_y\", \"rate_z\"]]\n ec_corr = db_flux.wind3D_correct(wind.values, acceleration.values,\n rate.values,\n ec_prep.heading.values,\n ec_prep.speed_over_ground.values,\n imu2anem_pos, sample_freq_hz,\n Tcf, Ta)\n fig, _, leg = check.plot1_IMU_complementary_spectra(config_file,\n file_idx,\n Tcf=Tcf,\n Ta=Ta,\n nperseg=nperseg1)\n fig.savefig((ifile[:-4] + \"_euler_angle_spectra.pdf\"),\n bbox_extra_artists=(leg,), bbox_inches=\"tight\")\n plt.close()\n fig, axs, leg = check.plot_wind3D_spectra(wind.values,\n ec_corr.uvw_ship,\n nperseg=nperseg2)\n fig.savefig((ifile[:-4] + \"_uvw_spectra.pdf\"),\n bbox_extra_artists=(leg,), bbox_inches=\"tight\")\n plt.close()\n # Plot corrected wind cospectra\n fig, _, leg = check.plot_wind3D_cospectra(wind.values,\n ec_corr.uvw_ship,\n nperseg=nperseg2)\n fig.savefig((ifile[:-4] + \"_uvw_cospectra.pdf\"),\n bbox_extra_artists=(leg,), bbox_inches=\"tight\")\n plt.close()",
"def mri_dixon_analysis(data_objects, working_dir, settings):\n\n logger.info(\"Running Dixon analysis Calculation\")\n logger.info(\"Using settings: %s\", settings)\n\n output_objects = []\n\n fat_obj = None\n water_obj = None\n for data_obj in data_objects:\n\n if data_obj.meta_data[\"image_type\"] == \"fat\":\n fat_obj = data_obj\n\n if data_obj.meta_data[\"image_type\"] == \"water\":\n water_obj = data_obj\n\n if fat_obj is None or water_obj is None:\n logger.error(\"Both Fat and Water Images are required\")\n return []\n\n # Read the image series\n fat_load_path = fat_obj.path\n if fat_obj.type == \"DICOM\":\n fat_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(fat_obj.path)\n fat_img = sitk.ReadImage(fat_load_path)\n\n water_load_path = water_obj.path\n if water_obj.type == \"DICOM\":\n water_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(water_obj.path)\n water_img = sitk.ReadImage(water_load_path)\n\n # Cast to float for calculation\n fat_img = sitk.Cast(fat_img, sitk.sitkFloat32)\n water_img = sitk.Cast(water_img, sitk.sitkFloat32)\n\n # Let's do the calcuation using NumPy\n fat_arr = sitk.GetArrayFromImage(fat_img)\n water_arr = sitk.GetArrayFromImage(water_img)\n\n # Do the calculation\n divisor = water_arr + fat_arr\n fat_fraction_arr = (fat_arr * 100) / divisor\n fat_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n water_fraction_arr = (water_arr * 100) / divisor\n water_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n\n fat_fraction_img = sitk.GetImageFromArray(fat_fraction_arr)\n water_fraction_img = sitk.GetImageFromArray(water_fraction_arr)\n\n fat_fraction_img.CopyInformation(fat_img)\n water_fraction_img.CopyInformation(water_img)\n\n # Create the output Data Objects and add it to output_ob\n fat_fraction_file = os.path.join(working_dir, \"fat.nii.gz\")\n sitk.WriteImage(fat_fraction_img, fat_fraction_file)\n water_fraction_file = os.path.join(working_dir, \"water.nii.gz\")\n sitk.WriteImage(water_fraction_img, water_fraction_file)\n\n fat_data_object = DataObject(type=\"FILE\", path=fat_fraction_file, parent=fat_obj)\n output_objects.append(fat_data_object)\n\n water_data_object = DataObject(type=\"FILE\", path=water_fraction_file, parent=water_obj)\n output_objects.append(water_data_object)\n\n return output_objects",
"def import_clean_process():\n # loading the co2 emissions data for the Earth, I'm only interested in the\n # total emissions and the year\n global_co2 = pd.read_csv(\n \"datasets/Global CO2 Emissions.csv\",\n usecols=[\n \"Year\",\n \"Total\"\n ],\n parse_dates=[\"Year\"],\n index_col=\"Year\"\n )\n # creating the global temperature dataframe\n global_temp_data = open(\n \"datasets/CRUTEM.4.6.0.0.global_n+s\",\n \"r\"\n )\n global_temp = pd.DataFrame(\n {\n \"global_temp\": [],\n }\n )\n for line in global_temp_data:\n # each line in the file is an observation for the year, the first\n # column being the year, the second being the temperature measurement\n data = line.split()\n global_temp.at[pd.to_datetime(data[0]), \"global_temp\"] = float(data[1])\n global_temp_data.close()\n # loading the co2 emissions data for the UK\n uk_co2 = pd.read_csv(\n \"datasets/UK carbon dioxide emissions between 1858 to 2017 .csv\",\n parse_dates=[\"Date\"],\n index_col=\"Date\"\n )\n # creating the dataframe for the UK temperature data\n uk_temp = pd.DataFrame(\n {\n \"uk_temp\": [],\n }\n )\n # this file consists of monthly and seasonal averages for the UK surface\n # temperature\n uk_tmean = open(\n \"datasets/UK Mean Temperature (Degrees C)\",\n \"r\"\n )\n for index, line in enumerate(uk_tmean):\n # the data begins on the eigth line in the file\n if index > 7:\n data = line.split()\n # the monthly temperatures are from the 2nd and 13th columns\n month_temps = np.array(data[1:13]).astype(float)\n # the first reading is the year, I've taken the average of all the\n # months to get an annual average\n uk_temp.at[pd.to_datetime(data[0]), \"uk_temp\"] = month_temps.mean()\n uk_tmean.close()\n # removing the temperature reading for 2019 as it isn't averaged over the\n # whole year (this program was written in 06/2019)\n uk_temp = uk_temp[:-1]\n # merging the temperature and co2 emissions dataframes for the Earth\n global_data = pd.merge(\n global_temp,\n global_co2,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # merging the temperature and co2 emissions dataframes for the UK\n uk_data = pd.merge(\n uk_temp,\n uk_co2,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # merging the global and UK dataframes\n df_data = pd.merge(\n global_data,\n uk_data,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # rename some of the columns to make them more clear\n df_data = df_data.rename(\n columns={\n \"Total\": \"global_co2\",\n \"CO2 Emissions\": \"uk_co2\"\n }\n )\n return df_data",
"def icsd_init(self, survey):\n # create directories\n\n print(\"initiation ICSD\")\n self.createdirs(survey)\n\n # load virtual sources coordinates\n if self.type == \"2d\":\n self.coord_x, self.coord_y, survey.coord = load_coord(\n survey.path2load, self.coord_file, dim=2,\n )\n else:\n self.coord_x, self.coord_y, self.coord_z, survey.coord = load_coord(\n survey.path2load, self.coord_file, dim=3,\n )\n\n # load observations resistances b\n survey.b = load_obs(survey.path2load, survey.obs)\n # load simulated resistances A (i.e. Green function)\n survey.A = load_sim(survey.path2load, survey.sim)\n\n print(\"log transformation: \" + str(self.logTrans))\n # Log transformation before inversion\n if self.logTrans == True:\n # check the min\n\n # IF data and model are in the same range\n # TranslateMIN= np.min(np.array([min(survey.b),min(survey.A)]))\n # survey.A = np.log(survey.A + 1 - TranslateMIN) # translate, then transform */\n # survey.b = np.log(survey.b + 1 - TranslateMIN) # translate, then transform */\n\n # IF data is very small compare to the model\n\n TranslateMIN_A = min(survey.A)\n survey.A = np.log(\n survey.A + 1 - TranslateMIN_A\n ) # translate, then transform */\n TranslateMIN_B = min(survey.b)\n survey.b = np.log(\n survey.b + 1 - TranslateMIN_B\n ) # translate, then transform */\n\n # load observations electrode coordinates\n if self.plotElecs == True:\n (\n survey.RemLineNb,\n survey.Injection,\n survey.coordE,\n survey.pointsE,\n ) = load_geom(\n self.path2load\n ) # geometry file containing electrodes position includinf remotes\n\n # check vector sizes\n survey.nVRTe = check_nVRTe(survey.A, survey.b, survey.coord)\n\n # reshape A vector into matrix A of nVRTE collumns\n survey.A = reshape_A(survey.A, survey.nVRTe)\n\n print(\"obs_err: \" + str(self.obs_err))\n # define mode to weights the data (const, sqrt or reciprocal)\n survey.obs_w = obs_w_f(self.obs_err, survey.b, self.errRmin, sd_rec=None)\n\n # set constrain (curent conservation)\n survey.con_A = con_A_f(survey.A)\n survey.con_b = con_b_f(survey.b)\n survey.con_w = con_w_f(self.wc)\n\n # append spatial regularization (add lines to the matrice)\n survey.reg_A = self._parseModelReg(survey)\n survey.reg_b = regularize_b(self.reg_A)\n\n # stack data, constrain, and regularization\n survey.A_s = stack_A(survey.A, survey.con_A, survey.reg_A)\n survey.b_s = stack_b(survey.b, survey.con_b, survey.reg_b)",
"def CollectingData(boundaries, target_lon, target_lat, files, basepath, \\\n CAMS_path, apply_land_sea_mask, use_wind_rotations, \\\n incorporate_cams):\n # Setting the time of starting the script\n start = datetime.now()\n \n # Reading daily csv files for specified area and day as np.arrays\n daily_data = {}\n for i, file in enumerate(files): \n # Reading daily csv's as input array\n daily_data[i] = inpt.CSVtoArray(file, boundaries, target_lon, target_lat)\n \n # Remove background, by CAMS observations\n if incorporate_cams:\n dates = [daily_data[i]['day'], daily_data[i]['month'], daily_data[i]['year']]\n bbox = [daily_data[i]['lat_min'], daily_data[i]['lat_max'], daily_data[i]['lon_min'], daily_data[i]['lon_max']]\n xres = int((110 * (bbox[3]-bbox[2])) / len(daily_data[i]['CO_ppb'][0]))\n yres = int((110 * (bbox[1]-bbox[0])) / len(daily_data[i]['CO_ppb']))\n cams_arr = cams.FetchCams(CAMS_path, dates, bbox, xres, yres)\n daily_data[i]['CO_excl_background'] = daily_data[i]['CO_ppb'] - cams_arr\n \n # Filter measurements taken above the oceans (higher uncertainty)\n if apply_land_sea_mask:\n daily_data[i]['CO_ppb'] = mask.land_sea_mask(daily_data[i]['CO_ppb'], boundaries)\n daily_data[i]['count_t'] = mask.land_sea_mask(daily_data[i]['count_t'], boundaries)\n \n # collect meteodata via ECMWF CDS API:\n if use_wind_rotations:\n u_wind, v_wind = wind.FetchWindData(daily_data[i], pressure=700, timerange=6, basepath=basepath)\n daily_data[i]['u_wind'] = u_wind\n daily_data[i]['v_wind'] = v_wind\n \n print('Total time elapsed reading data: {}'.format(datetime.now()-start))\n\n return daily_data",
"def run(self):\n prefactor = (self.x_section / (8 * np.pi * self.prim_flux.mDM ** 2))\n int_flux = self.prim_flux.table_model.integral(\n emin=self.energy_range[0],\n emax=self.energy_range[1],\n )\n data = self.jfact_map.quantity.copy()\n data *= prefactor\n data *= int_flux\n data = data.to('cm-2 s-1')\n\n self._fluxmap = WcsNDMap(data=data, geom=self.jfact_map.geom)",
"def ImportTemps(cls, mfg_enduse):\n\n def create_dict(file_dir, file):\n dict_out = dict(pd.read_excel(\n file_dir + file, _sheetname=0).iloc[:, (0, 2)].values\n )\n\n return dict_out\n\n ndict = {}\n\n for k, v in cls.nfiles.items():\n ndict[k[0:7]] = create_dict(cls.file_dir, v)\n\n temps = pd.read_excel(cls.file_dir + cls.temp_file, sheetname=0)\n\n temps.SIC.fillna(method='ffill', inplace=True)\n\n temps.loc[:, 'Temp_C'] = temps.Temp_C.apply(\n lambda x: int(np.around(x))\n )\n\n # Calculate energy fraction of each process by temperature\n temps = pd.DataFrame(temps.groupby(\n ['SIC', 'Unit_Process', 'Heat_type', 'Temp_C']\n )['E_Btu'].sum())\n\n e_totals = temps.reset_index()[\n temps.reset_index()['Unit_Process'] != 'Boiler'\n ].groupby(['SIC', 'Heat_type']).E_Btu.sum()\n\n for i in temps.index:\n if 'Boiler' in i:\n continue\n\n temps.loc[i, 'Fraction'] = \\\n temps.loc[i, 'E_Btu'] / e_totals.loc[(i[0], i[2])]\n\n temps.reset_index(inplace=True)\n\n temps.loc[:, 'SIC'] = temps.SIC.apply(lambda x: int(str(x)[0:4]))\n\n temps.loc[:, 'NAICS02'] = temps.SIC.map(ndict['sic_N02'])\n\n temps.loc[:, 'NAICS07'] = temps.NAICS02.map(ndict['N02_N07'])\n\n temps.loc[:, 'NAICS12'] = temps.NAICS07.map(ndict['N07_N12'])\n\n # Multiple entries for each SIC/NAICS; take simple mean. \n temps = temps.groupby(\n ['NAICS12', 'Unit_Process', 'Heat_type']\n )[['E_Btu', 'Temp_C', 'Fraction']].mean()\n\n # Create 4-, and 3-digit NAICS table for matching \n temps_NAICS = pd.DataFrame(index=temps.index.levels[0],\n columns=['N5', 'N4', 'N3']\n )\n\n for n in [5, 4, 3]:\n temps_NAICS.loc[:, 'N' + str(n)] = \\\n [float(str(x)[0:n]) for x in temps_NAICS.index.values]\n\n temps_NAICS.reset_index(inplace=True)\n\n eu_naics = pd.DataFrame(\n mfg_enduse.naics.drop_duplicates().sort_values(ascending=True),\n copy=True\n )\n\n eu_naics.reset_index(inplace=True, drop=True)\n\n eu_naics.rename(columns={'naics':'NAICS12'}, inplace=True)\n\n for n in [5, 4, 3]:\n eu_naics.loc[:, 'N' + str(n)] = \\\n [float(str(x)[0:n]) for x in eu_naics.NAICS12.values]\n\n # Match naics between end use data set and temperature info. \n nmatch = pd.DataFrame()\n for column in temps_NAICS.columns:\n nmatch = pd.concat([nmatch, pd.Series(\n [x in temps_NAICS[column].values for x in eu_naics[\n column\n ].values]\n )], axis=1)\n\n nmatch.columns = eu_naics.columns\n\n nmask = pd.DataFrame()\n\n for c in nmatch.columns:\n\n nmask = pd.concat(\n [nmask, eu_naics[c].multiply(nmatch[c])],\n axis=1\n )\n\n nmask.replace({0:np.nan}, inplace=True)\n\n # Values of 0 indicate no matching temperature data, even at 3-digit \n # level.\n nmask.N3.fillna(0, inplace=True)\n\n nmask.loc[:, 'TN_Match'] = nmask.apply(\n lambda x: int(list(x.dropna())[0]), axis=1\n )\n\n nmask.rename(columns={'NAICS12':'N6'}, inplace=True)\n\n nmask.loc[:, 'NAICS12'] = eu_naics.NAICS12\n\n # Merge matched NAICS values with end use energy data\n mfg_enduse = pd.merge(mfg_enduse,\n nmask[['NAICS12', 'TN_Match']], how='left',\n left_on='naics', right_on='NAICS12')\n\n mfg_enduse.drop('NAICS12', inplace=True, axis=1)\n\n # Merge temps and temps_NAICS for future operations by other NAICS\n temps.reset_index(inplace=True)\n\n temps = pd.merge(temps, temps_NAICS, left_on='NAICS12',\n right_on='NAICS12', how='left')\n\n for tb, tr in {'<100': (0, 99), '100-249': (100, 249),\n '250-399': (250, 399), '400-999': (400, 999),\n '>1000': (1000, 3000)}.items():\n\n ti = temps[temps.Temp_C.between(tr[0], tr[1])].index\n\n temps.loc[ti, 'Temp_Bucket'] = tb\n\n return mfg_enduse, temps",
"def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source",
"def import_constants_section(self, filename_suffix='con'):\n with open('%s/%s.%s' % (self.model_path, self.model_name, filename_suffix)) as f:\n for lnum, l in enumerate(f):\n if re.match('^\\s*(;|$)', l): continue # skip comments and blank lines\n l = l.strip().partition(';')[0].strip() # strip leading whitespace, trailing comments\n t = re.split('\\s+', l)\n self.constants[t[0].lower()] = float(t[1])",
"def cl_file(tmp_path):\n nc_path = os.path.join(tmp_path, 'cesm2_waccm_cl.nc')\n dataset = Dataset(nc_path, mode='w')\n dataset.createDimension('lev', size=2)\n dataset.createDimension('bnds', size=2)\n\n # Dimensional variables\n dataset.createVariable('lev', np.float64, dimensions=('lev',))\n dataset.createVariable('lev_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['lev'][:] = [1.0, 2.0]\n dataset.variables['lev'].bounds = 'lev_bnds'\n dataset.variables['lev'].units = '1'\n dataset.variables['lev_bnds'][:] = [[0.5, 1.5], [1.5, 3.0]]\n dataset.variables['lev_bnds'].standard_name = (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n dataset.variables['lev_bnds'].units = '1'\n dataset.variables['lev_bnds'].formula_terms = (\n 'p0: p0 a: a_bnds b: b_bnds ps: ps')\n\n # Coordinates for derivation of pressure coordinate\n dataset.createVariable('a', np.float64, dimensions=('lev',))\n dataset.createVariable('a_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.createVariable('b', np.float64, dimensions=('lev',))\n dataset.createVariable('b_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['a'][:] = [1.0, 2.0]\n dataset.variables['a'].bounds = 'a_bnds'\n dataset.variables['a_bnds'][:] = [[1.5, 0.0], [3.0, 1.5]]\n dataset.variables['b'][:] = [0.0, 1.0]\n dataset.variables['b'].bounds = 'b_bnds'\n dataset.variables['b_bnds'][:] = [[0.5, -1.0], [2.0, 0.5]]\n\n dataset.close()\n return nc_path",
"def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)",
"def loadPoints(self, inptsfile=None):\n print \"Reading data from input point cloud and preparing data for clustering ...\"\n \n ind = (self.cind['d_I_nir'], self.cind['d_I_swir'], self.cind['range'], self.cind['d0_nir'], self.cind['d0_swir'])\n\n if inptsfile is None:\n inptsfile = self.inptsfile\n\n if inptsfile is None:\n raise RuntimeError(\"Input point cloud file is neither provided by the DWELPointsCluster class instance nor given to the loadPoints function\")\n \n data = np.loadtxt(inptsfile, usecols=ind, comments=None, delimiter=',', \\\n skiprows=self.headerlines)\n\n # get valid point indices (not zero-hit point)\n # self.validhit_ind = np.where(data[:, 2]>1e-10)[0]\n self.validhit_bool = data[:, 2]>1e-6\n # # remove ground points from the analysis and classification\n # self.validhit_bool = np.logical_and(self.validhit_bool, data[:, 5]<1e-6)\n \n self.labels = np.zeros(len(data), dtype=int)-1\n self.ndi = np.zeros(len(data))-2.0\n\n # get NDI from the uncalibrated/raw intensity for comparison\n self.ndi0 = np.zeros(len(data)) - 2.0\n\n self.ndi[self.validhit_bool] = (data[self.validhit_bool, 0] - data[self.validhit_bool, 1])/(data[self.validhit_bool, 0] + data[self.validhit_bool, 1])\n \n self.ndi0[self.validhit_bool] = (data[self.validhit_bool, 3] - data[self.validhit_bool, 4])/(data[self.validhit_bool, 3] + data[self.validhit_bool, 4])\n # also no interpolated values for missing NIR or SWIR raw intensity. no NDI for these points\n tmp_bool = np.logical_or(data[:, 3].astype(int) == 0, data[:, 4].astype(int) == 0)\n self.ndi0[tmp_bool] = -2.0\n \n # ndi, rho_app_nir, rho_app_swir, no range\n # points = np.hstack((self.ndi[self.validhit_bool].reshape((np.sum(self.validhit_bool), 1)), \\\n # data[self.validhit_bool, 0:2])).astype(np.float32)\n points = np.hstack((self.ndi.reshape(len(self.ndi), 1), data[:, 0:2])).astype(np.float32)\n # # ndi, rho_app_nir, rho_app_swir, with range\n # points = np.hstack((self.ndi[self.validhit_bool].reshape((len(self.validhit_bool), 1)), data[self.validhit_bool, 0:3])).astype(np.float32)\n\n # self.data = data\n return points",
"def read_local_20Hz_files(**kwargs):\n pathlst = kwargs.get('pathlst')\n product = kwargs.get('product')\n varalias = kwargs.get('varalias')\n sdate = kwargs.get('sdate')\n edate = kwargs.get('edate')\n twin = kwargs.get('twin')\n\n # establish coords if defined in config file\n timestr = satellite_dict[product]['vardef']['time']\n lonstr = satellite_dict[product]['vardef']['lons']\n latstr = satellite_dict[product]['vardef']['lats']\n\n # adjust start and end\n sdate = sdate - timedelta(minutes=twin)\n edate = edate + timedelta(minutes=twin)\n # get meta data\n ncmeta = ncdumpMeta(pathlst[0])\n ncvar = get_filevarname(varalias, variable_info,\n satellite_dict[product], ncmeta)\n # retrieve sliced data\n ds = read_netcdfs(pathlst)\n ds_sort = ds.sortby(timestr)\n\n # get indices for included time period\n nptime = ds_sort[timestr].data\n print('here0')\n print(len(nptime))\n #dtime = [parse_date(str(nptime[i])) for i in range(len(nptime))]\n print('here1')\n #idx = find_included_times_pd(dtime, sdate=sdate, edate=edate)\n idx = find_included_times_pd(nptime, sdate=sdate, edate=edate)\n print(len(nptime[idx]))\n print('here2')\n dtime = [parse_date(str(nptime[idx][i])) for i in range(len(nptime[idx]))]\n print(dtime)\n print('here3')\n #dtime = list(np.array(dtime)[idx])\n lons = list(((ds_sort[lonstr].data[idx] - 180) % 360) - 180)\n lats = list(ds_sort[latstr].data[idx])\n\n unxt = (nptime[idx].astype(int) / 10**9)\n\n # make dict and start with stdvarname for varalias\n stdvarname = variable_info[varalias]['standard_name']\n vardict = {}\n vardict[stdvarname] = list(ds_sort[ncvar].data[idx])\n vardict['longitude'] = lons\n vardict['latitude'] = lats\n vardict['time'] = unxt\n vardict['datetime'] = dtime\n vardict['time_unit'] = variable_info['time']['units']\n print(vardict.keys())\n return vardict",
"def ingest_netcdf_latest(output_netcdf, # pragma: no cover\n temp_var_name,\n precip_var_name,\n awc_var_name):\n\n # log some timing info, used later for elapsed time\n start_datetime = datetime.now()\n _logger.info(\"Start time: %s\", start_datetime)\n\n try:\n\n # ingest the latest nClimDiv datasets using the processing date specified at the FTP location \n _ingest_netcdf(output_netcdf,\n _get_processing_date(),\n temp_var_name,\n precip_var_name,\n awc_var_name)\n except:\n \n _logger.exception('Failed to complete', exc_info=True)\n raise\n\n # report on the elapsed time\n end_datetime = datetime.now()\n _logger.info(\"End time: %s\", end_datetime)\n elapsed = end_datetime - start_datetime\n _logger.info(\"Elapsed time: %s\", elapsed)",
"def readExperi(directory,varid,experi,level):\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var",
"def readExperiAll(varid,timeperiod,level):\n print('\\n>>>>>>>>>> Using readExperiAll function!')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n\n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Directories for Antarctic experiments (1-100 members)\n if any([timeperiod=='ANT_Fu',timeperiod=='ANT_Cu',timeperiod=='ANT_Pi']):\n if timeperiod == 'ANT_Fu':\n experi = 'PAMIP-1.8'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Future Sea Ice!')\n elif timeperiod == 'ANT_Cu':\n experi = 'PAMIP-1.1-QBO'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n if varid == 'SIC':\n experi = 'PAMIP_Cu' # missing SIC data in 1.1-QBO\n directorydata = '/seley/zlabe/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1701-2000.nc'\n print('Reading in Antarctic Present-Day Sea Ice!')\n elif timeperiod == 'ANT_Pi':\n experi = 'PAMIP-1.7'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Pre-Industrial Sea Ice!')\n else:\n print(ValueError('Selected wrong time period!')) \n else:\n print(ValueError('Selected wrong experiment name!'))\n \n if varid == 'EGR' and level == 'surface': # integrated from 500-850 hPa\n filename = totaldirectory + varid + '_500_850.nc'\n\n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n lev = 'surface'\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'zonmean': # 3d variables (zonal mean!)\n varidz = varid + '_' + level\n filename = totaldirectory + varidz + '_1900-2000.nc'\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n varq = data.variables['%s' % varid][:].squeeze()\n data.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:],varid))\n\n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'zonmean': # 3d variables (zonal mean!)\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' 
% (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n elif varid == 'SWE':\n var = var*1000. # Meters to Millimeters \n print('Completed: Changed units (m to mm)!')\n \n print('Completed: Read members 1-100!')\n\n print('>>>>>>>>>> Completed: Finished readExperiAll function!')\n return lat,lon,lev,var",
"def read_FMI_weather(ID, start_date, end_date, sourcefile, CO2=380.0):\n \n # OmaTunniste;OmaItä;OmaPohjoinen;Kunta;siteid;vuosi;kk;paiva;longitude;latitude;t_mean;t_max;t_min;\n # rainfall;radiation;hpa;lamposumma_v;rainfall_v;lamposumma;lamposumma_cum\n # -site number\n # -date (yyyy mm dd)\n # -latitude (in KKJ coordinates, metres)\n # -longitude (in KKJ coordinates, metres)\n # -T_mean (degrees celcius)\n # -T_max (degrees celcius)\n # -T_min (degrees celcius)\n # -rainfall (mm)\n # -global radiation (per day in kJ/m2)\n # -H2O partial pressure (hPa)\n\n sourcefile = os.path.join(sourcefile)\n\n #ID = int(ID)\n\n # import forcing data\n fmi = pd.read_csv(sourcefile, sep=';', header='infer', \n usecols=['OmaTunniste', 'Kunta', 'aika', 'longitude',\n 'latitude', 't_mean', 't_max', 't_min', 'rainfall',\n 'radiation', 'hpa', 'lamposumma_v', 'rainfall_v'],\n parse_dates=['aika'],encoding=\"ISO-8859-1\")\n \n time = pd.to_datetime(fmi['aika'], format='%Y%m%d')\n\n fmi.index = time\n fmi = fmi.rename(columns={'OmaTunniste': 'ID', 'longitude': 'lon',\n 'latitude': 'lat', 't_mean': 'T', 't_max': 'Tmax',\n 't_min': 'Tmin', 'rainfall': 'Prec',\n 'radiation': 'Rg', 'hpa': 'h2o', 'lamposumma_v': 'dds',\n 'rainfall_v': 'Prec_a'})\n \n fmi['h2o'] = 1e-1*fmi['h2o'] # hPa-->kPa\n fmi['Rg'] = 1e3 / 86400.0*fmi['Rg'] # kJ/m2/d-1 to Wm-2\n fmi['Par'] = 0.5*fmi['Rg']\n\n # saturated vapor pressure\n esa = 0.6112*np.exp((17.67*fmi['T']) / (fmi['T'] + 273.16 - 29.66)) # kPa\n vpd = esa - fmi['h2o'] # kPa\n vpd[vpd < 0] = 0.0\n rh = 100.0*fmi['h2o'] / esa\n rh[rh < 0] = 0.0\n rh[rh > 100] = 100.0\n\n fmi['RH'] = rh\n fmi['esa'] = esa\n fmi['VPD'] = vpd\n\n fmi['doy'] = fmi.index.dayofyear\n fmi = fmi.drop(['aika'], axis=1)\n # replace nan's in prec with 0.0\n #fmi['Prec'][np.isnan(fmi['Prec'])] = 0.0\n fmi['Prec']= fmi['Prec'].fillna(value=0.0)\n # add CO2 concentration to dataframe\n fmi['CO2'] = float(CO2)\n \n # get desired period\n fmi = fmi[(fmi.index >= start_date) & (fmi.index <= end_date)]\n# if ID > 0:\n# fmi = fmi[fmi['ID'] == ID]\n return fmi",
"def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model",
"def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return",
"def CDItoNetCDF(self, region=None, ip=None, separatefile=True,\n exclude=None):\n\n if region is None:\n region = self.regions\n else:\n if isinstance(region, str):\n region = [region]\n\n if ip is None:\n ip = self.ip\n else:\n if isinstance(ip, int):\n ip = [ip]\n\n if not os.path.exists(self.cdi_path):\n os.mkdir(self.cdi_path)\n\n for reg in region:\n grid = grids.ShapeGrid(reg, self.spatial_resolution)\n gps = grid.get_gridpoints().index\n\n for ipe in ip:\n key = 'ECDI_' + str(ipe)\n\n print ('[INFO] calc ECDI ' + reg + ' IP' + str(ipe))\n\n if separatefile:\n dest_file = os.path.join(self.cdi_path,\n reg + '_' + key + '.nc')\n else:\n dest_file = os.path.join(self.data_path, reg + '_' +\n str(self.spatial_resolution) +\n '_' + self.temporal_resolution +\n '.nc')\n\n wfile = os.path.join(self.weights_path, reg + '_weights_'\n + str(ipe) + '.nc')\n\n if not os.path.isfile(dest_file):\n grid = grids.ShapeGrid(reg, self.spatial_resolution)\n save_grid(dest_file, grid)\n\n with Dataset(dest_file, 'r+', format='NETCDF4') as cdifile:\n\n if 'time' not in cdifile.dimensions.keys():\n dt = get_dtindex(self.temporal_resolution,\n self.start_date)\n cdifile.createDimension(\"time\", None)\n\n times = cdifile.createVariable('time', 'uint16',\n ('time',))\n\n times.units = 'days since ' + str(self.start_date)\n times.calendar = 'standard'\n times[:] = date2num(dt.tolist(), units=times.units,\n calendar=times.calendar)\n\n else:\n times = cdifile.variables['time']\n\n if key not in cdifile.variables.keys():\n dim = ('time', 'lat', 'lon')\n cdi = cdifile.createVariable(key, 'f8',\n dim, fill_value=-99)\n else:\n cdi = cdifile.variables[key]\n\n for k, gp in enumerate(gps):\n\n if k % 100 == 0:\n print '.',\n\n position = np.where(cdifile.variables['gpi'][:] == gp)\n lat_pos = position[0][0]\n lon_pos = position[1][0]\n\n weights = {}\n\n parnum = (len(self.sources.keys()) -\n len(self.staticsources))\n\n if exclude is not None:\n parnum = parnum - 1\n\n dat = np.zeros((parnum, cdi.shape[0]), dtype=np.float)\n\n # dat = np.zeros((len(self.sources.keys()), cdi.shape[0]),\n # dtype=np.float)\n dat[dat == 0] = self.nan_value\n dat = np.ma.masked_values(dat, self.nan_value)\n\n # extract data from DI files and calc weights\n i = 0\n\n for param in self.sources.keys():\n if param in self.staticsources:\n continue\n if param == exclude:\n continue\n\n difile = os.path.join(self.di_path,\n reg + '_' + param\n + '_DI_' + str(ipe) + '.nc')\n\n with Dataset(difile, 'r', format='NETCDF4') as nc:\n for var in nc.variables.keys():\n if param in var:\n for j in range(0,\n nc.variables[var].shape[0]):\n dat[i, j] = (nc.variables[var]\n [j, lat_pos, lon_pos])\n\n with Dataset(wfile, 'r', format='NETCDF4') as nc:\n for var in nc.variables.keys():\n if param in var:\n weights[param] = (nc.variables[var]\n [lat_pos, lon_pos])\n i += 1\n\n dat = np.ma.masked_where(dat == self.nan_value, dat)\n dat = np.nan_to_num(dat)\n dat = np.ma.masked_where(dat == 0., dat)\n\n avg = np.ma.average(dat, axis=0,\n weights=weights.values())\n\n cdi[:, lat_pos, lon_pos] = avg\n\n print 'Done!'\n\n print 'Done!'",
"def ingest_data(self, input_file, num_bands, labels):\n self.labels = labels\n self.num_bands = num_bands\n with rasterio.open(input_file, \"r\") as dataset:\n for i in range(1, self.num_bands + 1):\n band = dataset.read(i)\n self.bands[self.labels[i - 1]] = band"
]
| [
"0.5458172",
"0.544506",
"0.5209446",
"0.5198056",
"0.51781434",
"0.51172733",
"0.5074061",
"0.50648355",
"0.50631434",
"0.5045619",
"0.50367564",
"0.50034976",
"0.5003238",
"0.5000506",
"0.49836215",
"0.49703583",
"0.4964933",
"0.49271986",
"0.49256334",
"0.4920216",
"0.4889704",
"0.48880574",
"0.48801282",
"0.48771656",
"0.4872068",
"0.48677355",
"0.4863998",
"0.4862673",
"0.48429438",
"0.48404187"
]
| 0.6332168 | 0 |
get valid directions by action | def _get_valid_directions(self, turn, action):
if not isinstance(action, Action):
raise TypeError("Action must be Action class")
if action.x < 0 or action.x > 7 or action.y < 0 or action.y > 7:
raise Exception("You must set disk in board.")
if self.board[action.x][action.y] is not 0:
return []
agent = -1 if turn is 0 else 1
dirs = []
for direction in self.directions:
for i in range(1, 9):
x = action.x + i * direction[0]
y = action.y + i * direction[1]
if x < 0 or x > 7 or y < 0 or y > 7:
break
if self.board[x][y] is 0:
break
elif self.board[x][y] is agent:
# if meet agent's disk and no opposite's dist in between, not valid
if i is 1:
break
dirs.append(direction)
break
else:
continue
return dirs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _needaction_domain_get(self):\n return [('name','!=',False)]",
"def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())",
"def getAction(self, state):\n \"*** YOUR CODE HERE ***\"\n x, y = state.getPacmanPosition(self.index)\n numPacmen = state.getNumPacmanAgents()\n if not MyAgent.customFood:\n MyAgent.customFood = state.getFood()\n MyAgent.foodLeft = len(MyAgent.customFood.asList())\n\n #if not self.foodIsThere(x, y):\n # self.path = None\n #trueLen = len(state.getFood().asList())\n #if not self.path and self.index < trueLen and trueLen < numPacmen:\n # problem = MySearchProblem(state, self.index, 1, state.getFood())\n # self.path = search.bfs(problem)\n if self.path and self.path[0] == 'place':\n if sum(MyAgent.finding) == 1:\n MyAgent.specialWalls[(x, y)] = self.path[1]\n self.path = None\n\n if not self.path and MyAgent.foodLeft > 0:\n problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)\n\n self.path = cbfs(problem)\n\n \n\n nx, ny = x, y\n if not self.path:\n return state.getLegalActions(self.index)[0]\n for i in range(len(self.path)):\n action = self.path[i]\n if action == 'place':\n MyAgent.finding[self.index] = False\n break\n MyAgent.finding[self.index] = True\n dx, dy = Actions.directionToVector(action)\n nx, ny = int(nx + dx), int(ny + dy)\n check = MyAgent.customFood[nx][ny]\n if check:\n MyAgent.foodLeft -= 1\n MyAgent.customFood[nx][ny] = False\n\n if not self.path:\n return state.getLegalActions(self.index)[0]\n dir = self.path.pop(0)\n return dir",
"def getLegalActions( state ): ## This is being called by the GameState.getLegalActions function and uses self as the state argument.\n return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )## REF-211 calls the getPossibleActions method in the Actions class.",
"def dominated_actions(self, tol=None, method=None):\n out = []\n for action in range(self.num_actions):\n if self.is_dominated(action, tol=tol, method=method):\n out.append(action)\n return out",
"def solution(self):\n\t\treturn [node.action for node in self.path()[1:]]",
"def _needaction_domain_get(self):\n return [('state', '=', 'failed')]",
"def get_legal_actions(self):\n pass",
"def getLegalActions(self,state):\n return self.actionFn(state)",
"def getLegalActions(self, state):\n return self.actionFn(state)",
"def _get_legal_actions(self):\n return self.game.get_legal_actions()",
"def _get_legal_actions(self):\n raise NotImplementedError",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def get_path(self):\r\n action_dict = {self.searchenv.agent_id:self.action}\r\n if self.parent.parent is None:\r\n return [action_dict]\r\n else:\r\n path = self.parent.get_path()\r\n path.append(action_dict)\r\n return path",
"def getLegalActions( state, ghostIndex ):\n conf = state.getGhostState( ghostIndex ).configuration\n possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )\n reverse = Actions.reverseDirection( conf.direction )\n if Directions.STOP in possibleActions:\n possibleActions.remove( Directions.STOP )\n if reverse in possibleActions and len( possibleActions ) > 1:\n possibleActions.remove( reverse )\n return possibleActions",
"def path_entries(self):",
"def getSuccInTree(self, action):\n if not action in self.children: return None\n if self.children[action].numVisits == 0: return None\n return self.children[action]",
"def list_dir(self):\n x = [x for x in os.listdir(self.spath) if os.path.isdir(os.path.join(self.spath, x))]\n if x != [] :\n print (f\"choose one of these : {x}\")",
"def get_input_files(self, action):\n assert action in self.actions, \"Invalid action\"\n return getattr(self, \"_get_input_files_{action}\".format(action=action))",
"def filter_paths(self, blobs):\n # check against one map for read, one for write\n # if check fails, figure out if it was the view map or the protects\n # that caused the problem and report accordingly\n self.author_denied = []\n self.pusher_denied = []\n self.foruser_denied = []\n self.fusion_denied = []\n self.unmapped = []\n c2d = P4.Map.RIGHT2LEFT\n\n LOG.debug('filter_paths() write_filter: %s', self.write_filter)\n for blob in blobs:\n gwt_path = self.ctx.gwt_path(blob['path'])\n topath_c = gwt_path.to_client()\n topath_d = gwt_path.to_depot()\n\n LOG.debug('filter_paths() topath_d: %s', topath_d)\n # for all actions, need to check write access for dest path\n result = \" \" # zum loggen\n if topath_d and P4GF_DEPOT_OBJECTS_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/objects')\n continue\n # do not require user write access to //.git-fusion/branches\n if topath_d and P4GF_DEPOT_BRANCHES_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/branches')\n continue\n if not self.write_filter.includes(topath_c, c2d):\n if not self.view_map.includes(topath_c, c2d):\n self.unmapped.append(topath_c)\n result = NTR('unmapped')\n elif not (self.ignore_author_perms or\n self.write_protect_author.includes(topath_d)):\n self.author_denied.append(topath_c)\n result = NTR('author denied')\n elif (self.write_protect_pusher and\n not self.write_protect_pusher.includes(topath_d)):\n self.pusher_denied.append(topath_c)\n result = NTR('pusher denied')\n elif (self.write_protect_foruser and\n not self.write_protect_foruser.includes(topath_d)):\n self.foruser_denied.append(topath_c)\n result = NTR('foruser denied')\n elif not self.write_protect_fusion.includes(topath_d):\n self.fusion_denied.append(topath_c)\n result = NTR('Git Fusion denied')\n else:\n result = \"?\"\n LOG.error('filter_paths() {:<13} {}, {}, {}'\n .format(result, blob['path'], topath_d, topath_c))\n elif LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('filter_paths() topath_c in write_filter: %s', topath_c)",
"def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def get_legal_moves(self, i, j):\r\n legal_moves = list()\r\n for action in self.action_dic.keys():\r\n coordinate_change = self.action_dic[action]\r\n new_i = coordinate_change[0] + i\r\n new_j = coordinate_change[1] + j\r\n if (new_i >= 0 and new_i < 3) and (new_j >= 0 and new_j < 3):\r\n legal_moves.append(self.reflection_dic[action])\r\n return legal_moves",
"def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list",
"def actions_for_path(problem,path):\n actions = []\n for i,node in enumerate(path):\n if i==(len(path)-1):\n break\n successors = problem.getSuccessors(node)\n for successor in successors:\n ss,aa,_ = successor\n if ss == path[i+1]:\n actions.append(aa)\n break\n return(actions)",
"def get_available_actions(self): \n actions = [] \n direction = [[1, 0], [0, 1]]\n for dir_ in direction:\n for point in self.points_generator(): \n dir_p = Point(*dir_)\n new_point = point + dir_p\n try:\n _ = self.game.board[new_point] \n actions.append((point, new_point))\n except OutOfBoardError:\n continue\n return actions"
]
| [
"0.5468673",
"0.54495037",
"0.5311105",
"0.5310429",
"0.5265401",
"0.5223957",
"0.5167855",
"0.51475793",
"0.51320946",
"0.5081162",
"0.50440097",
"0.50379986",
"0.49994648",
"0.49994648",
"0.49994648",
"0.49994648",
"0.49954423",
"0.49549085",
"0.49456745",
"0.49088195",
"0.49078098",
"0.4893499",
"0.48894826",
"0.48688",
"0.4838526",
"0.4838526",
"0.48039582",
"0.4798551",
"0.47892737",
"0.4787909"
]
| 0.6010672 | 0 |
register function as agent actor | def agent_actor(self, func):
if len(self.agents) is 2:
raise Exception("You cannot register 3 or more agents.")
self.agents.append(func)
return func | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def agentbehavior1():\n gr = register_message()\n\n pass",
"def _register_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n self._register(description, \"registering agent on SOEF.\")",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def make_agent(agent_id, **kwargs):\n return agent_register[agent_id](**kwargs)",
"def agent_behaviour(queue):\n\n gr = register_message()",
"def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id",
"def register(locator: str, entry_point, **kwargs):\n\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)",
"def RegisterActor(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def actor():\n return Actor()",
"def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)",
"def server_agent():",
"def registrarAgenteJogador(self):\r\n return",
"def __init__(self, agent):\n self.agent = agent",
"def onActionTaken(self, agent):\n\n pass",
"def register_message():\n\n logger.info('Nos registramos')\n\n gr = register_agent(AgenteAlojamientosExternoAmadeus, DirectoryAgent, AgenteAlojamientosExternoAmadeus.uri, get_count())\n return gr",
"def agent_init(self):\n pass",
"def __init__(self, agent: AEA) -> None:\n self._agent = agent\n super().__init__()",
"def agent_reward(self, func):\n if len(self.agents_reward) is 2:\n raise Exception(\"You cannot register 3 or more agents_reward.\")\n\n self.agents_reward.append(func)\n return func",
"def register(func):\n plugins[func.__name__] = func\n return func",
"def add_transport(self, agent):\n with self.simulation_mutex:\n self.get(\"transport_agents\")[agent.name] = agent",
"def doRegisterAgent(\n registrar_ip: str,\n registrar_port: str,\n agent_id: str,\n ek_tpm: bytes,\n ekcert: Optional[Union[bytes, str]],\n aik_tpm: bytes,\n mtls_cert: Optional[bytes] = None,\n contact_ip: Optional[str] = None,\n contact_port: Optional[str] = None,\n) -> Optional[str]:\n\n data: Dict[str, Any] = {\n \"ekcert\": ekcert,\n \"aik_tpm\": aik_tpm,\n }\n if ekcert is None or ekcert == \"emulator\":\n data[\"ek_tpm\"] = ek_tpm\n\n if mtls_cert is not None:\n data[\"mtls_cert\"] = mtls_cert\n else:\n data[\"mtls_cert\"] = \"disabled\"\n logger.error(\"Most actions require the agent to have mTLS enabled, but no cert was provided!\")\n if contact_ip is not None:\n data[\"ip\"] = contact_ip\n if contact_port is not None:\n data[\"port\"] = contact_port\n\n response = None\n try:\n # The agent accesses the registrar without mTLS, meaning without client\n # certificate\n # TODO the registrar could be accessed using TLS, but without client\n # certificate verification. Currently it is accessed without TLS at all\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", False)\n response = client.post(f\"/v{api_version}/agents/{agent_id}\", data=json.dumps(data))\n response_body = response.json()\n\n if response.status_code != 200:\n logger.error(\"Error: unexpected http response code from Registrar Server: %s\", response.status_code)\n keylime_logging.log_http_response(logger, logging.ERROR, response_body)\n return None\n\n logger.info(\"Agent registration requested for %s\", agent_id)\n\n if \"results\" not in response_body:\n logger.critical(\"Error: unexpected http response body from Registrar Server: %s\", response.status_code)\n return None\n\n if \"blob\" not in response_body[\"results\"]:\n logger.critical(\"Error: did not receive blob from Registrar Server: %s\", response.status_code)\n return None\n\n return str(response_body[\"results\"][\"blob\"])\n except Exception as e:\n if response and response.status_code == 503:\n logger.error(\"Agent cannot establish connection to registrar at %s:%s\", registrar_ip, registrar_port)\n sys.exit()\n else:\n logger.exception(e)\n\n return None",
"def TraceAgent(agent):\n old_program = agent.program\n\n def new_program(percept):\n action = old_program(percept)\n print '%s perceives %s and does %s' % (agent, percept, action)\n return action\n\n agent.program = new_program\n return agent",
"def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)",
"def register(self, target, hostname, listener_type, expire=-1):",
"def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent",
"def _register(self, comm, handler):",
"def attach(self, name: str, func: Callable[[], Any]) -> None:\n self._probes[name] = func",
"def register(func):\n PLUGINS[func.__name__] = func\n return func",
"def register_act(key, module):\n register(key, module, act_dict)",
"def agent_set(bus):\n # TODO\n pass"
]
| [
"0.6673103",
"0.65623486",
"0.6539472",
"0.6514525",
"0.6410053",
"0.6113836",
"0.6079823",
"0.60349107",
"0.59799165",
"0.5877702",
"0.5860427",
"0.58369124",
"0.5813449",
"0.5748822",
"0.568736",
"0.5673512",
"0.566552",
"0.5624594",
"0.5615585",
"0.5604878",
"0.55964804",
"0.5591971",
"0.5581583",
"0.557001",
"0.5556007",
"0.5518238",
"0.5512559",
"0.5507618",
"0.54961336",
"0.5490832"
]
| 0.7703901 | 0 |
register function as renderer | def renderer(self, func):
self.renderers.append(func)
return func | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render(self):",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def dspyRender(self):\n pass",
"def render(self, renderer, right=False):\n pass # pragma: no cover",
"def _render(self) -> None:\n pass",
"def _render_callback(self, _sim, _viewer):\n pass",
"def render(self):\n raise NotImplementedError",
"def renderer(self, ctx, name):\n\t\tif name in self.service.nevowRenderers:\n\t\t\treturn self.service.nevowRenderers[name]\n\t\treturn rend.Page.renderer(self, ctx, name)",
"def render(self, rstate):\n pass",
"def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")",
"def render(self):\n raise NotImplementedError()",
"def render(self, mode='human'):",
"def render(self, r):\n raise NotImplementedError",
"def render( *args, **kwargs ):",
"def render(self):\r\n super().render()",
"def render( request, etype, value, tb ):",
"def regular(self):",
"def render(self):\n return self",
"def renderer(*args, addGlobalsNode: Union[AnyStr, bool]=\"\", addGlobalsTab: List[AnyStr, AnyStr,\n AnyStr]=None, batchRenderOptionsProcedure: Union[AnyStr, bool]=\"\",\n batchRenderOptionsStringProcedure: Union[AnyStr, bool]=\"\", batchRenderProcedure:\n Union[AnyStr, bool]=\"\", cancelBatchRenderProcedure: Union[AnyStr, bool]=\"\",\n changeIprRegionProcedure: Union[AnyStr, bool]=\"\", commandRenderProcedure:\n Union[AnyStr, bool]=\"\", exists: bool=True, globalsNodes: bool=True,\n globalsTabCreateProcNames: bool=True, globalsTabLabels: bool=True,\n globalsTabUpdateProcNames: bool=True, iprOptionsMenuLabel: Union[AnyStr, bool]=\"\",\n iprOptionsProcedure: Union[AnyStr, bool]=\"\", iprOptionsSubMenuProcedure:\n Union[AnyStr, bool]=\"\", iprRenderProcedure: Union[AnyStr, bool]=\"\",\n iprRenderSubMenuProcedure: Union[AnyStr, bool]=\"\", isRunningIprProcedure:\n Union[AnyStr, bool]=\"\", logoCallbackProcedure: Union[AnyStr, bool]=\"\",\n logoImageName: Union[AnyStr, bool]=\"\", materialViewRendererList: bool=True,\n materialViewRendererPause: bool=True, materialViewRendererSuspend: bool=True,\n namesOfAvailableRenderers: bool=True, pauseIprRenderProcedure: Union[AnyStr,\n bool]=\"\", polyPrelightProcedure: Union[AnyStr, bool]=\"\",\n refreshIprRenderProcedure: Union[AnyStr, bool]=\"\", renderDiagnosticsProcedure:\n Union[AnyStr, bool]=\"\", renderGlobalsProcedure: Union[AnyStr, bool]=\"\",\n renderMenuProcedure: Union[AnyStr, bool]=\"\", renderOptionsProcedure: Union[AnyStr,\n bool]=\"\", renderProcedure: Union[AnyStr, bool]=\"\", renderRegionProcedure:\n Union[AnyStr, bool]=\"\", renderSequenceProcedure: Union[AnyStr, bool]=\"\",\n rendererUIName: Union[AnyStr, bool]=\"\", renderingEditorsSubMenuProcedure:\n Union[AnyStr, bool]=\"\", showBatchRenderLogProcedure: Union[AnyStr, bool]=\"\",\n showBatchRenderProcedure: Union[AnyStr, bool]=\"\", showRenderLogProcedure:\n Union[AnyStr, bool]=\"\", startIprRenderProcedure: Union[AnyStr, bool]=\"\",\n stopIprRenderProcedure: Union[AnyStr, bool]=\"\", supportColorManagement: bool=True,\n textureBakingProcedure: Union[AnyStr, bool]=\"\", unregisterRenderer: bool=True,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass",
"def _get_renderer(self) :\n \n return self._renderer",
"def render(self, *args, **kwargs):\r\n raise NotImplementedError",
"def render(self, value, renderer, workload, incognito=False):\n # get the name to use\n name = None if incognito else self.name\n # delegate to my protocol\n yield from self.protocol.pyre_render(renderer=renderer,\n name=name, component=value, workload=workload)\n # all done\n return",
"def render(self):\n raise RenderNotImplemented('Render function is not implemented.')",
"def render_form():",
"def register_render_tag(renderer):\n def tag(parser, token):\n class TagNode(template.Node):\n def render(self, context):\n return renderer(context, token)\n return TagNode()\n for copy_attr in (\"__dict__\", \"__doc__\", \"__name__\"):\n setattr(tag, copy_attr, getattr(renderer, copy_attr))\n return register.tag(tag)"
]
| [
"0.7153093",
"0.68493503",
"0.68493503",
"0.68493503",
"0.68493503",
"0.68493503",
"0.68493503",
"0.67106134",
"0.6558736",
"0.64851606",
"0.647033",
"0.64371425",
"0.6435466",
"0.6386689",
"0.6374712",
"0.6264179",
"0.62263614",
"0.62246114",
"0.6217402",
"0.6178282",
"0.61030376",
"0.60545623",
"0.6007939",
"0.5999473",
"0.5975126",
"0.59732693",
"0.594157",
"0.5924084",
"0.584276",
"0.58358383"
]
| 0.69676054 | 1 |
Changes the state of a cell to 'dead' (deletes it from _state) | def die(self, cell: Position):
self._next_state.remove(cell) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setToDead(self, state=DeadState()):\n if len(self._lastStates) > 5:\n self._lastStates.pop(0)\n self._lastStates.append(self._state)\n self._state = state\n self._nextState = self._state\n return self._state",
"def just_died(self):\r\n self.dead = True",
"def set_dead(self):\n assert self.health < 0, \"Health is not less than 0, Sprite should not be dead\"\n self.is_alive = False",
"def reset_state(self):\n for row in range(len(self.state)):\n for column in range(len(self.state[row])):\n self.state[row][column] = None",
"def set_dead(self):\n self.is_alive = False\n print(self.name, \"dies: How unfortunate, my time has come!\")",
"def kill_cell(self, cell):\n cell.type = self.DYING\n\n # Remove viral replication model: no model for dead cell type\n CoronavirusLib.reset_viral_replication_variables(cell=cell)\n self.remove_viral_replication_model(cell=cell)",
"def erase(self):\n\tself.state={}\n\tself.display(update_board=0)",
"def erase_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [None] + state.array[state.index + 1 :]\n )",
"def onLoseCell(self):\n\t\t# Destroy base\n\t\tif not self.isDestroyed:\n\t\t\tself.destroy()\n\t\tDEBUG_MSG('Avatar::onLoseCell: %i' % self.id)",
"def reset_board(self):\n cell_list = self.get_cells()\n for current_cell in cell_list:\n current_cell.set_cell_state(0) # remove player ownership of cell",
"def transition_to(self, state: cell_state):\n self._state = state\n self._state.cell = self",
"def kill(self):\n self.is_dead = True\n self.color = (255, 0, 0)# Turn RED when dead",
"def set_dead(self):\n self.is_alive = False\n print(self.name, \"has been slayed: My evil comrades will avenge my death!\")",
"def die(self, dt):\r\n self.dead = True",
"def mark_safe(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)",
"def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive",
"def cell_removed(self):\n self.stop_discharge()\n self.set_empty()\n log.info(\"Cell removed from slot {}.\".format(self.channel))",
"def _drop_io_state(self, state):\n if self._state & state:\n self._state = self._state & (~state)\n self._update_handler(self._state)",
"def refill(self):\n self.deadt = self.reviveTime\n self.mana[0] = self.mana[1]",
"def mark_safe(self, cell):\n #if cell in self.cells, else do nothing\n if cell in self.cells:\n #remove the cell since known\n self.cells.discard(cell)",
"def mark_mine(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n self.count=self.count-1",
"def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n #raise NotImplementedError",
"def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)",
"def del_cells(self):\t\r\n del self._cells",
"def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())",
"def clearState(self):\n self.physicalState = (None for unused in self.indVars)",
"def mark_mine(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)\n self.count -= 1",
"def reset(self):\n self.state.fill(EMPTY)",
"def mark_mine(self, cell):\n if cell in self.cells:\n self.count -= 1\n self.cells.remove(cell)",
"def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)"
]
| [
"0.7117927",
"0.6882622",
"0.67887944",
"0.6527977",
"0.63036853",
"0.62571025",
"0.61986595",
"0.6177285",
"0.6154865",
"0.61490744",
"0.61047286",
"0.6075287",
"0.6068681",
"0.5937211",
"0.59354275",
"0.5935308",
"0.5922249",
"0.5892255",
"0.5870188",
"0.58586854",
"0.58119404",
"0.58018845",
"0.57958096",
"0.5774942",
"0.5771686",
"0.57558674",
"0.5734776",
"0.57150143",
"0.56957036",
"0.5675021"
]
| 0.7240025 | 0 |
Checks if a given cell is alive or not. | def is_alive(self, cell: Position) -> bool:
return cell in self._state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _should_cell_live(self, cell: Cell) -> bool:\n living_neighbours_count = self._count_living_neighbors(cell)\n # Any live cell with two or three live neighbours survives\n if cell.is_alive and living_neighbours_count in [2, 3]:\n return True\n # Any dead cell with three live neighbours becomes a live cell\n if not cell.is_alive and living_neighbours_count == 3:\n return True\n # All other live cells die in the next generation. Similarly, all other dead cells stay dead\n return False",
"def live_or_die(self, x, y):\n neighbors = self.get_neighbors(x, y)\n num_neighbors = 0\n for val in neighbors:\n if val:\n num_neighbors+=1\n\n\n # cell dies if less than 2 neighbors\n if num_neighbors < 2:\n return False\n\n # cell lives on if has 2 or 3 neighbors\n if (num_neighbors == 2 or num_neighbors == 3) and self._board[x][y]:\n return True\n\n # cell dies if more than 2 neighbors\n if num_neighbors > 3:\n return False\n\n # cell is born if has 3 neighbors\n if num_neighbors == 3 and not self._board[x][y]:\n return True\n\n # for consistency\n return False",
"def is_alive(self, x: int, y:int) -> bool :\n return self.table[y][x]",
"def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True",
"def calculate_dead_alive(board, posx, posy):\n alive = 0\n for aux in ((x, y) for x in [-1, 0, 1] for y in [-1, 0, 1]):\n if aux == (0, 0):\n continue\n pos = np.array((posx, posy)) + np.array(aux)\n if min(pos) < 0 or max(pos) >= board.shape[0]:\n continue\n alive += board[pos[0]][pos[1]]\n if board[posx][posy]:\n # alive cell\n if alive in (2, 3):\n return True\n else:\n # dead cell\n if alive == 3:\n return True\n return False",
"def is_alive(self):\r\n return self._health_points > 0",
"def is_alive(self):\n if self.health > 0:\n return True\n return False",
"def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True",
"def is_in_board(self):\n return self.is_alive()",
"def should_be_alive(live_coords=None, coord=None, is_alive=True):\n if not live_coords or not coord:\n return False\n num_alive = alive_neighbors(live_coords, coord)\n if is_alive:\n if num_alive < 2:\n return False\n elif num_alive == 2 or num_alive == 3:\n return True\n elif num_alive > 3:\n return False\n elif num_alive == 3:\n return True\n return False",
"def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True",
"def test_live_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n world.set_cell((0, 0))\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive",
"def alive(self):\n\t\treturn any( (ind for ind in self.members if ind.current_hp > 0) )",
"def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True",
"def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive",
"def cell_status(self, pos):\n if pos in self._coordinates:\n if pos in self._hit_coors:\n return True\n return False\n return None",
"def has_cells(self):\n return len(self._cells) > 0",
"def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0",
"def check_is_alive(self) -> bool:\n crew_alive = False\n for operator in self.__operators:\n if operator.is_alive:\n crew_alive = True\n break\n if crew_alive and self.health > self.MIN_HEALTH:\n self.__is_alive = True\n return True\n else:\n self.__is_alive = False\n return False",
"def isAlive(self):\n return self.is_alive()",
"def alive(self):\n return True",
"def is_alive(self):\n return hasattr(self, 'alive') and self.alive",
"def is_alive(self):\n return hasattr(self, 'alive') and self.alive",
"def check_empty_neighbours(self, cell):\n\t\tneighbours = self.get_neighbours(cell)\n\t\tflag = True\n\t\tfor neighbour in neighbours:\n\t\t\tif neighbour.state != 0:\n\t\t\t\tflag = False\n\t\treturn flag",
"def isAlive(self):\n return self._state.isAlive()",
"def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False",
"def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False",
"def isAlive(self):\n raise NotImplementedError",
"def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())",
"def is_inacessible(cell):\n adj, count = num_adj_buildings(cell)\n return adj == count"
]
| [
"0.7732439",
"0.7323605",
"0.72250414",
"0.70527005",
"0.68661433",
"0.6856171",
"0.68087983",
"0.67438215",
"0.6720457",
"0.6720003",
"0.6717379",
"0.6694156",
"0.6650826",
"0.6636511",
"0.6630166",
"0.6612319",
"0.6587549",
"0.65421706",
"0.6487532",
"0.6472208",
"0.6469954",
"0.64643496",
"0.64643496",
"0.6381865",
"0.6366702",
"0.63459635",
"0.6312499",
"0.62954354",
"0.62923455",
"0.62906855"
]
| 0.8583199 | 0 |
Returns coordinates of all neighbours of a given cell. | def get_neighbours(self, cell: Position) -> Iterable[Position]:
x, y = cell
return [
(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1),
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours",
"def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours",
"def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours",
"def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]",
"def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours",
"def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor",
"def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours",
"def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours",
"def get_neighbors(self, cell, count):\n row, col = cell\n # get all the neighbors\n neighbors = set([(min(self.height - 1, max(row + i, 0)), min(self.width - 1, max(col + j, 0))) \n for i in range(-1, 2)\n for j in range(-1, 2)])\n\n for neighbor in deepcopy(neighbors):\n if neighbor in self.safes or neighbor == cell:\n neighbors.remove(neighbor)\n elif neighbor in self.mines:\n neighbors.remove(neighbor)\n count -= 1\n\n return neighbors, count",
"def get_neighbours_square(self, cell, dist):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-dist,y+dist+1) if 0<=i<width for j in range(x-dist,x+dist+1) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]",
"def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n",
"def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours",
"def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]",
"def get_neighbours(self):\n return self.neighbours",
"def get_neighbours(self):\n return self._neighbours",
"def find_valid_neighbours(self, cell):\n\n delta = [('W', (-1, 0)),\n ('E', (1, 0)),\n ('S', (0, 1)),\n ('N', (0, -1))]\n neighbours = []\n for direction, (dx, dy) in delta:\n x2, y2 = cell.x + dx, cell.y + dy\n if (0 <= x2 < self.nx) and (0 <= y2 < self.ny):\n neighbour = self.cell_at(x2, y2)\n if neighbour.has_all_walls():\n neighbours.append((direction, neighbour))\n return neighbours",
"def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]",
"def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))",
"def getNeighbors(self, row, col):\n neighbors = []\n for deltaRow in range(-1, 2):\n for deltaCol in range(-1, 2):\n if not (deltaRow == 0 and deltaCol == 0) and self.inBoard(row + deltaRow, col + deltaCol):\n neighbors += [(row + deltaRow, col + deltaCol)]\n return neighbors",
"def eight_neighbors(self, row, col):\n ans = []\n if row > 0:\n ans.append((row - 1, col))\n if row < self._grid_height - 1:\n ans.append((row + 1, col))\n if col > 0:\n ans.append((row, col - 1))\n if col < self._grid_width - 1:\n ans.append((row, col + 1))\n if (row > 0) and (col > 0):\n ans.append((row - 1, col - 1))\n if (row > 0) and (col < self._grid_width - 1):\n ans.append((row - 1, col + 1))\n if (row < self._grid_height - 1) and (col > 0):\n ans.append((row + 1, col - 1))\n if (row < self._grid_height - 1) and (col < self._grid_width - 1):\n ans.append((row + 1, col + 1))\n return ans",
"def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count",
"def get_numbered_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if type(cell.state) is int]",
"def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates",
"def get_neighbours(self):\n return []",
"def neighbours(self):\n return [x.node for x in self.edges]",
"def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)",
"def get_neighbours(self):\n return self.points_to.keys()",
"def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours"
]
| [
"0.82961637",
"0.7774539",
"0.77439696",
"0.77102953",
"0.7669595",
"0.76490885",
"0.76151395",
"0.7545304",
"0.7466637",
"0.74308044",
"0.7411153",
"0.7390729",
"0.7387905",
"0.73723334",
"0.73409045",
"0.7315632",
"0.7302817",
"0.7294805",
"0.72876567",
"0.72745216",
"0.71861976",
"0.7136028",
"0.7134473",
"0.71342754",
"0.7110261",
"0.7099526",
"0.70873857",
"0.70529735",
"0.7048305",
"0.70326513"
]
| 0.88792723 | 0 |
Version 2: Counts the number of alive cells around a given cell. | def get_neighbours_count(self, cell: Position) -> int:
possible_neighbours = self.get_neighbours(cell)
return sum(self.is_alive(n) for n in possible_neighbours) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count",
"def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]",
"def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours",
"def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count",
"def get_cellcount(self):\n self.cellcount += 1\n return self.cellcount - 1",
"def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count",
"def number_at_cell(game, pokemon_locations, grid_size, index):\n num = 0\n # number of Pokemon in neighbouring cells\n neighbours = neighbour_directions(index,grid_size)\n for neighbour in neighbours:\n if neighbour in pokemon_locations:\n num += 1\n return num",
"def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count",
"def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives",
"def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors",
"def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count",
"def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count",
"def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total",
"def getCellCount(self, idx = None, cell = 1, verbose = 0):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n areas = self.getAreas(idx = idx, cell = cell)\n if cell == 1:\n base_area = np.abs(np.linalg.det(self.base_1[:2, :2]))\n elif cell == 2:\n base_area = np.abs(np.linalg.det(self.base_2[:2, :2]))\n\n count = areas / base_area\n\n if verbose > 0:\n string = \"Cell count for cell %i, with %i index, max deviation: %.4E\"\\\n % (cell, len(count), np.max(count - np.round(count, 0)))\n ut.infoPrint(string)\n\n return count",
"def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt",
"def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }",
"def get_neighbor_live_count(cart):\n count = 0\n for i in range(6):\n cart2 = (cart[0] + dxv[i],cart[1] + dyv[i],cart[2] + dzv[i])\n if check_cart(cart2) and voxel_data[cart_to_loc(cart2)] == 1:\n count += 1\n return count",
"def h(self, node):\n count_peg = -1\n for line in node.state.board:\n count_peg += line.count(c_peg())\n return count_peg",
"def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter",
"def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0",
"def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count",
"def neighbors(self, row, col):\n alive_around = 0\n for i in range(row -1, row + 2):\n for j in range(col - 1, col + 2):\n irow = i % self.row\n icol = j % self.col\n if (not (irow == row and icol == col)):\n if (self.now[irow, icol]):\n alive_around = alive_around + 1\n\n return alive_around",
"def number_at_cell(self, pokemon_locations, grid_size, index):\n if self.get_game()[index] != UNEXPOSED:\n return int(self.get_game()[index])\n\n number = 0\n for neighbour in self.neighbour_directions(index, grid_size):\n if neighbour in pokemon_locations:\n number += 1\n\n return number",
"def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count",
"def Ncells(self):\n return len(self.cells)",
"def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0",
"def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count",
"def count(self,color):\n count = 0\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n if(self.gameState[x,y]==color):\n count+=1\n return count",
"def count_blood_cells(image_path):\n\n # TODO - Prebrojati crvena i bela krvna zrnca i vratiti njihov broj kao povratnu vrednost ove procedure\n \"\"\"\n White cells\n \"\"\"\n # Getting image\n white_cells_img = cv2.imread(image_path)\n gray_img = cv2.cvtColor(white_cells_img, cv2.COLOR_BGR2GRAY)\n\n # Apply median filter for smoothing\n smooth_img_white = cv2.medianBlur(gray_img, 5)\n\n # Morphological operations\n kernel = np.ones((5, 5), np.uint8)\n closing_img = cv2.morphologyEx(smooth_img_white, cv2.MORPH_CLOSE, kernel)\n\n # Adaptive threshold gaussian filter\n threshold_img = cv2.adaptiveThreshold(closing_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY, 9, 2)\n\n # Segmentation of white cells\n circles_a = cv2.HoughCircles(threshold_img, cv2.HOUGH_GRADIENT, 1.2, 105,\n param1=50, param2=28, minRadius=2, maxRadius=28)\n\n # Getting count of white cells\n cell_count_a = []\n if circles_a is not None:\n circles_a = np.round(circles_a[0, :]).astype(\"int\")\n for (r) in circles_a:\n cell_count_a.append(r)\n # print(len(cell_count_a))\n white_blood_cell_count = len(cell_count_a)\n\n \"\"\"\n Red cells\n \"\"\"\n # Getting image\n red_cells_img = cv2.imread(image_path)\n\n # Getting red color\n red = [(150, 137, 168), (218, 209, 208)] # (lower), (upper)\n colors = [red]\n\n # Apply median filter for smoothing\n smooth_img_red = cv2.medianBlur(red_cells_img, 3)\n\n cell_count_b = 0\n output = red_cells_img.copy()\n for lower, upper in colors:\n mask = cv2.inRange(smooth_img_red, lower, upper)\n\n # Segmentation of red cells\n circles_b = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 1, 20, param1=15, param2=17,\n minRadius=2, maxRadius=60)\n\n # Getting count of red cells\n if circles_b is not None:\n circles_b = np.round(circles_b[0, :]).astype(\"int\")\n\n for (x, y, r) in circles_b:\n cv2.circle(output, (x, y), r, (255, 0, 255), 2)\n cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (255, 0, 255), -1)\n cell_count_b += 1\n\n # cv2.imwrite('output.png', output)\n # print(cell_count_b)\n red_blood_cell_count = cell_count_b\n\n # TODO - Odrediti da li na osnovu broja krvnih zrnaca pacijent ima leukemiju i vratiti True/False kao povratnu\n # vrednost ove procedure\n\n if (white_blood_cell_count > 2\n or\n white_blood_cell_count >= (red_blood_cell_count / 3)):\n has_leukemia = True\n else:\n has_leukemia = False\n\n return red_blood_cell_count, white_blood_cell_count, has_leukemia",
"def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count"
]
| [
"0.7672008",
"0.76624143",
"0.7382347",
"0.7139768",
"0.7037743",
"0.69860864",
"0.69566",
"0.6810786",
"0.6799832",
"0.6796467",
"0.6747899",
"0.6627136",
"0.6614573",
"0.6529785",
"0.6528386",
"0.64564586",
"0.6418126",
"0.638742",
"0.6342964",
"0.6341502",
"0.634074",
"0.6327375",
"0.6310835",
"0.6309657",
"0.6292701",
"0.628979",
"0.62341166",
"0.6229762",
"0.61480194",
"0.6138953"
]
| 0.80764383 | 0 |
Log the test method name at the information level | def logTestName(self):
logging.info('%s', self.id()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testCaseInfo(self):\n print(\"Override method..\")",
"def _log_some_info(self):\n logging.info('info')",
"def function_name(func):\n return log(level=\"info\", message=_function_name(func))",
"def _info(self, func):\n self.logger.info(\"llamando a %s\" % func)",
"def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')",
"def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return",
"def info(self, message, *args, **kwargs):\n method = kwargs.pop('method_name', None)\n clazz = kwargs.pop('class_name', None)\n error = kwargs.pop('error', None)\n level = Level.INFO\n if self._mode_type == TOOL:\n level = Level.FINE\n record = self._get_log_record(level, clazz, method, message, error, *args)\n self.logger.log(record)",
"def get_test_method_name(self) -> str:\n return self._testMethodName",
"def method_name(self):\n pass",
"def test_level_info(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.INFO)), \":speech_balloon: **test**\")",
"def test_03_module_logger(self):\n module_logger = get_module_logger(self.__module__)\n module_logger.info('Info in Module {}'.format(self.__module__))",
"def log(info):\n print(f\"[{info}]\")",
"def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')",
"def __init__(self, method_name='runTest'):\n super(COTTestCase, self).__init__(method_name)\n self.logging_handler = UTLoggingHandler(self)",
"def log_info(info_dict):\n pass",
"def log_info(self, message, msg_type='info'):\n pass",
"def test_logging():\n assert logger.name == 'wellcomeml.logger'",
"def __call__(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)",
"def info(self, *args, **kwargs):\n self.msg(logging.INFO, *args, **kwargs)",
"def test_info(self, request):\n def add_info(info):\n \"\"\"\n Adds information about test\n \"\"\"\n self._info[get_test_name(request)].append(info)\n return add_info",
"def info ( self , message , *args , **kwargs ) :\n return self.logger.info ( message , *args , **kwargs )",
"def generic_log(function_name, args):\n print(\"*\" * 100)\n print(\"function: \" + function_name + \"()\")\n print(' '.join([str(arg) for arg in args]))\n print(\"*\" * 100)",
"def test_stack_info(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(sinfo=\"stack\")), \":warning: **test**\\n\" + \\\n \"**stack**\\n\" + \\\n \"```\\n\" + \\\n \"stack\\n\" + \\\n \"```\")",
"def info( cls, msg ):\n cls.log( logging.INFO, msg )",
"def info(self, *args, **kwargs):",
"def unitTest(self, _strMessage=\"\"):\n self.edLogging.unitTest(_strMessage)",
"def _extract_test_method_name(item):\n return getattr(item, \"_testMethodName\", None)",
"def known_verbose_name():\n return 'test Verbose name'",
"def foo_method(self):\n return \"My name is foo_method.\"",
"def __init__(self, method_name='runTest'):\n super(COT_UT, self).__init__(method_name)\n self.logging_handler = UTLoggingHandler(self)"
]
| [
"0.6939496",
"0.6753157",
"0.67056024",
"0.6593296",
"0.655111",
"0.65080243",
"0.63936245",
"0.63719064",
"0.63291377",
"0.62749195",
"0.6237274",
"0.6226717",
"0.6113689",
"0.60864156",
"0.6064637",
"0.6063154",
"0.60485363",
"0.6036148",
"0.6035138",
"0.60232055",
"0.6016088",
"0.60004264",
"0.5986786",
"0.5980827",
"0.5980055",
"0.5958538",
"0.5955972",
"0.5953422",
"0.5929682",
"0.58860356"
]
| 0.7718581 | 0 |
Check the target. Create the directory if it does not exist. Remove the file if it exists. | def check_output(self):
directory, file = split(self.target)
if not exists(directory):
mkdir(directory)
if exists(self.target):
unlink(self.target) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _prepare_target_dir(file_path: Path):\n if os.path.isfile(file_path):\n delete = input(f\"File '{file_path.resolve()}' exists. Delete? (Y/n) \")\n if delete == \"Y\":\n os.remove(file_path)\n logging.info(\"File removed.\")\n else:\n logging.info(\"Won't overwrite existing file. Quit.\")\n quit()\n else:\n file_path.parents[0].mkdir(parents=True, exist_ok=True)",
"def _create_target_path(self, path):\n if not os.path.exists(path) and not self._dry_run:\n logging.debug('Creating target path: %s ...', path)\n try:\n os.makedirs(path)\n except OSError:\n raise LetMeError('Unable to create target path: %s' % path)",
"def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def _prepare_directory(self, directory: str) -> None:\n\t\tif os.path.exists(directory):\n\t\t\tif self.settings.log.overwrite:\n\t\t\t\tshutil.rmtree(path=directory)\n\t\t\telse:\n\t\t\t\traise FileExistsError(r'Target \"{}\" already existing! Aborting ...'.format(directory))\n\t\tos.makedirs(directory)",
"def _directory(self):\n dir = self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True",
"def prepare_target_dir(self, target):\n marker_file = os.path.join(target, CONTROLLED_DIR_MARKER)\n if os.path.isdir(target):\n if not self.disable_marker and not os.path.isfile(marker_file):\n self.stderr.write(\"Target directory already exists, but it appears to have been \"\n \"created by some other means. Marker file missing.\\n\")\n raise LayerCombinerExceptionCode(\"Target directory exists without marker file\",\n EXIT_CODE_COMBINE_MARKER_MISSING)\n\n elif self.dry_run:\n self.stderr.write(\"Skipping creating destination directory {target} (dry-run)\\n\")\n else:\n try:\n os.mkdir(target)\n except OSError as e:\n self.stderr.write(f\"Unable to create destination directory {target}. {e}\\n\")\n raise LayerCombinerExceptionCode(f\"Unable to create destination directory {target}\",\n EXIT_CODE_NO_SUCH_FILE)\n self.stderr.write(f\"Created destination directory {target}\\n\")\n if not self.disable_marker:\n with open(marker_file, \"w\") as f:\n f.write(\"This directory is managed by KSCONF. Don't touch\\n\")",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def test_ensure_dir_exists(self):\n pass",
"def ensure_path_exists(filename):\n targetdir = dirname(expanduser(filename))\n if exists(targetdir):\n return\n os.makedirs(abspath(targetdir))",
"def clean_target_pages_dir():\n if not os.path.exists(target_pages_dir):\n os.makedirs(target_pages_dir)\n else:\n shutil.rmtree(target_pages_dir)\n os.mkdir(target_pages_dir)",
"def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)",
"def check_file(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def ensure_build_dir(self):\n logging.debug('Ensuring build dir, {build_dir}, exists and is empty'.format(build_dir=self.build_dir))\n try:\n # raises OSError if path exists..\n os.makedirs(self.build_dir, exist_ok=False)\n except OSError:\n if not os.path.isdir(self.build_dir):\n raise\n\n # self.build_dir exists\n\n # recursively delete contents of build_dir, if exist\n # - let exceptions bubble up\n for file_ in os.listdir(self.build_dir):\n file_path = os.path.join(self.build_dir, file_)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)",
"def __checkDestination(self):\n return os.path.exists(self.__targetPath)",
"def clean_directory():\n if os.path.exists('data'):\n shutil.rmtree('data')\n os.makedirs('data')\n\n if os.path.exists('returns'):\n shutil.rmtree('returns')\n os.makedirs('returns')",
"def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)",
"def test_removed(self):\n path = None\n with TemporaryDirectory() as tmp:\n path = tmp\n self.assertTrue(os.path.isdir(tmp))\n tmpfile = os.path.join(tmp, \"a_temp_file\")\n open(tmpfile, \"w\").write(\"data\")\n self.assertTrue(os.path.isfile(tmpfile))\n self.assertFalse(os.path.isdir(path))\n self.assertFalse(os.path.exists(path))",
"def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)",
"def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass",
"def _delete_path_unsafe(target_path: str):\n if os.path.exists(target_path):\n if os.path.isdir(target_path):\n shutil.rmtree(target_path)\n else:\n os.remove(target_path)\n return True\n return False",
"def create_file_directory():\n\n # Verify if directory exist.\n # If yes, delete it and every thing inside and create it again.\n # If not, just create it.\n\n if os.path.isdir('./file'):\n\n shutil.rmtree('./file')\n\n os.mkdir('./file')",
"def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)",
"def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")",
"def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)",
"def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")",
"def check_path(fp):\n if not Path(fp).exists():\n\n if len(Path(fp).suffix) > 0: # check if file\n Path(fp).parent.mkdir(exist_ok=True, parents=True)\n\n else: # or directory\n Path(fp).mkdir(exist_ok=True, parents=True)",
"def test_remove(self):\n reposDir = self.makeRepository(self.tmpDir)\n testFile = reposDir.child(\"some-file\")\n testFile.setContent(b\"something\")\n self.commitRepository(reposDir)\n self.assertTrue(testFile.exists())\n\n self.createCommand.remove(testFile)\n testFile.restat(False) # Refresh the file information\n self.assertFalse(testFile.exists(), \"File still exists\")",
"def mkdir_if_nonexist(check_dir, raise_error=False):\n if not os.path.exists(check_dir):\n os.mkdir(check_dir)\n else:\n if raise_error:\n raise RuntimeError(\"Warning! {} has exist! please check\".format(check_dir))\n else:\n print(\"Warning! {} has exist! remove it now!\".format(check_dir))\n shutil.rmtree(check_dir)\n os.mkdir(check_dir)",
"def ensure_exists(output_dir):\n try:\n makedirs(output_dir)\n except OSError:\n if not isdir(output_dir):\n raise"
]
| [
"0.76190555",
"0.6762315",
"0.66105056",
"0.6576535",
"0.6568848",
"0.64974195",
"0.6490682",
"0.6441938",
"0.6410524",
"0.6340638",
"0.63112664",
"0.62681997",
"0.6213513",
"0.6172672",
"0.6133321",
"0.6117379",
"0.61134595",
"0.61049193",
"0.60567",
"0.6047739",
"0.6032793",
"0.60207653",
"0.6008231",
"0.5998676",
"0.59654063",
"0.5952216",
"0.59508157",
"0.5922629",
"0.59224063",
"0.5921088"
]
| 0.79120916 | 0 |
Call 'lame' to convert the source into target. | def convert(self):
#lame --mp3input --silent -h -b BITRATE SOURCE TARGET
self.success = False
command = ['lame', '-h', '--silent']
command.append('-b ' + str(self.bitrate))
command.append(self.source)
command.append(self.target)
msg('command', command)
error = check_call(command)
if error != 0:
raise TaskError(subprocess.CalledProcessError)
self.success = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run():\n renanme_action()\n\n write_anim()\n alc.save_file()",
"def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n 
empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')",
"def c2o(source_file, target_file):\n import time\n import subprocess as sp\n\n #object_file = target_file + '_NoZOLandVLIW.o'\n object_file = target_file + '.o'\n middle_file = target_file + '.ll'\n asm_file = target_file + '.s'\n\n stderr_mid = None\n stderr_asm = None\n stderr_obj = None\n\n start = time.perf_counter()\n cmd = [CLANG, source_file, '-o', middle_file] + CLANG_PARAMETER\n print('Generating .ll file...')\n\n try:\n completed_process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE,\n universal_newlines=True)\n print('Done!')\n stderr_mid = completed_process.stderr\n\n cmd = [LLC, middle_file, '-filetype=asm','-o', asm_file] + LLC_PARAMETER\n print('Generating asm file...')\n completed_process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE,\n universal_newlines=True)\n print('Done!')\n stderr_asm = completed_process.stderr\n\n if completed_process.returncode == 0:\n cmd = [LLC, middle_file, '-filetype=obj', '-o', object_file] + LLC_PARAMETER\n print('Generating obj file...')\n completed_process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE,\n universal_newlines=True)\n print('Done!')\n stderr_obj = completed_process.stderr\n\n try:\n operation(asm_file, object_file)\n change_machine_code(object_file)\n except FileNotFoundError as error:\n print(error)\n return (None, source_file)\n except ValueError:\n return (False, source_file)\n else:\n return (True,)\n finally:\n elapsed = time.perf_counter() - start\n with open(LOG_FILE, mode='a', newline=None) as log:\n sentence = '%s:\\nTime Elapsed %fs\\n' % (\n os.path.basename(source_file), elapsed)\n log.write(sentence)\n try:\n sentence = '\\n指令个数:%d\\t平均耗时:%fs\\n\\n' % (INST_NUM, elapsed / INST_NUM)\n log.write(sentence)\n except ZeroDivisionError:\n log.write('读取ASM文件中的指令出错')\n else:\n return (None, source_file)\n except OSError as error:\n print('\\n', cmd[0], '调用错误 :', error)\n return (None, source_file)\n finally:\n if stderr_mid is not None or stderr_asm is not None or stderr_obj is not None:\n with open(target_file + '.log', mode='w', newline=None) as log:\n if stderr_mid is not None:\n log.write('\\n#####==========stderr_mid==========#####:\\n')\n log.write(stderr_mid)\n if stderr_asm is not None:\n log.write('\\n#####==========stderr_asm==========#####:\\n')\n log.write(stderr_asm)\n if stderr_obj is not None:\n log.write('\\n#####==========stderr_obj==========#####:\\n')\n log.write(stderr_obj)",
"def main(args):\n print('loading {}'.format(args.stem_path))\n y, fs = librosa.load(args.stem_path, sr=44100)\n notes = mono_anal(y, fs)\n jam = output_to_jams(y, fs, notes, args)\n jam_path = args.stem_path.split('.')[0]+'.jams'\n jam.save(jam_path)\n print('jams file generated')\n return 0",
"def forward(self, audio):\n return self.l1(audio)",
"def encode(audio, video, output):\n check_call([\"mencoder\", \"-audiofile\", audio, \"-oac\", \"lavc\", \"-ovc\",\n \"lavc\", video, \"-o\", output], stdin=PIPE, stdout=PIPE, stderr=STDOUT)",
"def color_transfert():\n\n\n\tn_target = input(\"Tell me which picture wants a new make up.\\n\\n\")\n\tn_source = input(\"And now tell me which one she wanna look like \\n\\n\")\n\n\ttarget = cv.imread(n_target, 1)\n\tsource = cv.imread(n_source, 1)\n\n\t### So basically, target will get new colors from source\n\n\t## First let's convert them into the l alpha beta color space\n\n\tt_alpha = rgb2alpha(target)\n\ts_alpha = rgb2alpha(source)\n\n\n\t## Now let's make up our target thanks to some statistical operations\n\n\tm_target = make_up(t_alpha, s_alpha)\n\n\n\t## Finally we gonna convert target back to rgb space\n\n\tm_target = alpha2rgb(m_target)\n\n\t## And save it, so let's name it, you don't have to give the format, we'll add it here\n\n\tname = input(\"What's the name of the new picture ? \\n\")\n\n\tname += \".png\"\n\n\tcv.imwrite(name, m_target)\t\t# You can now post your new picture to instagramm and let\n\t\t\t\t\t\t# your followers believe that you are a skilled photograph.\t\n\t\t\t\t\t\t# I personally don't use this shit so fuck it.\n\n\tprint(\"{} saved.\".format(name))",
"def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):\n inspec = None\n inspecz = np.nan\n inspecmag = np.nan\n inspecpb = None\n\n source_table_file = os.path.join('sources', 'sourcetable.txt')\n source_table_file = io.get_pkgfile(source_table_file)\n source_table = at.Table.read(source_table_file, format='ascii')\n ind = (source_table['specname'] == source)\n nmatch = len(source_table['specname'][ind])\n if nmatch == 1:\n # load the file and the info\n inspec = source_table['specname'][ind][0]\n inspecz = source_table['redshift'][ind][0]\n inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag\n elif nmatch == 0:\n message = 'Spectrum {} not listed in lookup table'.format(source)\n pass\n else:\n message = 'Spectrum {} not uniquely listed in lookup table'.format(source)\n pass\n\n if inspec is None:\n warnings.warn(message, RuntimeWarning)\n inspec = source\n inspecz = sourcez\n inspecmag = sourcemag\n inspecpb = sourcepb\n\n if not os.path.exists(inspec):\n message = 'Spectrum {} could not be found'.format(inspec)\n raise ValueError(message)\n\n try:\n spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')\n except Exception as e:\n message = 'Could not read file {}'.format(source)\n raise ValueError(message)\n\n if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):\n pass\n else:\n pbs = passband.load_pbs([inspecpb], 0.)\n try:\n inspecpb = pbs[inspecpb][0]\n except KeyError as e:\n message = 'Could not load passband {}'.format(inspecpb)\n raise RuntimeError(message)\n\n try:\n inspecmag = float(inspecmag)\n except (TypeError, ValueError) as e:\n message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)\n raise ValueError(message)\n\n try:\n inspecz = float(inspecz)\n except (TypeError, ValueError) as e:\n message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)\n raise ValueError(message)\n\n if inspecz < 0 :\n message = 'Source must have positive definite cosmological redshift'\n raise ValueError(message)\n\n inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')\n try:\n inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)\n inspec.convert('flam')\n except Exception as e:\n message = 'Could not renormalize spectrum {}'.format(inspec)\n raise RuntimeError(message)\n\n if inspecz > 0:\n zblue = 1./(1+inspecz) - 1.\n inspec_rest = inspec.redshift(zblue)\n inspec_rest.convert('flam')\n c = default_cosmology.get()\n mu = c.distmod(inspecz)\n out = inspec_rest*(10.**(0.4*mu.value))\n else:\n out = inspec\n # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is\n # and renormalizes - there's some sanity checking for overlaps\n # we can do this without using it and relying on the .passband routines\n return out",
"def do_add_ink():\n clip = mpy.VideoClip(mix_video_ink, duration=13.0)\n clip.write_videofile(\"test_edited.mp4\", fps=24)",
"def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command",
"def forward(opt):\n my_utils.plant_seeds(randomized_seed=opt.randomize)\n os.makedirs(opt.output_dir, exist_ok=True)\n\n trainer = t.Trainer(opt)\n trainer.build_dataset_train_for_matching()\n trainer.build_dataset_test_for_matching()\n trainer.build_network()\n trainer.build_losses()\n trainer.network.eval()\n\n if opt.eval_list and os.path.isfile(opt.eval_list):\n source_target_files = np.loadtxt(opt.eval_list, dtype=str)\n source_target_files = source_target_files.tolist()\n for i, st in enumerate(source_target_files):\n source, target = st\n cat1, fname1 = source.split('/')\n fname1 = os.path.splitext(fname1)[0]\n cat2, fname2 = target.split('/')\n fname2 = os.path.splitext(fname2)[0]\n if len(opt.shapenetv1_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv1_path, cat1, fname1, \"model.obj\"), os.path.join(opt.shapenetv1_path, cat2, fname2, \"model.obj\"))\n elif len(opt.shapenetv2_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv2_path, cat1, fname1, \"models\", \"model_normalized.obj\"), os.path.join(opt.shapenetv2_path, cat2, fname2, \"models\", \"model_normalized.obj\"))\n elif (opt.eval_source != \"\" and opt.eval_source[-4:] == \".txt\") and (opt.eval_target != \"\" and opt.eval_target[-4:] == \".txt\"):\n source_target_files = [(figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_source), figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_target))]\n\n rot_mat = get_3D_rot_matrix(1, np.pi/2)\n rot_mat_rev = get_3D_rot_matrix(1, -np.pi/2)\n isV2 = len(opt.shapenetv2_path) > 0\n for i, source_target in enumerate(source_target_files):\n basename = get_model_id(source_target[0], isV2) + \"-\" + get_model_id(source_target[1], isV2)\n path_deformed = os.path.join(opt.output_dir, basename + \"-Sab.ply\")\n path_source = os.path.join(opt.output_dir, basename + \"-Sa.ply\")\n path_target = os.path.join(opt.output_dir, basename +\"-Sb.ply\")\n\n mesh_path = source_target[0]\n print(mesh_path)\n source_mesh_edge = get_shapenet_model.link(mesh_path)\n\n mesh_path = source_target[1]\n target_mesh_edge = get_shapenet_model.link(mesh_path)\n\n\n print(\"Deforming source in target\")\n\n source = source_mesh_edge.vertices\n target = target_mesh_edge.vertices\n\n pymesh.save_mesh_raw(path_source, source, source_mesh_edge.faces, ascii=True)\n pymesh.save_mesh_raw(path_target, target, target_mesh_edge.faces, ascii=True)\n\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat)\n target = target.dot(rot_mat)\n\n source = torch.from_numpy(source).cuda().float().unsqueeze(0)\n target = torch.from_numpy(target).cuda().float().unsqueeze(0)\n\n with torch.no_grad():\n source, _, _, _, _ = loss.forward_chamfer(trainer.network, source, target, local_fix=None,\n distChamfer=trainer.distChamfer)\n\n try:\n source = source.squeeze().cpu().detach().numpy()\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat_rev)\n P2_P1_mesh = pymesh.form_mesh(vertices=source, faces=source_mesh_edge.faces)\n pymesh.save_mesh(path_deformed, P2_P1_mesh, ascii=True)\n\n # print(\"computing signal tranfer form source to target\")\n # high_frequencies.high_frequency_propagation(path_source, path_deformed, path_target)\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n path_deformed = path_deformed[:-4] + \".pts\"\n save_pts(path_deformed, source.squeeze().cpu().detach().numpy())",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()",
"def transfer_shaders(source, target):\n if isinstance(source, pm.nt.Transform):\n source_shape = source.getShape()\n else:\n source_shape = source\n\n if isinstance(target, pm.nt.Transform):\n target_shape = target.getShape()\n else:\n target_shape = target\n\n # get the shadingEngines\n shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)\n\n data_storage = []\n\n # get the assigned faces\n for shading_engine in shading_engines:\n faces = pm.sets(shading_engine, q=1)\n for faceGroup in faces:\n str_face = str(faceGroup)\n # replace the objectName\n new_face = \\\n str_face.replace(source_shape.name(), target_shape.name())\n data_storage.append((shading_engine.name(), new_face))\n\n for data in data_storage:\n shading_engine = data[0]\n new_face = data[1]\n pm.select(new_face)\n # now assign the newFaces to the set\n pm.sets(shading_engine, fe=1)",
"def run_lammps(lammps_executable, input_file, output_file):\n # run lammps\n lammps_command = f\"{lammps_executable} -in {input_file} \"\n print(\"run command:\", lammps_command)\n with open(\"tmp2False.out\", \"w+\") as fout:\n subprocess.call(lammps_command.split(), stdout=fout)",
"def main():\n\n usage = \"usage: %prog [options] file\"\n parser = OptionParser(usage)\n\n parser.add_option(\"-d\", \"--dest\", dest=\"filename\",\n help=\"save result in FILENAME, overrides -e, --extension\")\n parser.add_option(\"-v\", \"--verbosity\", dest=\"verbosity\",\n help=\"set verbosity (0: critical, 1: error, 2: warning, 3: info, 4: debug)\")\n parser.add_option(\"-e\", \"--extension\", dest=\"extension\",\n help=\"save result in source filename + EXTENSION\")\n parser.add_option(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", default=False,\n help=\"no optimisations, only parsing and code generation. -o '' is equivalent.\")\n parser.add_option(\"-o\", \"--optimisations\", dest=\"optimisations\", default=None,\n help=\"enable specific optimisations, default is all\")\n\n parser.add_option(\"-m\", \"--meld\", action=\"store_true\", dest=\"meld\", default=False,\n help=\"after optimising open meld to show diff\")\n\n (options, args) = parser.parse_args()\n\n if len(args) != 1:\n parser.error('incorrect number of arguments')\n\n if not options.verbosity:\n options.verbosity = 2\n\n if not options.extension:\n options.extension = '.opt'\n\n if options.test:\n options.optimisations = ''\n \n\n logging_levels = {0: logging.CRITICAL,\n 1: logging.ERROR,\n 2: logging.WARNING,\n 3: logging.INFO,\n 4: logging.DEBUG}\n \n logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-14s %(message)s',\n level=4,\n filename='log',\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging_levels[int(options.verbosity)])\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-7s %(name)-14s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\n logger = logging.getLogger('main')\n logger.info(' '.join(sys.argv[1:]))\n logger.info('opening sourcefile')\n try:\n sourcefile = open(args[0], 'r')\n except IOError:\n print('error: file not found: %s' % args[0])\n exit(1)\n opt = Optimiser(sourcefile.readlines(), options.verbosity, options.optimisations)\n sourcefile.close()\n logger.info('sourcefile closed')\n\n opt.optimise()\n\n if options.filename:\n target_filename = options.filename\n else:\n target_filename = args[0] + options.extension\n\n targetfile = open(target_filename, 'w')\n logger.info('writing optimised assembly to file')\n targetfile.writelines(opt.result())\n targetfile.close()\n\n if options.meld:\n import subprocess\n print args[0], target_filename\n subprocess.call(['meld', args[0], target_filename], shell=False)",
"def increase_aliens_speed(self):\r\n self.alien_speed_factor += 0.01\r\n self.alien_bullet_speed_factor += 0.02",
"def file(self,file):\n self.lib.lammps_file(self.lmp,file.encode('utf-8'))",
"def forward(self, clip):\n if self.mode == \"rgbdiff\":\n clip = self.rgb_diff(clip)\n logits_motion = self.motion_decoder(clip)\n\n return logits_motion",
"def lexEmitter(target, source, env) -> tuple:\n\n sourceBase, sourceExt = os.path.splitext(to_String(source[0]))\n if sourceExt == \".lm\": # If using Objective-C\n target = [sourceBase + \".m\"] # the extension is \".m\".\n\n # With --header-file and ----tables-file, the file to write is defined\n # by the option argument. Extract this and include in the list of targets.\n # NOTE: a filename passed to the command this way is not modified by SCons,\n # and so will be interpreted relative to the project top directory at\n # execution time, while the name added to the target list will be\n # interpreted relative to the SConscript directory - a possible mismatch.\n #\n # These are GNU flex-only options.\n # TODO: recognize --outfile also?\n file_gen_options = [\"--header-file=\", \"--tables-file=\"]\n lexflags = env.subst_list(\"$LEXFLAGS\", target=target, source=source)\n for option in lexflags[0]:\n for fileGenOption in file_gen_options:\n l = len(fileGenOption)\n if option[:l] == fileGenOption:\n # A file generating option is present, so add the\n # file name to the target list.\n file_name = option[l:].strip()\n target.append(file_name)\n\n lexheaderfile = env.subst(\"$LEX_HEADER_FILE\", target=target, source=source)\n if lexheaderfile:\n target.append(lexheaderfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_HEADER_FILE=env.File(lexheaderfile))\n\n lextablesfile = env.subst(\"$LEX_TABLES_FILE\", target=target, source=source)\n if lextablesfile:\n target.append(lextablesfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_TABLES_FILE=env.File(lextablesfile))\n\n return target, source",
"def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile",
"def amsg_source_make(*args, **kwargs):\n return _uhd_swig.amsg_source_make(*args, **kwargs)",
"def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))",
"def to_larva(self, pipeline=True) -> None:\n if \"-larva\" in sys.argv and pipeline:\n file_write(pipe_path(\"larva\"), self.build(), \"a\")\n else:\n print(self.build())",
"def forward(self, text, prev_mel):\n # forward pass through text embedding and get k and v\n kv = self.t_encoder(text)\n k = kv[:,:self.hp.d,:]\n v = kv[:,self.hp.d:,:]\n # forward pass through audio encoding and get Q\n q = self.a_encoder(prev_mel)\n \n # compute attention\n a = (k.transpose(2,1)).matmul(q)/np.sqrt(self.hp.d)\n a = F.softmax(a, dim=1)\n r = v.matmul(a)\n \n # create R' and forward pass through decoder\n # note that the decoder does not have sigmoid transform at the end, so we are actually getting \n # ylogit\n rprime = torch.cat((r, q), dim=1)\n ylogit = self.decoder(rprime)\n y = F.sigmoid(ylogit)\n return y, ylogit, a",
"def update(src):",
"def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)",
"def create_joint_mono_corpus(src_mono_fname, tgt_mono_fname, joint_mono_fname, src_lang, tgt_lang):\n\n with codecs.open(src_mono_fname,'r','utf-8') as srcfile, \\\n codecs.open(tgt_mono_fname,'r','utf-8') as tgtfile, \\\n codecs.open(joint_mono_fname,'w','utf-8') as jointfile : \n\n outlines=[]\n outlines.extend([ l for l in srcfile])\n outlines.extend([ uit.transliterate(l,tgt_lang,src_lang) for l in tgtfile])\n random.shuffle(outlines)\n\n for line in outlines: \n jointfile.write(line)",
"def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)",
"def forward(self, output, target):\n raise NotImplementedError"
]
| [
"0.50153357",
"0.50003153",
"0.49708053",
"0.49008447",
"0.48989916",
"0.48274243",
"0.47542843",
"0.46465498",
"0.4634066",
"0.46137536",
"0.45959595",
"0.4591142",
"0.45887837",
"0.45870468",
"0.45831475",
"0.45617545",
"0.45598978",
"0.4540805",
"0.45056278",
"0.44987753",
"0.44963637",
"0.44737157",
"0.447202",
"0.44698",
"0.44697618",
"0.44676864",
"0.44607717",
"0.44559342",
"0.44556883",
"0.44531962"
]
| 0.75757915 | 0 |
Print the conversion command. | def print_command(self):
self.success = False
command = ['lame', '-h', '--silent']
command.append('-b ' + str(self.bitrate))
command.append(self.source)
command.append(self.target)
print(' '.join(command)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )",
"def print_cmd(ctx, klass=None):\n connecter = ScalingoInterface(ctx.obj)\n connecter.print_cmd()",
"def print_out():\n pass",
"def to_string(self):\r\n return self.command()",
"def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)",
"def print_usage_command(self):\n print self.get_usage_command()",
"def print_usage_command(self):\n print self.get_usage_command()",
"def show_process_message(self):\n\n return \"Converting\"",
"def test_commandRepr(self):\n repr(imap4.Command(b\"COMMAND\", [b\"arg\"], (b'extra')))",
"def print_command(self,command):\n print \"Command (%X): \" % command\n if (command & COMMAND_ENABLE) > 0:\n print \"\\tENABLE\"\n if (command & COMMAND_ENABLE_INTERRUPT) > 0:\n print \"\\tENABLE INTERRUPT\"",
"def print(self):\r\n self.print_avec_separateur()",
"def text_output(self):\n print(self.board)\n print()",
"def test_cli_conversion(self):\n output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)\n # Make sure the output is encoded as HTML.\n assert '<span' in output",
"def _printable(self):\n toPrint = \"Command Header. Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Instruction: \" + str(self.instr) + \" \"\n toPrint = toPrint + \"Notify: \" + str(self.notify) + \" \"\n toPrint = toPrint + \"Block: \" + str(self.block) + \" \"\n toPrint = toPrint + \"Action: \" + str(self.action)\n return toPrint",
"def printhelp_01():\n print('Convert simba3d report output from npz to .mat, or .json')\n print('simba3d-convertion --ext_out [.mat, .json, .txt, or .pdb] <list of files>')",
"def printOutput(self):\n pass",
"def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text",
"def do_print(self, cmd):\n try:\n print(self.EvalExpression(cmd))\n except:\n pass",
"def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')",
"def print(self):\n print(self.pretty_str())",
"def command(self):\n # These are pset variables, (aside from sfh)\n dt = [(\"zred\", \"%.2f\"), (\"zmet\", \"%02i\"), (\"tau\", \"%.10f\"),\n (\"const\", \"%.4f\"), (\"sf_start\", \"%.2f\"), (\"tage\", \"%.4f\"),\n (\"fburst\", \"%.4f\"), (\"tburst\", \"%.4f\"), (\"imf1\", \"%.2f\"),\n (\"imf2\", \"%.2f\"), (\"imf3\", \"%.2f\"), (\"vdmc\", \"%.2f\"),\n (\"mdave\", \"%.1f\"), (\"dust_tesc\", \"%.2f\"), (\"dust1\", \"%.6f\"),\n (\"dust2\", \"%.6f\"), (\"dust_clumps\", \"%.1f\"),\n (\"frac_nodust\", \"%.2f\"), (\"dust_index\", \"%.2f\"),\n (\"mwr\", \"%.2f\"), (\"uvb\", \"%.2f\"), (\"wgp1\", \"%i\"),\n (\"wgp2\", \"%i\"), (\"wgp3\", \"%i\"), (\"dell\", \"%.2f\"),\n (\"delt\", \"%.2f\"), (\"sbss\", \"%.2f\"), (\"fbhb\", \"%.2f\"),\n (\"pagb\", \"%.2f\")]\n cmd = str(self.name) + \" \" + \" \".join([s % self.p[k] for (k, s) in dt])\n return cmd",
"def print(self):\n self.print_avec_separateur(\" \")",
"def printhelp():",
"def _printstr(self, args):\n s = \"\\n\"\n\n for arg in args:\n #s += arg.encode('utf-8', 'pout.replace')\n s += arg\n\n return s",
"def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string",
"def help_dump(self):\n print(DUMP)",
"def _printable(self):\n\n toPrint = \"Xtra Qubit: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Angle Step: \" + str(self.step) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Command Length: \" + str(self.cmdLength)\n\n return toPrint",
"def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)",
"def _print_custom(self):\n pass",
"def __str__(self):\n return self.printable()"
]
| [
"0.6848495",
"0.66727597",
"0.63724464",
"0.6287818",
"0.62833863",
"0.6276899",
"0.6276899",
"0.6243502",
"0.6141594",
"0.6130643",
"0.61209077",
"0.6089271",
"0.60720986",
"0.6064459",
"0.60399777",
"0.6034725",
"0.60216033",
"0.599648",
"0.59858876",
"0.59689856",
"0.59606546",
"0.59604985",
"0.59151864",
"0.58915734",
"0.58911026",
"0.588998",
"0.5864774",
"0.58564585",
"0.5838834",
"0.58324885"
]
| 0.67482394 | 1 |
Create a list of tasks to convert each file in 'from_dir' to a file in 'to_dir'. The leaf directory name is appended to 'to_dir' to create the target file name. | def create_tasks(from_dir, to_dir):
try:
tasks = []
source_files = list_directory(from_dir)
dir_name = get_last_dir(from_dir)
print('dir_name', dir_name)
for file in source_files:
source = join(from_dir, file)
target = join(to_dir, dir_name, file)
t = Task(source, target)
tasks.append(t)
return tasks
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task():\n if os.path.isdir(orig):\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*')) if \\\n os.path.isdir(fP) ]:\n if not os.path.exists(dest + fP[len(orig):]):\n os.makedirs(dest + fP[len(orig):])\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*/%s.log' %fmt.get_date())) if \\\n os.path.isfile(fP) ]:\n convert(fP, dest + fP[len(orig):])",
"def convert_directory(self, local_path, to_format, from_format, conversion_func=None, dataset=None):\n pool = ThreadPool(processes=6)\n for path, subdirs, files in os.walk(local_path):\n for name in files:\n save_to = os.path.join(os.path.split(local_path)[0], to_format)\n save_to = path.replace(local_path, save_to)\n if not os.path.isdir(save_to):\n os.mkdir(save_to)\n file_path = os.path.join(path, name)\n converter = utilities.Converter()\n if dataset is None:\n converter.dataset = self.dataset\n else:\n converter.dataset = dataset\n converter.save_to_format = self.save_to_format\n pool.apply_async(\n func=converter.convert_file,\n kwds={\n \"to_format\": to_format,\n \"from_format\": from_format,\n \"file_path\": file_path,\n \"save_locally\": True,\n \"save_to\": save_to,\n 'conversion_func': conversion_func\n }\n )\n pool.close()\n pool.join()\n pool.terminate()",
"def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if not os.path.exists(to_child_dir):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())",
"def build(self, manager):\n contents = sorted(self.file_src_dest)\n output_files_dir = self.output_files_dir\n all_dest_files = []\n\n for src_file, dest_file in contents:\n all_dest_files += [dest_file]\n rel = dest_file.relative_to(output_files_dir)\n yield self.task(\n name=f\"copy:{rel}\",\n doc=f\"copy {src_file} to {rel}\",\n file_dep=[src_file],\n targets=[dest_file],\n actions=[\n (self.copy_one, [src_file, dest_file]),\n ],\n )\n\n if manager.source_date_epoch is not None:\n yield self.task(\n name=\"timestamp\",\n file_dep=all_dest_files,\n actions=[\n (self.maybe_timestamp, [self.output_files_dir]),\n ],\n )",
"def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)",
"def transform_all_files(in_folder, out_folder):\n if not exists(out_folder):\n mkdir(out_folder)\n all_files = get_all_files_and_nested(in_folder)\n for in_file in all_files:\n out_file_name = in_file.replace(in_folder, out_folder)\n transform_file_to_utf_8_from(in_file, out_file_name=out_file_name)",
"def list(self, from_timestap=None, to_timestap=None):\n tasks = []\n for path in j.sal.fs.listFilesInDir(self._root):\n blob = j.sal.fs.readFile(path)\n task = self._deserialize_task(blob)\n if from_timestap and task.created < from_timestap:\n continue\n if to_timestap and task.created > to_timestap:\n continue\n tasks.append(task)\n return tasks",
"def convert_for_submission(source_dir, target_dir):\r\n files = subfiles(source_dir, suffix=\".nii.gz\", join=False)\r\n maybe_mkdir_p(target_dir)\r\n for f in files:\r\n img = sitk.ReadImage(join(source_dir, f))\r\n out_file = join(target_dir, f[:-7] + \".nii\")\r\n sitk.WriteImage(img, out_file)",
"def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)",
"def movefiles_subjectdirs(sub_dirs, ToProcess):\n \n \n # Create subdirectories\n for subjectDir in sub_dirs:\n os.chdir(subjectDir)\n \n mri_files = glob.glob('*.nii.gz')\n mri_dir_names = []\n \n for mriFile in mri_files:\n split_file = mriFile.split('_')\n from_idx = split_file.index('WIP')\n to_idx = split_file.index('SENSE')\n toAppend = \"_\".join(split_file[from_idx+1:to_idx]) \n mri_dir_names.append(toAppend)\n \n os.mkdir(toAppend)\n shutil.move(mriFile, toAppend)\n \n print \"Created the following subdirs for {0}: \".format(os.path.basename(subjectDir))\n for d in mri_dir_names:\n print d\n print \"\\n\"",
"def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))",
"def convert_treebank(input_dir, output_dir, strategy, subtask):\n\n for f in input_dir.iterdir():\n with open(f, \"r\") as json_file:\n docs = json.load(json_file)\n trees = \"\"\n for doc in docs[\"docs\"]:\n for sent in doc[\"sents\"]:\n graph = sent[\"graph\"]\n if strategy == \"start\":\n tree = traverse_graph_start(graph)\n elif strategy == \"start-without-pos\":\n tree = traverse_graph_start_without_pos(graph)\n elif strategy == \"end\":\n tree = traverse_graph_end(graph)\n elif strategy == \"end-extra-node\":\n tree = traverse_graph_end_extra_node(graph)\n elif strategy == \"start-end-extra-node\":\n tree = traverse_graph_start_end_extra_node(graph)\n elif strategy == \"start-end-extra-node-heuristic\":\n tree = traverse_graph_start_end_extra_node_heuristic(graph) \n if subtask:\n tree = subtask_prune(tree)\n tree_string = get_string(tree)\n trees += tree_string + \"\\n\"\n with open(output_dir.joinpath(f.name).with_suffix(\".txt\"), \"w+\") as tree_files:\n tree_files.write(trees)",
"def main_convert():\n\n verbose = True\n\n # Build parser.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fname_pattern', action='store', help='File name pattern')\n parser.add_argument('-R', '--recursive', action='store_true', default=True,\n help='Search several subdirectories')\n\n # Run parser, extract arguments.\n args = parser.parse_args()\n\n # List of files.\n pattern = os.path.normpath(unicode(args.fname_pattern))\n\n if os.path.isdir(pattern):\n pattern = os.path.join(pattern, '*')\n fname_list = glob.glob(pattern)\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n else:\n fname_list = glob.glob(pattern)\n\n to_be_removed = []\n for f in fname_list:\n if os.path.isdir(f):\n to_be_removed.append(f)\n\n for f in to_be_removed:\n fname_list.remove(f)\n\n # Do the work.\n num_files = len(fname_list)\n for k, f_src in enumerate(fname_list):\n f_src = os.path.abspath(f_src)\n\n b_src, e = os.path.splitext(f_src)\n\n folder = os.path.basename(os.path.dirname(f_src))\n if (e == '.mp3' or e == '.wma' or e == '.wav' or e == '.aiff') and b_src != 'tmp' and folder != '.audio_convert':\n\n if verbose:\n try:\n print('%3d/%d: [%s -> .m4a] %s' % (k, num_files, e, os.path.basename(b_src)))\n except Exception as e:\n val = repr(f_src)\n raise Exception('Problem processing file: %s' % val)\n\n # Temporary working copy.\n path_work = os.path.dirname(f_src)\n f_tmp_src = os.path.join(path_work, 'tmp' + e)\n shutil.copy(f_src, f_tmp_src)\n\n # Transcode file format.\n f_tmp_dst = convert(f_tmp_src, verbose=verbose)\n\n # Finish.\n b_tmp_dst, e_dst = os.path.splitext(f_tmp_dst)\n\n f_dst = b_src + e_dst\n if os.path.isfile(f_dst):\n os.remove(f_dst)\n os.rename(f_tmp_dst, f_dst)\n\n if os.path.isfile(f_tmp_src):\n os.remove(f_tmp_src)\n\n if os.path.isfile(f_dst):\n move_processed_file(f_src)\n\n # Done.",
"def make_files(dir_in, dir_out):\n try:\n listaFisiere = os.listdir(f\"{dir_in}\")\n except Exception as eroare:\n print(\"Path to input file is invalid, exiting...\")\n quit()\n if not os.path.exists(f\"{dir_out}\"):\n os.mkdir(f\"{dir_out}\")\n paths_out = []\n for numeFisier in listaFisiere:\n numeFisierOutput=\"output_\"+numeFisier\n f=open(f\"{dir_out}/\"+numeFisierOutput,\"w\")\n paths_out.append(f\"{dir_out}/\"+numeFisierOutput)\n f.close()\n for i in range(len(listaFisiere)):\n listaFisiere[i] = dir_in + \"/\" + listaFisiere[i]\n return listaFisiere, paths_out",
"def convert (self, lossless=False):\n self._has_errors = False\n if self._progress:\n max_val = 0\n for root, dirs, files in os.walk(self._in_dir):\n max_val += len(files)\n self._bar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()],\n maxval=max_val).start()\n pool = multiprocessing.Pool()\n command = CONVERT_TO_JP2_LOSSY\n if lossless:\n command = CONVERT_TO_JP2_LOSSLESS\n for root, dirs, files in os.walk(self._in_dir):\n out_rel_path = os.path.relpath(root, self._in_dir)\n out_full_path = os.path.abspath(\n os.path.join(self._out_dir, out_rel_path))\n try:\n os.mkdir(out_full_path)\n except OSError:\n # It is not an error for the directory to already exist.\n pass\n for name in files:\n basename = os.path.splitext(name)[0]\n in_file = os.path.join(root, name)\n base_out_file = os.path.join(out_full_path, basename)\n tiff_file = '%s.tif' % base_out_file\n jp2_file = '%s.jp2' % base_out_file\n if self._force or not(os.path.isfile(jp2_file)):\n params = (in_file, tiff_file, jp2_file, command)\n pool.apply_async(self._convert, params,\n callback=self._result_callback)\n elif self._progress:\n self._bar.update(self._bar.currval + 1)\n pool.close()\n pool.join()\n if self._progress:\n self._bar.finish()\n return not(self._has_errors)",
"def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])",
"def make_targets(dir_name, method, *args):\n roots = [\n \"-\".join([str(c) for c in comb])\n for comb in product(*args)\n ]\n\n return [f\"{dir_name}/{method}-{root}\" for root in roots]",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def create_new_directories_for_filter_reads(in_dir, out_dir):\n for sub_in_dir in get_all_sub_directories(in_dir):\n # make dir if it doesnt exist\n sub_out_dir = os.path.join(out_dir, os.path.basename(sub_in_dir))\n if not os.path.isdir(sub_out_dir):\n os.mkdir(sub_out_dir)\n yield sub_in_dir, sub_out_dir",
"def copy(from_dir: tfds.typing.PathLike, to_dir: tfds.typing.PathLike) -> None:\n for full_name in tfds.core.load.list_full_names():\n from_full_name_dir = os.path.join(from_dir, full_name)\n to_full_name_dir = os.path.join(to_dir, full_name)\n\n # Skip if the dataset isn't generated or that metadata are already copied\n if not tf.io.gfile.exists(from_full_name_dir):\n logging.info('Skipping %s (not found)', from_full_name_dir)\n continue\n if tf.io.gfile.exists(to_full_name_dir) and not FLAGS.overwrite:\n logging.info('Skipping %s (already exists)', to_full_name_dir)\n continue\n\n _copy_metadata(from_dir=from_full_name_dir, to_dir=to_full_name_dir)",
"def convert_all(base_path: Path,\n dest_path: Path,\n count: int,\n max_size: int) -> None:\n os.makedirs(DEST_FOLDER, exist_ok=True)\n os.makedirs(CONVERTED_VIDEOS_FOLDER, exist_ok=True)\n\n processes_count = mp.cpu_count() - 1 or 1\n with mp.Pool(processes_count) as pool:\n pool.starmap(\n convert_file_to_mp4, files(base_path, dest_path, count, max_size)\n )",
"def convert_all(input: str, out: str):\n dateien = listdir(input)\n for datei in dateien:\n out_datei = datei.replace(\" \", \"_\") # Leertasten durch Unterstriche ersetzen\n convert_image(input + datei, out + out_datei)",
"def copy_images_to_new(lab_img, from_dir, to_dir):\n \n for img in lab_img:\n if not os.path.exists(join(todir, img)):\n shutil.copyfile(join(fromdir, img), join(todir, img)) \n print(\"Done\")",
"def copy_files_and_create_dirs(files) -> None:\r\n for file in files:\r\n target_dir_name = os.path.dirname(file[1])\r\n\r\n # will create all intermediate-level directories\r\n if not os.path.exists(target_dir_name):\r\n os.makedirs(target_dir_name)\r\n\r\n shutil.copyfile(file[0], file[1])",
"def run_conversion(\n names, \n folder_in, \n folder_out, \n color_diff=20, \n crop_threshold=0,\n shape=(32, 32, 32),\n interpolate_method=\"linear\",\n threads=1,\n ):\n interpolator = Interpolator(shape=shape, method=interpolate_method)\n def get_args():\n for name in names:\n filename_in = os.path.join(folder_in, name + \".mp4\")\n filename_out = os.path.join(folder_out, name + \".npy\")\n yield (filename_in, filename_out, \n color_diff, crop_threshold, interpolator)\n os.makedirs(folder_out, exist_ok=True)\n count_success = 0\n progress = tqdm(total=len(names))\n if threads == 1:\n for arg in get_args():\n count_success += save_video_to_volume(*arg)\n progress.update()\n else:\n pool = Pool(processes=threads)\n for res in pool.imap_unordered(save_video_to_volume_p, get_args()):\n count_success += res\n progress.update()\n pool.close()\n pool.join()\n progress.close()\n return count_success",
"def to_sources(todos):\n for subtodos in todos.iter_sourced():\n to_path(subtodos, subtodos.get_source())",
"def copy_new_files(self, out_dir, answer_dir, filenames):\n if not answer_dir.exists():\n answer_dir.mkdir(parents=True)\n for filename in filenames:\n fromfile = out_dir / filename\n tofile = answer_dir / filename\n shutil.copyfile(fromfile, tofile)",
"def apply_dart(self):\n shutil.copyfile(self.env['DART_JS_BOOTSTRAP'], self.outdir.make_node('dart.js').abspath())\n for filetype in ['dartfiles','jsfiles','htmlfiles','cssfiles','otherfiles']:\n files = getattr(self, filetype)\n for f in files:\n if f.is_bld():\n outf = self.outdir.make_node(f.path_from(self.path.get_bld()))\n elif f.is_src():\n outf = self.outdir.make_node(f.path_from(self.path.get_src()))\n else:\n raise Exception(\"I don't know what I'm doing anymore.\")\n self.create_task('copytask',f,outf)",
"def applyDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if srcExt in exts:\n if not os.path.exists(destDir):\n os.makedirs(destDir)\n shutil.copyfile(srcPath,destPath)\n if self.progress: \n self.cumSize += os.path.getsize(srcPath)\n self.progress(self.cumSize,_('Copying Files...'))\n elif os.path.isdir(srcPath):\n self.applyDir(srcPath,destPath,exts)",
"def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')"
]
| [
"0.7332554",
"0.6948502",
"0.6218275",
"0.595692",
"0.5839352",
"0.5757753",
"0.57038224",
"0.56779176",
"0.5652623",
"0.5578878",
"0.5547265",
"0.54988056",
"0.5482269",
"0.54351574",
"0.53682166",
"0.5311556",
"0.5300187",
"0.529343",
"0.5276933",
"0.5270991",
"0.5238482",
"0.52351344",
"0.5226692",
"0.52135724",
"0.5202185",
"0.51964104",
"0.5187638",
"0.5144876",
"0.5138662",
"0.51233274"
]
| 0.80804676 | 0 |
Get the name of the last directory in the path. | def get_last_dir(path):
head, tail = split(path)
while not tail:
head, tail = split(head)
return tail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dirname(self, dirpath):\n return dirpath.split('/')[-1]",
"def get_last_part_of_path(path: str) -> str:\n multi_os_path = path.replace(\"\\\\\", \"/\")\n return re.search(\"(?:[^/](?!/))+$\", multi_os_path).group(0)",
"def get_leafname(path):\n\n\tpos = string.rfind(path, os.sep)\n\tif pos != -1:\n\t\treturn path[pos+1:]\n\telse:\n\t\treturn path",
"def get_directory(path):\n return mangle_path(path).rsplit('/',1)[0]",
"def Dirname(self):\n result = self.Copy()\n\n while 1:\n last_directory = posixpath.dirname(result.last.path)\n if last_directory != \"/\" or len(result) <= 1:\n result.last.path = last_directory\n # Make sure to clear the inode information.\n result.last.inode = None\n\n break\n\n result.Pop(-1)\n\n return result",
"def get_last_path(self):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n spec_path = self.data_root_path / folders[0]\n logging.info('Last download folder was %s', spec_path)\n return spec_path",
"def dirname(path):\r\n return split(path)[0]",
"def Dir(path=None):\n global _last_files\n if path:\n _last_files = glob.glob(path)\n if _last_files:\n return os.path.split(_last_files.pop(0))[1] # VB just returns the filename, not full path\n else:\n return \"\"",
"def getDirectoryFilename(path):\n\tfrom os.path import splitext\n\tpath = normalizePath(path)\n\treturn splitext(path)[0]",
"def get_name(path):\n return path.rsplit('/',1)[1]",
"def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]",
"def path_name(self, path):\r\n ind = path.rfind(\"/\") + 1\r\n return (path[:ind], path[ind:])",
"def path_leaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)",
"def getLastPath(self):\n return self.getSection(CFG_GENERAL, CFG_LASTPATH)",
"def pathLeaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)",
"def get_filename(path):\n return path.split('/')[-1]",
"def path_leaf(path):\n\thead, tail = ntpath.split(path)\n\treturn tail or ntpath.basename(head)",
"def basename(path):\r\n return path.replace(\"\\\\\", \"/\").split(\"/\")[-1]",
"def getDirectory(path):\n\tfrom os.path import split\n\tpath = normalizePath(path)\n\treturn split(path)[0]",
"def GcsDirname(path):\n return os.path.dirname(path)",
"def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path",
"def basename(path):\n\n return path.rpartition(\"/\")[2]",
"def dirname(path: str) -> str:\n pass",
"def extract_dir_name(input_file):\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name",
"def dirname(path):\n return os.path.dirname(path)",
"def get_last_time_step(dir):\n\n return str(max([fd for fd in listdir(dir) if fd.isnumeric()]))",
"def lastPath(self, toNative=True):\n return self.paths(toNative=toNative)[-1]",
"def get_dir(path):\n extension = path.suffix\n if extension == '':\n return path\n else:\n return path.parent",
"def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]",
"def basename(path):\r\n return split(path)[1]"
]
| [
"0.7505416",
"0.74623406",
"0.74319196",
"0.73088986",
"0.7307475",
"0.7292277",
"0.7252292",
"0.7176553",
"0.71378094",
"0.7074937",
"0.7073228",
"0.7056331",
"0.70458376",
"0.70192856",
"0.6981031",
"0.69700545",
"0.69595456",
"0.6891311",
"0.6724386",
"0.67243403",
"0.672319",
"0.67166466",
"0.6715456",
"0.6682695",
"0.66823405",
"0.66424143",
"0.6613437",
"0.6608526",
"0.66081375",
"0.65929615"
]
| 0.83793795 | 0 |
Print the argument list, if messages are enabled. | def msg(*args):
if messages_on:
print(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_args():\r\n args = \", \".join(sys.argv)\r\n print(\"pfArgs: \" + args)",
"def print_args():\n for key, value in vars(ARGS).items():\n print(key + ' : ' + str(value))",
"def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)",
"def add_arguments(self, parser):\n parser.add_argument('--print', action='store_true', required=False, help='Print details')",
"def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)",
"def printv(self, *arg):\n if self.verbose:\n print(*arg)",
"def print_msg(*vargs, **kwargs):\n print(*vargs, **kwargs)",
"def print_verbose(args, msg):\n if args.verbose:\n print(msg)",
"def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)",
"def debug(*args):\n for arg in args:\n print(arg, file=stderr)",
"def test():\n v_print(1, \"-vvv Verbose 1 - INFO\")\n v_print(2, \"-vv Verbose 2 - WARN\")\n v_print(3, \"-v Verbose 3 - ERROR\")",
"def pprint(self, level: int, *values):\n if abs(self.max_verbosity - level + 1) < self.verbosity:\n print(*values)",
"def pacman_msg(*args, **kwargs):\n msg = YELLOW + ':: informant: ' + CLEAR\n for arg in args:\n msg += arg\n print(msg, **kwargs)",
"def debug(*text):\n if False:\n # if True:\n print(' '.join(str(t) for t in text))",
"def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)",
"def print_args(args):\n _args = vars(args)\n max_length = max([len(k) for k, _ in _args.items()])\n for k, v in _args.items():\n print(' ' * (max_length - len(k)) + k + ': ' + str(v))",
"def print_commandline_arguments(argument_list: list) -> None:\n if len(argument_list) == 0:\n print('print_commandline_arguments(): Error, empty argument_list passed, exiting.')\n exit(1)\n\n print('\\nScript name: ' + argument_list[0])\n if len(argument_list) == 1:\n print('Command line arguments: [none]')\n else:\n print('Command line arguments: '\n + ' '.join([str(arg) for arg in argument_list[1:]])\n + '\\n')\n return",
"def debug_logger(*args):\n\n for x in args:\n print(repr(x))",
"def PrintOurUsage():\n print 'Stub script %s (auto-generated). Options:' % sys.argv[0]\n print ('--helpstub '\n 'Show help for stub script.')\n print ('--debug_binary '\n 'Run python under debugger specified by --debugger.')\n print ('--debugger=<debugger> '\n \"Debugger for --debug_binary. Default: 'gdb --args'.\")\n print ('--debug_script '\n 'Run wrapped script with python debugger module (pdb).')\n print ('--show_command_and_exit '\n 'Print command which would be executed and exit.')\n print ('These options must appear first in the command line, all others will '\n 'be passed to the wrapped script.')",
"def print_usage(arg_processor):\n min_width = max(map(lambda x : len(x),\\\n arg_processor.program_arg_order + arg_processor.program_flag_order))\n print \"Usage: python {} \".format(sys.argv[0])\n print \"The following flags and arguments can be supplied:\"\n print \"Flags:\"\n for flag in arg_processor.program_flag_order:\n print \" {:<{}} : {}\".format(flag, min_width,\n arg_processor.program_flags[flag].description)\n print \"Arguments:\"\n for arg in arg_processor.program_arg_order:\n if arg_processor.program_args[arg].validator != None:\n advice_str = arg_processor.advice_functions[\\\n arg_processor.program_args[arg].validator](\\\n arg_processor.program_args[arg].validator_args)\n else:\n advice_str = \"\"\n if arg_processor.program_args[arg].needed:\n print \" {:<{}} : {}{}\".format(arg, min_width,\n arg_processor.program_args[arg].description, advice_str)\n else:\n print \" {:<{}} : {}{} [optional, default: {}]\".format(arg,\n min_width, arg_processor.program_args[arg].description,\n advice_str, arg_processor.program_args[arg].default_value)\n sys.exit(0)",
"def say(self, msg, *args):\n print \">>> %s\" % re.sub(re.compile('\\^[0-9]'), '', msg % args).strip()",
"def show_parameters(args):\n\n logging.basicConfig(format='%(message)s', level=args.logging)\n\n logging.info('\\n#{0}'.format('-'*60))\n logging.info('BUILD CONFIG : {0}'.format(args.config))\n logging.info('BUNDLE FILE : {0}'.format(args.bfile))",
"def _print_output(*args):\n for arg in args:\n print(arg)\n print('\\n')",
"def out(*args):\r\n print(*args)",
"def v_print(msg):\n if (VERBOSE == 1):\n print(msg)",
"def print_messages(self):\n if self.messages:\n self.messages.append(\"\")\n sys.stderr.write(os.linesep.join(self.messages))",
"def _PRINT_DEBUG(*args):\n print(sys.stderr, args, file=sys.stderr)",
"def _handle_debug_argument(self, arguments):\n if '--debug' in arguments:\n LOGGER.setLevel(DEBUG)\n LOGGER.debug('Logging of debug messages enabled.')\n return [argument for argument in arguments if argument != '--debug']\n return arguments",
"def log(*argv):\n\tmsg = ''\n\tfor i in argv:\n\t\tmsg += i + ' '\n\tprint(msg)",
"def printer(message):\n if VERBOSITY:\n pprint(message)"
]
| [
"0.6971768",
"0.67458904",
"0.6694167",
"0.6375415",
"0.632234",
"0.6310823",
"0.62469834",
"0.61811984",
"0.6164702",
"0.61581194",
"0.6086061",
"0.6059611",
"0.60574204",
"0.60503286",
"0.6039692",
"0.60293937",
"0.59743184",
"0.59670794",
"0.59340376",
"0.5925582",
"0.5907069",
"0.5901256",
"0.5897251",
"0.5883141",
"0.5881814",
"0.5857513",
"0.5855752",
"0.58547354",
"0.5852206",
"0.585114"
]
| 0.75241137 | 0 |
Verify that the token is mapped with a leave request. | def _check_leave_request(self, cr, uid, request, token, context=None):
holidays_obj = request.registry['hr.holidays']
holidays_ids = holidays_obj.search(cr, uid, [
('token', '=', token)
])
if len(holidays_ids) == 0:
return request.website.render(
"tk_hr_approve_request.leave_request_not_found"
)
_id = holidays_ids[0] if len(holidays_ids) else None
if _id:
leave_request = holidays_obj.browse(
cr, uid, _id, context=context
)
return leave_request | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )",
"def leave_request_accept(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('validate')\n if res.state == 'validate':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_accepted\"\n )",
"def verify_token(event):\n if event['token'] != VERIFICATION_TOKEN:\n print('Presented with invalid token - ignoring message...')\n return False\n return True",
"def check_token_invalidate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/invalidate/\"\n return self._lr_object._get_json(url, payload)",
"def verify_token(self, token):\n return False",
"def verify_reset_token(self, token):\n\n expired, invalid, data = self._verify_token(token)\n if data and data.get('id') == self.id and data.get('op') == 'reset':\n data = True\n else:\n data = False\n return expired, invalid, data",
"def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False",
"def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()",
"def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time",
"def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"async def leave(self):\n return await self._state.leave_team(self.id)",
"def check_token(self):\n return config.outlook_token is not None",
"def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True",
"async def validate_token(self, token):",
"def auth_logout(token):\n if verify_token(token):\n return { \"is_success\": True }\n else:\n raise AccessError(description=\"Logout failed. Token is invalid\")",
"def expired(token):\n token = session.query(PasswordRecoveryToken)\\\n .filter(PasswordRecoveryToken.token == token)\\\n .first()\n return token.expiration < datetime.now()",
"def token_is_stale(self):\n return self.m_token_expiry < datetime.datetime.now(tz=pytz.utc)",
"def is_expired(self, token: str) -> bool:\n try:\n decoded_token = jwt.decode(token, options=self._options)\n except jwt.ExpiredSignatureError: # type: ignore\n return True\n else:\n if decoded_token['exp'] - time.time() >= self.renew_buffer:\n # If the token will expire in less than cls._renew_buffer amount of time in seconds, the token is\n # considered expired.\n return True\n else:\n return False",
"def is_token_revoked(decoded_token):\n jti = decoded_token['jti']\n token = BlacklistedToken.query.filter_by(jti=jti).first()\n return token is not None",
"def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")",
"def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )",
"def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()",
"def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True",
"def _is_oauth_token_valid(token: dict, time_key=\"expires_on\") -> bool:\n if \"access_token\" not in token or token.get(\"token_type\", \"\") != \"Bearer\" or time_key not in token:\n raise AirflowException(f\"Can't get necessary data from OAuth token: {token}\")\n\n return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)",
"def check_token(token):\n token = db.session.query(Token).filter(Token.token==token).first()\n if token == None:\n return False\n #TODO token lifetime\n #if (datetime.datetime.now() - token.date >= datetime.timedelta(day=2)):\n # return False \n return True",
"def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False",
"def leave(self, fsm):\n pass",
"def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")",
"def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])",
"def test_rejects_expired_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n utils.set_utcnow_for_test(XsrfToolTests.TEST_NOW +\n datetime.timedelta(hours=4, minutes=1))\n self.assertFalse(tool.verify_token(token, 12345, 'test_action'))"
]
| [
"0.6704487",
"0.6437914",
"0.6411729",
"0.6111032",
"0.6050588",
"0.59361327",
"0.5914343",
"0.57987326",
"0.5751445",
"0.57407093",
"0.57377267",
"0.5708885",
"0.56781447",
"0.56609726",
"0.5640214",
"0.56348395",
"0.56014645",
"0.5599922",
"0.5575192",
"0.55741096",
"0.55704963",
"0.5563669",
"0.5562311",
"0.5540957",
"0.55348843",
"0.55268073",
"0.54654855",
"0.54632765",
"0.54549325",
"0.54503405"
]
| 0.674709 | 0 |
Accept the leave request | def leave_request_accept(self, token, **kwargs):
cr, uid, context = self._get_cr_uid_context()
res = self._check_leave_request(
cr, uid, request, token, context=context
)
if isinstance(res, http.Response):
return res
if res:
res.signal_workflow('validate')
if res.state == 'validate':
return request.website.render(
"tk_hr_approve_request.leave_request_accepted"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )",
"def _check_leave_request(self, cr, uid, request, token, context=None):\n holidays_obj = request.registry['hr.holidays']\n holidays_ids = holidays_obj.search(cr, uid, [\n ('token', '=', token)\n ])\n\n if len(holidays_ids) == 0:\n return request.website.render(\n \"tk_hr_approve_request.leave_request_not_found\"\n )\n\n _id = holidays_ids[0] if len(holidays_ids) else None\n if _id:\n leave_request = holidays_obj.browse(\n cr, uid, _id, context=context\n )\n return leave_request",
"def leave(self):\n self.pleaseQuit=1",
"def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)",
"def _validate_leave_request(self):\n\t\tfor holiday in self.filtered (lambda request: request.type == 'remove' and request.holiday_type == 'employee'):\n\t\t\tmeeting_values = holiday._prepare_holidays_meeting_values ()\n\t\t\tmeeting = self.env['calendar.event'].with_context (no_mail_to_attendees=True).create (meeting_values)\n\t\t\tholiday.write ({'meeting_id': meeting.id})\n\t\t\tholiday._create_resource_leave ()",
"async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )",
"def OnLeaveEpisode(self):\n pass",
"def leave(ctx, network):\n return _leave(ctx.obj['client'], network)",
"def on_leave(data):\r\n\r\n username = data['username']\r\n room = data['room']\r\n leave_room(room)\r\n send({\"msg\": username + \" has left the room\"}, room=room)",
"async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()",
"def on_leave(self):\n\n self.check_connection.cancel()",
"def leave(self, fsm):\n pass",
"def handle_accept(self):\r\n pass",
"def on_leave(data):\n logger.info(f\"Leaving: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n leave_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")",
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)",
"def on_leave(self):\n\n self.date_time.cancel()\n\n try:\n self.listen.cancel()\n except:\n pass\n\n # self.watch.cancel()\n # self.prtsc.cancel()",
"def _onOk(self):\n\n self.accepted = True\n self.close()",
"def _onOk(self):\n\n self.accepted = True\n self.close()",
"def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)",
"async def giveaway(self, ctx):\n\n pass",
"async def giveaway(self, ctx):\n\n pass",
"def on_reject(self):\n self.state = REJECTED\n self._reject()",
"def leaveEvent(self, event):\n self.destroy()",
"async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))",
"def post(self):\n if not arearesilience.imLeader():\n # It's not like I don't want you to send me messages or anything, b-baka!\n return {'deviceID': agentstart.deviceID, 'backupPriority': arearesilience.PRIORITY_ON_FAILURE}, 405\n\n deviceIP = '' if 'deviceIP' not in api.payload else api.payload['deviceIP']\n correct, priority = arearesilience.receive_keepalive(api.payload['deviceID'], deviceIP)\n LOG.debug('Device {} has sent a keepalive. Result correct: {}, Priority: {}, deviceIP: {}'.format(api.payload['deviceID'],correct,priority,deviceIP))\n if correct:\n # Authorized\n return {'deviceID': agentstart.deviceID, 'backupPriority': priority}, 200\n else:\n # Not Authorized\n return {'deviceID': agentstart.deviceID, 'backupPriority': priority}, 403",
"async def leave(self):\n request = self._state.leave_thread(self.id)\n await request",
"def leave_notify_event(self, widget, event):\n self.logger.debug(\"leaving widget...\")\n return self.make_callback('leave')",
"def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None",
"def frameLeave(self):\n try:\n self.contentFrame.currFrame.leave()\n except AttributeError:\n pass"
]
| [
"0.6880239",
"0.6516881",
"0.6185665",
"0.61551434",
"0.6062611",
"0.6054114",
"0.60301316",
"0.5909732",
"0.5860862",
"0.58154184",
"0.5806893",
"0.5784919",
"0.56871283",
"0.5668547",
"0.56303436",
"0.5545764",
"0.5517316",
"0.54911333",
"0.54911333",
"0.5460764",
"0.5457676",
"0.5457676",
"0.54406655",
"0.54218715",
"0.53946024",
"0.5363872",
"0.53627783",
"0.53578115",
"0.5350617",
"0.5332907"
]
| 0.74375314 | 0 |
Refuse the leave request | def leave_request_decline(self, token, **kwargs):
cr, uid, context = self._get_cr_uid_context()
res = self._check_leave_request(
cr, uid, request, token, context=context
)
if isinstance(res, http.Response):
return res
if res:
res.signal_workflow('refuse')
if res.state == 'refuse':
return request.website.render(
"tk_hr_approve_request.leave_request_refused"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leave(self):\n self.pleaseQuit=1",
"def on_leave(self):\n\n self.check_connection.cancel()",
"def leave(self):\n self.__alive = False\n self.__successor = self\n self.__successor_next = self",
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"def force_stop(self):\n #cancel any current request:\n self._cancel_current_request()",
"def cog_unload(self):\n self.resend_post.cancel()",
"def on_leave(self):\n\n self.date_time.cancel()\n\n try:\n self.listen.cancel()\n except:\n pass\n\n # self.watch.cancel()\n # self.prtsc.cancel()",
"async def leave(self):\n request = self._state.leave_thread(self.id)\n await request",
"def leave(self, fsm):\n pass",
"def leave(ctx, network):\n return _leave(ctx.obj['client'], network)",
"def leave_request_accept(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('validate')\n if res.state == 'validate':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_accepted\"\n )",
"def stay(self):\n\n pass",
"def lost(self):\r\n return None",
"async def _leave(self, ctx: commands.Context):\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Combat screen.')\n self.updater.cancel() # Clear the event interval.\n self.stop_soundtrack()",
"def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)",
"def kill(self):\n if self.living == True:\n self.living = False\n self.arrow_enter_callback(self)",
"def forget(self, response, request):\n pass",
"def leaveEvent(self, event):\n self.destroy()",
"def unaway(self):\n self.away()",
"def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()",
"def reject_waiting_call(self) -> None:",
"def OnLeaveEpisode(self):\n pass",
"async def giveaway(self, ctx):\n\n pass",
"async def giveaway(self, ctx):\n\n pass",
"def cancel(self):",
"def cancel(self):",
"def cancel(self):",
"def Take_Off_Done(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def on_reject(self):\n self.state = REJECTED\n self._reject()"
]
| [
"0.70730305",
"0.6811757",
"0.6799905",
"0.6524368",
"0.63409483",
"0.63367146",
"0.63284117",
"0.62440395",
"0.6219235",
"0.6180015",
"0.6151843",
"0.614144",
"0.6127388",
"0.61022913",
"0.6079791",
"0.607551",
"0.60644406",
"0.6062947",
"0.60620743",
"0.60315233",
"0.6029595",
"0.6019282",
"0.5994994",
"0.5990462",
"0.5990462",
"0.5983838",
"0.5983838",
"0.5983838",
"0.5975254",
"0.59668213"
]
| 0.7075348 | 0 |
Thinning an image using morphological operations | def thinning_morph(image, kernel):
thining_image = np.zeros_like(image)
img = image.copy()
while 1:
erosion = cv.erode(img, kernel, iterations = 1)
dilatate = cv.dilate(erosion, kernel, iterations = 1)
subs_img = np.subtract(img, dilatate)
cv.bitwise_or(thining_image, subs_img, thining_image)
img = erosion.copy()
done = (np.sum(img) == 0)
if done:
break
# shift down and compare one pixel offset
down = np.zeros_like(thining_image)
down[1:-1, :] = thining_image[0:-2, ]
down_mask = np.subtract(down, thining_image)
down_mask[0:-2, :] = down_mask[1:-1, ]
cv.imshow('down', down_mask)
# shift right and compare one pixel offset
left = np.zeros_like(thining_image)
left[:, 1:-1] = thining_image[:, 0:-2]
left_mask = np.subtract(left, thining_image)
left_mask[:, 0:-2] = left_mask[:, 1:-1]
cv.imshow('left', left_mask)
# combine left and down mask
    cv.bitwise_or(down_mask, left_mask, thining_image)
output = np.zeros_like(thining_image)
output[thining_image < 250] = 255
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def thinning (input_path, output_path):\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n os.makedirs(output_path)\n img_fn_list = get_images(input_path)\n epsilon = 0.0001\n for img_fn in img_fn_list:\n print('===============')\n print(img_fn)\n start = time.time()\n try:\n img_gray = cv2.imread(img_fn,cv2.IMREAD_GRAYSCALE)\n except:\n print(\"Error reading image {}!\".format(img_fn))\n continue\n # swap the color from black white to white black\n img= cv2.subtract(255, img_gray)\n\n img1 = img.copy()\n # Structuring Element\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n # Create an empty output image to hold values\n thin = np.zeros(img.shape,dtype='uint8')\n \n # Loop until erosion leads to an empty set\n while (cv2.countNonZero(img1)!=0):\n # Erosion\n erode = cv2.erode(img1,kernel)\n # Opening on eroded image\n opening = cv2.morphologyEx(erode,cv2.MORPH_OPEN,kernel)\n # Subtract these two\n subset = erode - opening\n # Union of all previous sets\n thin = cv2.bitwise_or(subset,thin)\n # Set the eroded image for next iteration\n img1 = erode.copy()\n cv2.imwrite(os.path.join(output_path, os.path.basename(img_fn)), thin)",
"def normalise(image):",
"def thin(image, n_iter=None):\n return _bwmorph_luts(image, THIN_LUTS, n_iter=n_iter)",
"def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled",
"def process_image(image):\n \n # (step 1) get gray image\n gray = grayscale(image)\n \n # (step 2) do gaussian blur with kernel size is 3\n blur_gray = gaussian_blur(gray, 3)\n \n # (step 3) do canny edge detction with low 50 and hight 150\n canny_edges = canny(blur_gray, 50, 150)\n \n # (step 4) region of interset\n imshape = image.shape\n left_bottom = (50,imshape[0])\n right_bottom = (imshape[1]-50,imshape[0])\n left_top = (420, 330)\n right_top = (imshape[1]-420, 330)\n # used later to discard lines which are out of the ROI\n polygon = Polygon([(50,imshape[0]+1),(imshape[1]-50,imshape[0]+1), (imshape[1]-420, 329), (420, 329)])\n vertices = np.array([[left_bottom,left_top, right_top, right_bottom]], dtype=np.int32)\n masked_edge = region_of_interest(canny_edges, vertices)\n \n # (step 5) get lane lines from hough transform\n rho = 2\n theta = np.pi/18 \n threshold = 15\n min_line_length = 10\n max_line_gap = 20\n lines = hough_lines(masked_edge, rho, theta, threshold, min_line_length, max_line_gap)\n \n # (step 6) seperate left and right lines\n left_lines = []\n right_lines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n if y1 > y2:\n temp_line = [x1,y1,x2,y2]\n if x2 != x1:\n m = (float(y2) - float(y1)) / (float(x2) - float(x1))\n else:\n m = 1000 # it will be dicarded, any high value will work\n temp_line.append(m)\n if x1 < x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n else:\n temp_line = [x2,y2,x1,y1]\n if x2 != x1:\n m = (float(y1) - float(y2)) / (float(x1) - float(x2))\n else:\n m = 1000\n temp_line.append(m)\n if x1 > x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n \n # (step 7) get left and right lines slopes, can be done with step 6 although\n left_slop = []\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; \n if x1 != x2:\n left_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_left_slop = sum(left_slop)/len(left_slop) # not used yet\n \n right_slop = []\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; \n if x1 != x2:\n right_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_right_slope = sum(right_slop)/len(right_slop) # not used yet\n \n \n # (step 8) delete left lines which deviate from thersold_s slope\n thersold_s = 0.4\n delet_left_index = []\n i = 0\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; m = left_line[4]; \n if abs(m) < thersold_s:\n delet_left_index.append(i)\n i=i+1\n for i in range((len(delet_left_index)-1), -1, -1):\n del left_lines[delet_left_index[i]]\n \n # (step 9) delete right lines which deviate from average slope\n delet_index_right = []\n i = 0\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; m = right_line[4]; \n if abs(m) < thersold_s:\n delet_index_right.append(i)\n i=i+1\n for i in range((len(delet_index_right)-1), -1, -1):\n del right_lines[delet_index_right[i]]\n \n # (step 10) extrapolate left and right lines\n left_line_draw = True\n x_lefts = []\n y_lefts = []\n for line in left_lines:\n x1, y1, x2, y2, m = line\n x_lefts.append(x1)\n x_lefts.append(x2) \n y_lefts.append(y1)\n y_lefts.append(y2)\n \n if len(x_lefts) > 0:\n slope_left, c_left = np.polyfit(x_lefts, y_lefts, 1)\n else:\n slope_left, c_left = 1, 1\n left_line_draw = False\n \n right_line_draw = True\n x_rights = 
[]\n y_rights = []\n for line in right_lines:\n x1, y1, x2, y2, m = line\n x_rights.append(x1)\n x_rights.append(x2)\n y_rights.append(y1)\n y_rights.append(y2)\n if len(x_rights) > 0:\n slope_right, c_right = np.polyfit(x_rights, y_rights, 1)\n else:\n slope_right, c_right = 1, 1\n right_line_draw = False\n \n y1_left = 530 # again hardcoded values, from ROI\n y2_left = 330 # again hardcoded values, from ROI\n x1_left = int((y1_left - c_left) / slope_left)\n x2_left = int((y2_left - c_left) / slope_left)\n \n y1_right = 530 # again hardcoded values, from ROI\n y2_right = 330 # again hardcoded values, from ROI \n x1_right = int((y1_right - c_right) / slope_right)\n x2_right = int((y2_right - c_right) / slope_right)\n \n # (step 11) check if left/right line is out of ROI\n left_point1 = Point(x1_left, y1_left)\n left_point2 = Point(x2_left, y2_left)\n \n right_point1 = Point(x1_right, y1_right)\n right_point2 = Point(x2_right, y2_right)\n \n if polygon.contains(left_point1) and polygon.contains(left_point2):\n left_line_draw = True\n else:\n #print (\"left line out\", left_point1, left_point2)\n left_line_draw = False\n \n if polygon.contains(right_point1) and polygon.contains(right_point2):\n right_line_draw = True\n else:\n #print (\"right line out\", right_point1, right_point2)\n right_line_draw = False\n \n \n # (step 12) draw lines\n line_image = np.copy(image)\n # Draw the right and left lines on image\n if left_line_draw:\n cv2.line(line_image, (x1_left, y1_left), (x2_left, y2_left), (255,0,0),5)\n if right_line_draw:\n cv2.line(line_image, (x1_right, y1_right), (x2_right, y2_right), (255,0,0),5)\n \n # Create a \"color\" binary image to combine with line image\n color_edges = np.dstack((masked_edge, masked_edge, masked_edge)) \n \n # Draw the lines on the edge image\n lines_edges = cv2.addWeighted(color_edges, 0.4, line_image, 1, 0) \n #plt.imshow(lines_edges)\n #plt.show()\n return lines_edges",
"def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img",
"def skeletonize(img):\n\n # hat tip to http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/\n\n img = img.copy() # don't clobber original\n skel = img.copy()\n\n skel[:,:] = 0\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))\n\n while True:\n eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)\n temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)\n temp = cv2.subtract(img, temp)\n skel = cv2.bitwise_or(skel, temp)\n img[:,:] = eroded[:,:]\n if cv2.countNonZero(img) == 0:\n break\n\n return skel",
"def bin_thres_img(img, ksize=3):\n # Apply each of the thresholding functions\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 100))\n grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(20, 100))\n\n mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(30, 100))\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0.7, 1.3))\n\n hls_binary = hls_select(img, thresh=(170, 255))\n\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | hls_binary == 1] = 1\n return combined",
"def tophat(img, kernel = (5,5)):\n\ttmp = grayscale(img)\n\tk = np.ones(kernel, np.uint8)\n\treturn cv2.morphologyEx(tmp, cv2.MORPH_TOPHAT, k)",
"def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk",
"def thinning(X, TT):\n for T in TT:\n X = elementary_thinning(X, T)\n\n return X",
"def skeletonize_image(image, method=None, dilation=None, binarization=None,\n invert=False):\n # if image is all one single color, return it\n if len(np.unique(image)) == 1:\n return image\n # Dilation also binarizes the image\n mono_image = dilate_image(image, dilation=dilation, invert=invert,\n binarization=binarization) / 255\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings('ignore', category=UserWarning)\n if method == '3d':\n skeleton = morphology.skeletonize_3d(mono_image)\n elif method == 'medial':\n skeleton = morphology.medial_axis(mono_image,\n return_distance=False)\n elif method == 'thin':\n skeleton = morphology.thin(mono_image)\n elif method == 'combined':\n skeleton = (morphology.skeletonize_3d(mono_image)\n | morphology.medial_axis(mono_image,\n return_distance=False)\n | morphology.skeletonize(mono_image))\n else:\n skeleton = morphology.skeletonize(mono_image)\n return convert(skeleton)",
"def process(self):\n kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))\n # np.ones((5, 5), np.uint8)\n # self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_OPEN, kernel, iterations=1)\n self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_GRADIENT, kernel, iterations=1)\n self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_CLOSE, kernel, iterations=3)\n return self.output_image",
"def inpaint_hybrid(img, threshold=175, min_size=64, boundary_radius=10):\n\n glare = mask_glare(img, threshold=threshold, mask_only=True)\n\n glare_inside = dilate_boundary(glare, mask=img.mask,\n radius=boundary_radius).filled(0)\n\n large_glare = remove_small_objects(glare_inside, min_size=min_size,\n connectivity=2)\n small_glare = np.logical_and(glare, np.invert(large_glare))\n\n # inpaint smaller and less important values with less expensive method\n inpainted = inpaint_with_boundary_median(img, mask=small_glare)\n hybrid = img_as_float(inpainted) # scale 0 to 1\n\n # inpaint larger regions with biharmonic inpainting\n large_inpainted = inpaint_biharmonic(img.filled(0), mask=large_glare)\n\n # now overwrite with these values\n hybrid[large_glare] = large_inpainted[large_glare]\n\n # put on old image mask\n return ma.masked_array(hybrid, mask=img.mask)",
"def img_preprocess_core(img_gray_orig):\n \n\timg_flat = img_gray_orig.reshape(img_gray_orig.shape[0] *\n\t\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\t \n\tkmeans_labels = image_segmentain(img_flat)\n\n\tkmeans_labels_arr = kmeans_labels.reshape(img_gray_orig.shape[0],\n\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\n\tjust_bone, mask_img = image_mask (kmeans_labels, img_gray_orig)\n\t \n\timg_clean_background = mask_img * img_gray_orig\n\n\timg_just_bone = img_clean_background[min(just_bone[0]):\n\t\t\t\t\tmax(just_bone[0]),min(just_bone[1]):\n\t\t\t\t\tmax(just_bone[1])]\n\t\n\treturn img_just_bone",
"def transform_image_mnist(gray, target_size = (28, 28)):\n # gray\n gray = cv2.cvtColor(gray, cv2.COLOR_RGB2GRAY)\n _save_img_file(\"outputs/test_1_gray.png\", gray)\n\n # invert\n gray = 255-gray\n _save_img_file(\"outputs/test_1_gray_invert.png\", gray)\n \n # rescale it\n gray = cv2.resize(gray, target_size)\n _save_img_file('outputs/test_2_rescale.png',gray)\n\n # better black and white version\n gray = threshold(gray, \"mean\")\n _save_img_file('outputs/test_3_thresh.png',gray)\n\n while np.sum(gray[0]) == 0:\n gray = gray[1:]\n\n while np.sum(gray[:,0]) == 0:\n gray = np.delete(gray,0,1)\n\n while np.sum(gray[-1]) == 0:\n gray = gray[:-1]\n\n while np.sum(gray[:,-1]) == 0:\n gray = np.delete(gray,-1,1)\n\n _save_img_file('outputs/test_4.png',gray)\n #print(gray.shape)\n rows,cols = gray.shape\n\n if rows > cols:\n factor = 20.0/rows\n rows = 20\n cols = int(round(cols * factor))\n # first cols than rows\n gray = cv2.resize(gray, (cols, rows))\n else:\n factor = 20.0/cols\n cols = 20\n rows = int(round(rows * factor))\n # first cols than rows\n gray = cv2.resize(gray, (cols, rows))\n\n colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))\n rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))\n gray = np.lib.pad(gray,(rowsPadding,colsPadding),'constant')\n _save_img_file('outputs/test_5.png',gray)\n\n shiftx, shifty = getBestShift(gray)\n shifted = shift(gray, shiftx, shifty)\n gray = shifted\n \n _save_img_file('outputs/test_final.png',gray)\n\n return gray",
"def straightenImage(im, imextent, mvx=1, mvy=None, verbose=0, interpolation=cv2_interpolation):\n if cv2 is None:\n raise Exception('opencv is not installed, method straightenImage is not available')\n\n dxmv = imextent[1] - imextent[0]\n dymv = imextent[3] - imextent[2]\n\n dx = im.shape[1]\n dy = im.shape[0]\n mvx0 = dxmv / float(dx - 1) # mv/pixel\n mvy0 = dymv / float(dy - 1)\n\n if mvy is None:\n mvy = mvx\n\n fw = np.abs((float(mvx0) / mvx))\n fh = np.abs((float(mvy0) / mvy))\n\n if fw < .5:\n fwx = fw\n fac = 1\n ims = im\n while (fwx < .5):\n ims = cv2.resize(\n ims, None, fx=.5, fy=1, interpolation=cv2.INTER_LINEAR)\n fwx *= 2\n fac *= 2\n ims = cv2.resize(\n ims, None, fx=fac * fw, fy=fh, interpolation=interpolation)\n else:\n ims = cv2.resize(im, None, fx=fw, fy=fh, interpolation=interpolation)\n\n if verbose:\n print('straightenImage: size %s fx %.4f fy %.4f' % (im.shape, fw, fh))\n print('straightenImage: result size %s mvx %.4f mvy %.4f' % (ims.shape, mvx, mvy))\n\n H = pgeometry.pg_transl2H([-.5, -.5]) .dot(np.diag([fw, fh, 1]).dot(pgeometry.pg_transl2H([.5, .5])))\n\n return ims, (fw, fh, mvx, mvy, H)",
"def transform_images(img1,img2):",
"def applyMorphologicalCleaning(self, image):",
"def cv2_skeletonize(img):\n img = img.copy() # don't clobber original\n skel = img.copy()\n\n skel[:,:] = 0\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))\n while True:\n eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)\n temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)\n temp = cv2.subtract(img, temp)\n skel = cv2.bitwise_or(skel, temp)\n img[:,:] = eroded[:,:]\n if cv2.countNonZero(img) == 0:\n break\n return skel",
"def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels",
"def make_bw(im, th=150):\n im_gray = np.mean(im, axis=2)\n im_binary = im_gray > th\n boolean_to_numbers = lambda b: 1 if b else -1\n v_boolean_to_numbers = np.vectorize(boolean_to_numbers)\n return v_boolean_to_numbers(im_binary)",
"def image_preprocessing(image):\n\treturn cv2.GaussianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5,5), 0)",
"def khan_tophat_morphology(img,mask = None,debugOption = 'off'):\n \n #Previous version.\n #selm = morphology.disk(8)\n #closedImg = morphology.closing(tempTophat1, selm)\n #closeOpenImg = morphology.closing(closedImg, selm)\n strel1 = np.array([[0,0,1,1,1,1,1,1,1,1,1,0,0],\n [0,1,1,1,1,1,1,1,1,1,1,1,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,1,1,1,1,1,1,1,1,1,1,1,0],\n [0,0,1,1,1,1,1,1,1,1,1,0,0]],dtype='uint8')\n\n strel2 = np.array([[0,0,1,1,1,1,1,0,0],\n [0,1,1,1,1,1,1,1,0],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [0,1,1,1,1,1,1,1,0],\n [0,0,1,1,1,1,1,0,0]],dtype='uint8')\n \n tempTophat1 = img\n closedImg = cv2.morphologyEx(tempTophat1,cv2.MORPH_CLOSE,strel1)\n closeOpenImg = cv2.morphologyEx(closedImg,cv2.MORPH_CLOSE,strel1)\n temp1 = closeOpenImg < 0\n topHatMdfd= np.subtract(closeOpenImg,self.claheImg)\n\n temp2 = topHatMdfd <0\n \n if mask != None:\n tempMaskImg = mask\n tempRow,tempCol = tempMaskImg.shape\n\n for row in range(tempRow):\n for col in range(tempCol):\n if tempMaskImg[row,col] > 0:\n tempMaskImg[row,col] = 1\n else:\n tempMaskImg[row,col] = 0\n\n tempMaskImg = tempMaskImg.astype('uint8')\n maskImg = morphology.erosion(tempMaskImg,self.strucEle2) \n result = np.multiply(topHatMdfd,maskImg)\n \n\n else:\n result = topHatMdfd\n \n\n #Debugging\n if defaultoptions == 'on'\n print(\"tempTophat1 : {} closeOpenImg : {} topHatMdfd : {} \".format(tempTophat1.dtype,closeOpenImg.dtype,topHatMdfd.dtype))\n print(\"result : {} \".format(result.dtype))\n print(\"tempMaskImg : {} maskStruc : {} maskImg : {} \".format(tempMaskImg.dtype,maskStruc.dtype,maskImg.dtype))\n \n plt.axis(\"off\")\n plt.title('tophat')\n plt.imshow(result,cmap='gray')\n plt.show()\n\n return result",
"def binarize(img, s_thres=(170, 255), l_thres=(50, 255), sobel_thres=(30, 80)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n hls[:, :, 1] = clahe.apply(hls[:, :, 1])\n\n l_image = hls[:, :, 1]\n l_blur = cv2.GaussianBlur(l_image, (0, 0), 9)\n l_image = cv2.addWeighted(l_image, 1, l_blur, -1, 0)\n l_image = cv2.normalize(l_image, np.zeros_like(l_image), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n l_binary = np.zeros_like(l_image)\n l_binary[(l_image >= l_thres[0]) & (l_image <= l_thres[1])] = 1\n\n # Sobel x\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # gray = hls[:, :, 1]\n # sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n # abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n # scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n # sxbinary = np.zeros_like(scaled_sobel)\n # sxbinary[(scaled_sobel >= sobel_thres[0]) & (scaled_sobel <= sobel_thres[1])] = 1\n # sxbinary = s_binary\n\n s_channel = hls[:, :, 2]\n s_channel = cv2.normalize(s_channel, np.zeros_like(s_channel), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres[0]) & (s_channel <= s_thres[1])] = 1\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n # we filter out the lines with too many active pixels\n combined_binary_rows = combined_binary.sum(1)\n combined_binary[combined_binary_rows > (combined_binary.shape[1] / 2)] = 0\n\n return combined_binary",
"def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))",
"def threshold_image(self, image, ksize, sobel_thresh, mag_thresh, dir_thresh, s_thresh, l_thresh, b_thresh):\n # Note: Magnitude and direction thresholds were not needed for the project. Probably they are for the challenges\n # For Sobel, the light channel will be used\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float)\n l_channel = hls[:, :, 1]\n # Sobel x\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=ksize) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n\n # Threshold x gradient\n gradient_binary = np.zeros_like(scaled_sobel)\n gradient_binary[(scaled_sobel >= sobel_thresh[0]) & (scaled_sobel <= sobel_thresh[1])] = 1\n\n s_binary, l_binary = hls_filter(image, s_thresh, l_thresh)\n l_color_channel = lab_filter(image, b_thresh)\n binary = np.zeros_like(gradient_binary)\n binary[((l_binary == 1) | (l_color_channel == 1))] = 1\n binary = 255 * np.dstack((binary, binary, binary)).astype('uint8')\n images = [\n [{'title': 'Original', 'data': image},\n {'title': 'Full Combined', 'data': binary}\n ]\n ]\n title = 'Kernel = {}; sobel = {}, mag = {}, dir = {}, s_filter = {}, l_filter = {}' \\\n .format(ksize, sobel_thresh, mag_thresh, dir_thresh, s_thresh, l_thresh)\n if self.args.is_test:\n self.image_logger.plot_results(images, title)\n\n return binary",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def morph (op,im,eesize):\n\tif (op == 'closed'):\n\t\timClosed = im.copy()\n\t\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(eesize,eesize))\n\t\timClosed = cv2.morphologyEx(imClosed, cv2.MORPH_CLOSE, se)\n\t\treturn imClosed\n\telif (op == 'open'):\n\t\timOpen = im.copy()\n\t\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(eesize,eesize))\n\t\timOpen = cv2.morphologyEx(imOpen, cv2.MORPH_OPEN, se)\n\t\treturn imOpen\n\telif (op == 'tophat'):\n\t\tth = im.copy()\n\t\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(eesize,eesize))\n\t\tth = cv2.morphologyEx(th, cv2.MORPH_TOPHAT, se)\n\t\treturn th\n\telif (op == 'dilate'):\n\t\tdil = im.copy()\n\t\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(eesize,eesize))\n\t\tdil = cv2.dilate((dil *1.0).astype(np.float32),se)\n\t\treturn dil\n\telif (op == 'erode'):\n\t\ter = im.copy()\n\t\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(eesize,eesize))\n\t\ter = cv2.erode((er *1.0).astype(np.float32),se)\n\t\treturn er",
"def preprocess(image):\n return image - MEAN_PIXEL"
]
| [
"0.7514813",
"0.65897703",
"0.6360056",
"0.6171215",
"0.6054021",
"0.60341424",
"0.5943268",
"0.5903157",
"0.5851343",
"0.5834294",
"0.58272",
"0.5822836",
"0.5783079",
"0.57600325",
"0.57185334",
"0.57183504",
"0.57124734",
"0.5712307",
"0.5708888",
"0.57082224",
"0.57026327",
"0.5695759",
"0.5671589",
"0.56706184",
"0.5661745",
"0.56528246",
"0.56234956",
"0.5622249",
"0.5616362",
"0.56056863"
]
| 0.69837624 | 1 |
return the document viewer | def viewer(self):
return self.parent | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getViewer(self, v_id=-1) -> Viewer:\n viewer_ctrl = self.getViewerController(v_id)\n if viewer_ctrl is None:\n return None\n return viewer_ctrl.view",
"def getSceneviewer(self):\n return self._sceneviewer",
"def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())",
"def get_viewer(self, viewer_reference):\n return self._viewer_by_reference(viewer_reference)",
"def document(self):\n return self.parent.document()",
"def _get_viewer_container(self):\n self.viewer = self.traj._tempmol.draw3d(style='licorice')\n return self.viewer",
"def get_document(self):\n return self.document",
"def GetDocument(self):\n return self.file",
"def _viewer_by_reference(self, reference):\n viewer_item = self._viewer_item_by_reference(reference)\n\n return self._viewer_store[viewer_item['id']]",
"def get_doc(self):\n return self.p",
"def getCurrentDocument(self):\n tabId = self.tab.currentIndex()\n if tabId == -1:\n return None\n \n currentDocument = self.tab.widget(tabId)\n return currentDocument",
"def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)",
"def document(self):\n return self._modelPart.document()",
"def preview(self):\n if self._preview is None:\n self._preview = self.build_preview()\n return self._preview",
"def documents(self):\r\n return GlobalDocuments(self)",
"def _viewer_by_id(self, vid):\n return self._viewer_store.get(vid)",
"def current_document(self):\n return self.current_buffer.document",
"def open_document(filepath, show=True):\n\t\n\tk = krita.Krita.instance()\n\tprint('Debug: opening %s' % filepath)\n\tdoc = k.openDocument(filepath)\n\tif show:\n\t\tApplication.activeWindow().addView(doc)\n\treturn doc",
"def GetDocManager(self):\r\n return self._docManager",
"def get_doc(self):\n return self._doc",
"def getDocument(self, *args):\n return _libsbml.SBMLConverter_getDocument(self, *args)",
"def get_view(self):\n return self.view",
"def set_docviewer(self, docviewer):\r\n self.shell.docviewer = docviewer",
"def View(self):\n return self._view",
"def _getForDocument (self):\n return self.__forDocument",
"def document_view(self, doc_type, view, path, is_zip=False, **kwargs):\n return self.get('fileops/documentView', api='CONV', params={\n 'root': self.root,\n 'path': path,\n 'type': doc_type,\n 'view': view,\n 'zip': 1 if is_zip else 0,\n }, **kwargs)",
"def get_viewer(self, v_id):\n try:\n return self.viewers[v_id]\n except KeyError:\n pass\n\n viewer = ImageViewCanvas(self.logger)\n\n # customize this viewer\n viewer.enable_autocuts('on')\n viewer.set_autocut_params('zscale')\n viewer.enable_autozoom('on')\n viewer.set_zoom_algorithm('rate')\n viewer.set_zoomrate(1.4)\n viewer.show_pan_mark(True)\n viewer.enable_draw(False)\n viewer.set_bg(0.2, 0.2, 0.2)\n viewer.ui_setActive(True)\n\n bd = viewer.get_bindings()\n bd.enable_all(True)\n\n # add a canvas that we can draw on\n DrawingCanvas = viewer.getDrawClass('drawingcanvas')\n canvas = DrawingCanvas()\n canvas.enable_draw(True)\n canvas.enable_edit(True)\n canvas.set_drawtype('rectangle', color='lightblue', fill=True,\n fillcolor='green', fillalpha=0.3)\n canvas.setSurface(viewer)\n # add canvas to view\n viewer.add(canvas)\n canvas.ui_setActive(True)\n self.viewers[v_id] = viewer\n return viewer",
"def view_document(self, database, collection, _id):\n r = self.__get_response(settings.VIW_DOC,\n {\"db\": database, \"col\": collection, \"id\": str(_id)})\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])",
"def preview(self):\n toplevel = self._get_toplevel()\n if toplevel is not None:\n toplevel.preview(refresh=True)",
"def GetView(self):\r\n return self.model.GetView()"
]
| [
"0.69375134",
"0.69163793",
"0.6853111",
"0.67617315",
"0.6756576",
"0.6754555",
"0.6550173",
"0.6465868",
"0.6302576",
"0.6270481",
"0.62411624",
"0.6146605",
"0.6146425",
"0.60257083",
"0.6004018",
"0.59994894",
"0.59861445",
"0.5976136",
"0.59636796",
"0.59499943",
"0.5945236",
"0.5940673",
"0.5926921",
"0.591813",
"0.5914476",
"0.5896334",
"0.5857842",
"0.5851651",
"0.584769",
"0.5836916"
]
| 0.7425443 | 0 |
On total lines changed | def updateTotalLines(self):
self.viewer().TotalLinesChanged.emit( self.editor().lines() ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _dig_line_count_changed(self, text):\n self._setup_table_digital()",
"def _changedlines(self, changedlines):\n self.changedlines = changedlines\n self.before = self.context\n self.context = []",
"def update(self, line):",
"def cb_update_line_numbers(data, signal, signal_data):\n weechat.hook_timer(10, 0, 1, \"cb_timer_update_line_numbers\", \"\")\n return weechat.WEECHAT_RC_OK",
"def line_counter(self, event=None):\n try:\n text_area = self.get_current()\n self.canvas.delete('all')\n i = text_area.index(\"@0,0\")\n while True:\n dline = text_area.dlineinfo(i)\n if dline is None: break\n y = dline[1]\n linenum = str(i).split(\".\")[0]\n self.canvas.create_text(10, y + 28, anchor=\"w\", text=linenum,\n font=self.lineFont, width=0)\n text_length = self.canvas.bbox('all') # returns a tuple in the form of (x1, y1, x2, y2)\n width = text_length[2] - text_length[0] # x2-x1\n self.canvas.config(width=width + 15)\n i = text_area.index(\"%s+1line\" % i)\n # print(self.cursor_pos.cget('pady'), self.statusbar_frame.cget('pady'), )\n except:\n self.canvas.delete('all')",
"def changed_lines(self):\n return self._depot_tools_affected_file.ChangedContents()",
"def test_line_counts(self):\n diff = (\n b'+ This is some line before the change\\n'\n b'- And another line\\n'\n b'Index: foo\\n'\n b'- One last.\\n'\n b'--- README 123\\n'\n b'+++ README (new)\\n'\n b'@@ -1,1 +1,1 @@\\n'\n b'-blah blah\\n'\n b'-blah\\n'\n b'+blah!\\n'\n b'-blah...\\n'\n b'+blah?\\n'\n b'-blah!\\n'\n b'+blah?!\\n')\n files = DiffParser(diff).parse()\n\n self.assertEqual(len(files), 1)\n self.assertEqual(files[0].insert_count, 3)\n self.assertEqual(files[0].delete_count, 4)",
"def lin_log_changed(self):\n self.model.notifyObservers()",
"def OnChanged(self, evt):\n if self._line_num:\n # Adjust line number margin width to expand as needed when line\n # number width over fills the area.\n lines = self.GetLineCount()\n mwidth = self.GetTextExtent(str(lines))[0]\n\n adj = 8\n if wx.Platform == '__WXMAC__':\n adj = 2\n\n nwidth = max(15, mwidth + adj)\n if self.GetMarginWidth(NUM_MARGIN) != nwidth:\n self.SetMarginWidth(NUM_MARGIN, nwidth)\n\n wx.PostEvent(self.GetParent(), evt)\n ed_msg.PostMessage(ed_msg.EDMSG_UI_STC_CHANGED, context=self)",
"def estimate_lines(self):\r\n logger.debug(\"estimate Lines\")\r\n self.filesize = Path(self.fileName).stat().st_size\r\n text = self.textwnd.toPlainText()\r\n linetext = text.split(\"\\n\")[1] + \"\\\\r\\\\n\"\r\n self.linesize = len(linetext.encode('utf-8'))\r\n self.estimated_lines = self.filesize // self.linesize\r\n logger.debug(\"Estimate Lines: {}\".format(self.estimated_lines))\r\n self.statusBar.showMessage(f\"Estimated lines: {self.estimated_lines}\")",
"def cb_timer_update_line_numbers(data, remaining_calls):\n weechat.bar_item_update(\"line_numbers\")\n return weechat.WEECHAT_RC_OK",
"def line(self) -> int:",
"def changed(self):\n\t\tpass",
"def changed_in_diff(diff: PatchedFile, line_n: int):\n for hunk in diff:\n hunk: Hunk\n for line_change in hunk:\n line_change: Line\n if line_change.is_added and line_change.target_line_no == line_n:\n return True\n return False",
"def _update_cmd_counter(self) -> None:\n if self._docs_processed == self._upper_bound:\n msg = 'Processing: document {} of {}'\n print(msg.format(self._docs_processed, self._upper_bound))\n else:\n msg = 'Processing: document {} of {}\\r'\n print(msg.format(self._docs_processed, self._upper_bound),\n end='\\r')",
"def number_of_loc_changes(self) -> int:\n raise NotImplementedError('not implemented')",
"def callback(self, filename, lines):\n return True",
"def increment_lines(self, d):\n self.lines += d\n styled_set_label_text(self.lines_display, \"Lines: \"+str(self.lines))",
"def check_Lines(self):\n\n pass",
"def done_output(self, changed: black.Changed) -> None:\n if changed is black.Changed.YES:\n self.output_change_count += 1\n else:\n self.output_same_count += 1",
"def _update_cmd_counter(self) -> None:\n msg = '{} documents processed\\r'\n print(msg.format(self._docs_processed), end='\\r')",
"def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line",
"def onCursorPositionChanged (self , ln, col):\n self.viewer().CursorPositionChanged.emit( ln, col )",
"def onUpdated(self):",
"def getChanges():",
"def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)",
"def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)",
"def linecounter(x):\n return linecount(x) + longlines(x)",
"def analyzeChanges(self, program: ghidra.program.model.listing.Program) -> None:\n ..."
]
| [
"0.7203833",
"0.6602556",
"0.6500687",
"0.6306492",
"0.61856705",
"0.6170643",
"0.6146898",
"0.6140344",
"0.6085471",
"0.6069593",
"0.6003475",
"0.5908536",
"0.5886867",
"0.58802915",
"0.583377",
"0.5828164",
"0.5817332",
"0.5803678",
"0.5741449",
"0.5721413",
"0.56738824",
"0.5669055",
"0.5661511",
"0.5647671",
"0.5647172",
"0.5645717",
"0.5639811",
"0.562326",
"0.56160367",
"0.5611646"
]
| 0.7786801 | 0 |
Emit signal from parent to update the position of the cursor | def onCursorPositionChanged (self , ln, col):
self.viewer().CursorPositionChanged.emit( ln, col ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mousePressEvent(self, event):\n self.begin = event.pos()\n self.end = event.pos()\n self.update()",
"def mouseMoveEvent(self, event):\n self.end = event.pos()\n self.update()",
"def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos",
"def _(event):\n system_line.cursor_left()",
"def position_changed(self, position):\n pass",
"def mousePosition(self):",
"def _onmove(self, event):",
"def change_cursor(self, cursor):\n self.setCursor(cursor)",
"def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')",
"def on_position_change(self) -> None:\n pass",
"def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)",
"def append_cursor_pos_callback(self, callbacked, *args, **kwargs):\n pass",
"def _(event):\n system_line.cursor_right()",
"def cursorPositionChanged(self):\r\n cursor = self.text_area.textCursor()\r\n line_no = cursor.blockNumber()\r\n col_no = cursor.columnNumber()\r\n self.statusBar.showMessage(\"Line \"+str(line_no)+\", Column \"+str(col_no))",
"def onCursorChanged(self, view):\n if not self.settingCursor:\n row = view.get_cursor()[0]\n i = self.model.get_iter(row)\n event = self.model.get(i, 9)[0]\n self.notifyHilightChanged(event)",
"def append_cursor_enter_callback(self):",
"def setLinescanPos(self, point):\n self.lineHorizontal.setPos(0, point.y())\n self.lineVertical.setPos(point.x(), 0)\n self.handle.setPos(point)\n self.emitter.signal.emit()\n self.update()",
"def cursor_cb(self, scene_pos):\n if self.is_within_image(scene_pos):\n pos = self.vb_image.mapSceneToView(scene_pos)\n\n self.cursor_v.setPos(pos)\n self.cursor_h.setPos(pos)\n self.cursor_text.setText(\n \"({:.1f}, {:.1f}) px\".format(pos.x(), pos.y()))\n if self._mark is not None:\n delta = pos - self._mark\n self.cursor_delta.setPos(pos)\n self.cursor_delta.setText(\n \"Δ = ({:.1f}, {:.1f}) μm\".format(\n self.px_to_um(delta.x()), self.px_to_um(delta.y())))\n\n self.cursor_v.show()\n self.cursor_h.show()\n self.cursor_text.show()\n self.cursor_delta.show()\n\n elif self.is_within_zoom(scene_pos):\n pos = self.vb_zoom.mapSceneToView(scene_pos)\n\n if self._up is not None:\n self.zoom_text.setPos(pos)\n self.zoom_text.setText(\"I = {:.0f}\".format(\n self.zoom.image[int(pos.x()), int(pos.y())]))\n self.zoom_text.show()\n\n elif self.is_within_residuals(scene_pos):\n pos = self.vb_residuals.mapSceneToView(scene_pos)\n\n if self._up is not None:\n self.residuals_text.setPos(pos)\n self.residuals_text.setText(\"r = {:.2f}\".format(\n self.residuals.image[int(pos.x()),int(pos.y())]))\n self.residuals_text.show()\n\n else:\n for w in [self.cursor_v, self.cursor_h,\n self.cursor_text, self.cursor_delta,\n self.zoom_text, self.residuals_text]:\n w.hide()",
"def cursor(self, cursor):\n\n self._cursor = cursor",
"def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos",
"def __master_cursor_pos_callback(self, glfw_window, xpos, ypos):\n # flip glfw window space to match OGL space(like texture that has bottom left origin)\n ypos = self.window.glyph.size[1] - ypos\n\n # update values\n self.__pos_instant = Vec(xpos, ypos, 0)\n self.__accel = self.__pos_instant - self.__pos_prev\n self.__pos_prev = self.__pos_instant\n\n # call registered callbacks\n self.call_cursor_pos_callback(glfw_window, *self.__pos_instant.xy, mouse=self)",
"def _pos_changed(self, timestamp=None, value=None, **kwargs):\n self._set_position(value)",
"def _update_cursor(self) -> None:\n # get the brush size (get a local reference in case another process\n # changes it between the different accesses in this method)\n brush_size = self.brush_size\n # if there is not update, return\n if not self.is_cursor_change:\n return\n # otherwise dequeue the update\n self.is_cursor_change = False\n # make a static border ring for the cursor\n ring = make_ring(brush_size - 1, brush_size)\n cursor = make_cursor(ring, self._brush_border_color)\n # make a circle with the current color\n brush_circle = make_circle(brush_size) - ring\n cursor = cursor + make_cursor(brush_circle, self._color)\n # create the pyglet cursor object and set it\n mouse = pyglet_cursor(cursor)\n self._view.set_cursor(mouse)",
"def mousePressEvent(self, event):\n self.dragging = True\n self.moved = False\n self.parent.setCursor(QtCore.Qt.ClosedHandCursor)",
"def mousePositionRaw(self):",
"def mousePositionRaw(self):",
"def mouseMoveEvent(self, event):\n self.setCursor(qtc.Qt.SizeVerCursor)\n\n multiplier = self.singleStep()\n valueOffset = ((self.mouseStartPosY - event.pos().y()) * multiplier)\n value = self.startValue + valueOffset\n\n if value != self.current_value:\n self.current_value = value\n self.setValue(self.current_value)",
"def onUp():\n currentIndex = selector.currentRow()\n if currentIndex != 0:\n selector.blockSignals(True)\n currentItem = selector.takeItem(currentIndex)\n selector.insertItem(currentIndex - 1, currentItem)\n selector.setCurrentRow(currentIndex - 1)\n selector.blockSignals(False)\n position = []\n for index in range(selector.count()):\n position.append(selector.item(index).data(32))\n p.SetString(\"Position\", \",\".join(position))\n onItemChanged()",
"def onButtonPress(self, event):\n\n if event.xdata and event.ydata:\n self.emit(QtCore.SIGNAL(\"positionSelected(float, float)\"),\n float(event.xdata), float(event.ydata))",
"def update(self, pos):\n\t\tpass"
]
| [
"0.7106103",
"0.65444225",
"0.6542953",
"0.6539708",
"0.6421766",
"0.63098747",
"0.6277237",
"0.6269175",
"0.62403893",
"0.6239031",
"0.62021893",
"0.6195149",
"0.6182977",
"0.6173183",
"0.6135628",
"0.6033672",
"0.5999649",
"0.5986024",
"0.5984097",
"0.59757787",
"0.59611",
"0.5960903",
"0.5959597",
"0.59531224",
"0.59504914",
"0.59504914",
"0.593061",
"0.5924365",
"0.59240884",
"0.5917538"
]
| 0.6663372 | 1 |
Activate or deactivate the line numbering | def setLinesNumbering (self, visible):
if visible:
self.srcEditor.setMarginLineNumbers(1, visible)
self.srcEditor.onLinesChanged()
else:
self.srcEditor.setMarginLineNumbers(1, visible)
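            # hiding the numbers is not enough: the margin width must also be
            # reset to 0 so the empty margin strip disappears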
self.srcEditor.setMarginWidth(1, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggleLineNumbering(self, checked):\n self.lineNumbering = checked\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n # end of bypass\n if doc.extension != TestPlan.TYPE and doc.extension != TestPlan.TYPE_GLOBAL and doc.extension != TestConfig.TYPE:\n doc.setLinesNumbering(checked)",
"def ToggleLineNumbers(self, switch=None):\n if (switch is None and \\\n not self.GetMarginWidth(NUM_MARGIN)) or switch:\n self.EnableLineNumbers(True)\n else:\n self.EnableLineNumbers(False)",
"def EnableLineNumbers(self, enable=True):\n if enable:\n self.SetMarginWidth(NUM_MARGIN, 30)\n else:\n self.SetMarginWidth(NUM_MARGIN, 0)\n self._line_num = enable",
"def _toggle_indicator(self, lineno: int =0, columno: int =0) -> None:\n\n pt = self.text.view.text_point(lineno - 1, columno)\n region_name = 'anaconda.indicator.{}.{}'.format(\n self.text.view.id(), lineno\n )\n\n for i in range(3):\n delta = 300 * i * 2\n sublime.set_timeout(lambda: self.text.view.add_regions(\n region_name,\n [sublime.Region(pt, pt)],\n 'comment',\n 'bookmark',\n sublime.DRAW_EMPTY_AS_OVERWRITE\n ), delta)\n sublime.set_timeout(\n lambda: self.text.view.erase_regions(region_name),\n delta + 300\n )",
"def linenumbers(self,event):\n for child in self.app.children:\n if event.IsChecked(): \n child.source.SetMarginWidth(1, 50)\n else:\n child.source.SetMarginWidth(1, 0)\n \n self.set('ViewLineNumbers',event.IsChecked())",
"def linenumber(self, pad, linepad):\n linepad.config(state=GUI.NORMAL)\n coordinate_pad = map(int, pad.index(GUI.END).split('.'))\n linepad.delete('1.0', GUI.END)\n for i in range(coordinate_pad[0] - 1):\n linepad.insert(GUI.END, str(i + 1) + '.\\n')\n linepad.config(state=GUI.DISABLED)\n linepad.see(GUI.END)",
"def line_counter(self, event=None):\n try:\n text_area = self.get_current()\n self.canvas.delete('all')\n i = text_area.index(\"@0,0\")\n while True:\n dline = text_area.dlineinfo(i)\n if dline is None: break\n y = dline[1]\n linenum = str(i).split(\".\")[0]\n self.canvas.create_text(10, y + 28, anchor=\"w\", text=linenum,\n font=self.lineFont, width=0)\n text_length = self.canvas.bbox('all') # returns a tuple in the form of (x1, y1, x2, y2)\n width = text_length[2] - text_length[0] # x2-x1\n self.canvas.config(width=width + 15)\n i = text_area.index(\"%s+1line\" % i)\n # print(self.cursor_pos.cget('pady'), self.statusbar_frame.cget('pady'), )\n except:\n self.canvas.delete('all')",
"def lineNumbers(self, linenumbers):\n libxml2mod.xmlParserSetLineNumbers(self._o, linenumbers)",
"def increment_lines(self, d):\n self.lines += d\n styled_set_label_text(self.lines_display, \"Lines: \"+str(self.lines))",
"def format_text(self):\n for line, _ in enumerate(self.readlines()[:-1]):\n self.root.colour_line(line + 1)",
"def skip_lines(nb):\n if nb == -1:\n os.system('cls' if os.name=='nt' else 'clear')\n else:\n print(\"\\n\" * (nb-1))",
"def newLine(self) :\n if not self.hpgl2 :\n dic = self.pages.get(self.pagecount, None)\n if dic is None :\n self.setPageDict(\"linescount\", 1) \n dic = self.pages.get(self.pagecount)\n nblines = dic[\"linescount\"] \n self.setPageDict(\"linescount\", nblines + 1) \n if (self.linesperpage is not None) \\\n and (dic[\"linescount\"] > self.linesperpage) :\n self.pagecount += 1",
"def active_note_row(self) -> int:\r\n ...",
"def setNoHiddenLines():\n dislin.nohide()",
"def reset_line_count(self):\n self._line_count = 0",
"def _dig_line_count_changed(self, text):\n self._setup_table_digital()",
"def cb_update_line_numbers(data, signal, signal_data):\n weechat.hook_timer(10, 0, 1, \"cb_timer_update_line_numbers\", \"\")\n return weechat.WEECHAT_RC_OK",
"def _on_change(self, event):\n self.codeLineNumbers.redraw()",
"def set_startline(self, line_no):\n self.set_attribute(\"startline\", line_no)",
"def draw_increasing(i, j):\n return \"\\\\draw[line width = \" + str(latex_options[\"line_width\"]) + \", color=\" + latex_options[\"color_increasing\"] + \"] (T\" + str(i) + \") -- (T\" + str(j) + \");\\n\"",
"def _inverse_lines(self):\n pass",
"def go_to_line(self, lineno):\r\n self.get_current_editor().highlight_line(lineno)",
"def View_Inorder( self ):\r\n cb.order = 1\r\n self.system.Draw( )",
"def update_lines(self):\n self._checkfigure()\n for ld in self.lines:\n line = ld['line']\n\n color = ld['color']\n line.set_color(color)\n\n lw = ld['linewidth']\n hlf = ld['highlight factor']\n highlight = hlf if ld['highlighted'] else 1.0\n lw = lw*highlight\n line.set_linewidth(lw)\n\n for vline in ld['vlines']:\n vline.set_color(color)\n vline.set_linestyle('--')\n vline.set_linewidth(lw)\n\n for hline in ld['vlines']:\n hline.set_color(color)\n hline.set_linestyle('--')\n hline.set_linewidth(lw)",
"def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1",
"def _update_lines(self, lines, new_line):\n code_matches = [x for x in _ansi_codes.finditer(new_line)]\n color_codes = [\n code.string[code.span()[0] : code.span()[1]] for code in code_matches\n ]\n\n # Add color codes from earlier in the unwrapped line, and then track any new ones we add.\n new_line = \"\".join(self._active_codes) + new_line\n\n for code in color_codes:\n if code != _ansi_color_reset_code:\n self._active_codes.append(code)\n else: # A single reset code resets everything\n self._active_codes = []\n\n # Always ensure each line is color terminted if any colors are\n # still active, otherwise colors will bleed into other cells on the console\n if len(self._active_codes) > 0:\n new_line = new_line + _ansi_color_reset_code\n\n lines.append(new_line)",
"def active_vertical_lines(self):\n val = ((self._block[1] & 0xF0) << 4) + self._block[0]\n return (val + 1) * 2",
"def enable_hidden_line_removal(self):\n self.UseHiddenLineRemovalOn()",
"def cb_line_numbers(data, item, window):\n bar_height = weechat.window_get_integer(window, \"win_chat_height\")\n content = \"\"\n for i in range(1, bar_height + 1):\n content += \"%s \\n\" % i\n return content",
"def lineNumbersDefault(val):\n ret = libxml2mod.xmlLineNumbersDefault(val)\n return ret"
]
| [
"0.715932",
"0.71021974",
"0.6928087",
"0.6304502",
"0.61044735",
"0.6058214",
"0.58346593",
"0.57963574",
"0.57231426",
"0.57112205",
"0.57014596",
"0.5667571",
"0.5618884",
"0.5568091",
"0.5567257",
"0.55642784",
"0.5552298",
"0.5506729",
"0.5483577",
"0.54285765",
"0.5386685",
"0.538265",
"0.53307134",
"0.53147805",
"0.5309902",
"0.5305081",
"0.53050303",
"0.52972776",
"0.5269648",
"0.52559066"
]
| 0.7129927 | 1 |
Activate or deactivate indentation guides visibility | def setIndentationGuidesVisible (self, visible):
        self.srcEditor.setIndentationGuidesVisible(visible) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggleIndentGuidesVisibility(self, checked):\n self.indentationGuidesVisible = checked\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n # end of bypass\n if doc.extension != TestPlan.TYPE and doc.extension != TestConfig.TYPE:\n doc.setIndentationGuidesVisible(checked)",
"def indentation_guides(self,event):\n for child in self.app.children:\n child.source.SetIndentationGuides(event.IsChecked())\n self.set('IndentationGuides',event.IsChecked())",
"def menu_indentation(self, event=None):\n self.parentPanel.indentation_guides(event)",
"def check_indent_allowed(self) -> bool:\n return True",
"def check_indent_allowed(self) -> bool:\n return False",
"def check_indent_allowed(self) -> bool:\n return False",
"def __editIndent(self):\n self.activeWindow().indentLineOrSelection()",
"def indent(self):\r\n editor = self.get_current_editor()\r\n if editor is not None:\r\n editor.indent()",
"def __editUnindent(self):\n self.activeWindow().unindentLineOrSelection()",
"def unindent(self):\r\n editor = self.get_current_editor()\r\n if editor is not None:\r\n editor.unindent()",
"def __editSmartIndent(self):\n self.activeWindow().smartIndentLineOrSelection()",
"def test_delete_indentation(self):\n before_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.8\", \"2.8\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"delete-indentation\",\n )",
"def test_back_to_indentation(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"4.13\", \"4.13\"),\n after_sel=(\"4.8\", \"4.8\"),\n command_name=\"back-to-indentation\",\n )",
"def section_overindented(): # noqa: D416",
"def set_visual_indent(self, indent):\n self._visual_indent = indent",
"def indent(self):\n cursor = self.parent.textCursor()\n # Check if something is selected\n if cursor.hasSelection():\n # get the line/block nr\n temp = cursor.blockNumber()\n # Move to last line of the selection\n cursor.setPosition(cursor.selectionEnd())\n # calculate range of selection\n diff = cursor.blockNumber() - temp\n # Go over all the selected lines\n for n in range(diff + 1):\n cursor.movePosition(QTextCursor.StartOfLine)\n # insert tab\n cursor.insertText(\"\\t\")\n # move back up\n cursor.movePosition(QTextCursor.Up)\n else:\n # There is no selection, simply insert a TAB\n cursor.movePosition(QTextCursor.StartOfLine)\n cursor.insertText(\"\\t\")",
"def setNoHiddenLines():\n dislin.nohide()",
"def test_back_to_home_at_indentation(self):\n before_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n after_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"2.4\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"back-to-home\",\n )",
"def VisualMode(self):\n self.stc.SetBlockCaret()\n self.BlockMode = True\n self.stc.SetOvertype(False)\n self._SetMode(ViKeyHandler.VISUAL, u'VISUAL')\n self.commander.StartSelection()",
"def dedent(self):\n self.indent_level -= self.INDENT_STEP",
"def dedent(self):\n self.indent_level -= self.INDENT_STEP",
"def tab_insert_indent():\n before_cursor = get_app().current_buffer.document.current_line_before_cursor\n\n return bool(before_cursor.isspace())",
"def enter(self):\n self.indent += 1",
"def erase(self):\n self.view.erase_status('00_git_gutter')",
"def toggleWhitespaceVisibility(self, checked):\n self.whitespaceVisible = checked\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n # end of bypass\n if doc.extension != TestPlan.TYPE and doc.extension != TestPlan.TYPE_GLOBAL and doc.extension != TestConfig.TYPE:\n doc.setWhitespaceVisible(checked)",
"def leave(self):\n assert(self.indent > 0)\n self.indent -= 1",
"def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent",
"def indent(self):\n self.indent_level += self.INDENT_STEP",
"def indent(self):\n self.indent_level += self.INDENT_STEP",
"def toggleCodeFolding(self, checked):\n self.codeFolding = checked\n try:\n if checked:\n self.foldAllAction.setEnabled(True)\n else:\n self.foldAllAction.setEnabled(False)\n except Exception as e:\n self.error( ' toggle code folding: %s' % str(e) )\n\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n # end of bypass\n if doc.extension != TestPlan.TYPE and doc.extension != TestTxt.TYPE and doc.extension != TestConfig.TYPE:\n doc.setFolding(checked)"
]
| [
"0.765902",
"0.756237",
"0.64416647",
"0.61077577",
"0.60643417",
"0.60643417",
"0.605182",
"0.6029787",
"0.59826225",
"0.5865516",
"0.5814423",
"0.5500346",
"0.54570514",
"0.5425387",
"0.5388049",
"0.5363415",
"0.532164",
"0.51982653",
"0.5184068",
"0.516022",
"0.516022",
"0.51428527",
"0.51414835",
"0.5107012",
"0.51040787",
"0.5099109",
"0.5081757",
"0.507686",
"0.507686",
"0.504833"
]
| 0.7604601 | 1 |
Called when an editor gains focus; emits the signal "FocusChanged" | def focusChanged (self):
weditor = QApplication.focusWidget()
if isinstance(weditor, PyEditor):
if weditor.editorId == self.TEST_DATA_EDITOR:
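                # re-point the viewer's find widget at this document's source editor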
self.viewer().findWidget.setEditor( editor = self.srcEditor)
self.viewer().FocusChanged.emit(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def focus_changed(self):\r\n fwidget = QApplication.focusWidget()\r\n for finfo in self.data:\r\n if fwidget is finfo.editor:\r\n self.refresh()",
"def changedFocusSlot(self, old, now):\n if self.focusWidget():\n self.focusWidget().installEventFilter(self)",
"def focusInEvent(self, evt):\n self.gotFocus.emit()\n super(QuickSearchLineEdit, self).focusInEvent(evt) # pass it on",
"def OnSetFocus(self, event):\r\n\r\n self.Refresh()",
"def _focus(self, event) -> None:\n self.focus = True",
"def focus(self):\n raise NotImplementedError",
"def onFocus(*args):",
"def onFocus(*args):",
"def onFocus(*args):",
"def onFocus(*args):",
"def OnSetFocus(self, event):\r\n\r\n self._owner.SetFocus()",
"def focusInEvent(self, event):\n super(CustomLineEdit, self).focusInEvent(event)\n if self.completer() is not None:\n # Qt should've automatically connected highlighted() to\n # an internal slot, but check just to be sure\n recievers = self.completer().getRecievers(\"highlighted(QString)\")\n if recievers > 0:\n self.completer().highlighted[\"QString\"].disconnect()\n self.connect(self.completer(), QtCore.SIGNAL(\"highlighted(QString)\"),\n self.highlightCompletion)",
"def on_chat_focus(self, request, trigger_context):\n raise NotImplementedError",
"def appFocusChanged(self, old, now):\n from QScintilla.Shell import Shell\n \n if not isinstance(now, (Editor, Shell)):\n self.editActGrp.setEnabled(False)\n self.copyActGrp.setEnabled(False)\n self.viewActGrp.setEnabled(False)\n self.sbZoom.setEnabled(False)\n else:\n self.sbZoom.setEnabled(True)\n self.sbZoom.setValue(now.getZoom())\n \n if (\n not isinstance(now, (Editor, Shell)) and\n now is not self.quickFindtextCombo\n ):\n self.searchActGrp.setEnabled(False)\n \n if now is self.quickFindtextCombo:\n self.searchActGrp.setEnabled(True)\n \n if not isinstance(now, (Editor, Shell)):\n self.__lastFocusWidget = old",
"def HandleFocusIn(self, event: tkEvent):\n pass",
"def OnSetFocus(self, event):\r\n\r\n self._hasFocus = True\r\n self.RefreshSelected()\r\n event.Skip()",
"def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())",
"def __window_focus(self):\n pass",
"def setFocus(*args, **kwargs)->None:\n pass",
"def focus_event(self, widget, event, hasFocus):\n return self.make_callback('focus', hasFocus)",
"def run_autofocus(self):\n raise NotImplementedError",
"def setFocus(*args):",
"def setFocus(*args):",
"def setFocus(*args):",
"def setFocus(*args):",
"def force_focus_set(self, event):\n self.focus_set()",
"def start_blur(self):\r\n super(Defocus, self)._start()",
"def XPSetKeyboardFocus(inWidget):\n pass",
"def set_focus(self, pos):\n urwid.emit_signal(self, 'focus_change', pos)\n return super(OptionListWalker, self).set_focus(pos)",
"def _listen(self):\n self.cv.focus_force()"
]
| [
"0.7528211",
"0.70036095",
"0.69637966",
"0.6493235",
"0.6487977",
"0.64844894",
"0.6407414",
"0.6407414",
"0.6407414",
"0.6407414",
"0.6306348",
"0.6266443",
"0.6196158",
"0.611592",
"0.60014147",
"0.59973145",
"0.5948752",
"0.59431016",
"0.59339786",
"0.59243387",
"0.5759661",
"0.5712125",
"0.5712125",
"0.5712125",
"0.5712125",
"0.57104665",
"0.5707988",
"0.57023525",
"0.5671992",
"0.5639791"
]
| 0.7537676 | 0 |
Set the default cursor position | def setDefaultCursorPosition(self):
self.srcEditor.setFocus()
self.srcEditor.setCursorPosition(0,0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_cursor_position(self, x: int, y: int) -> None:\n self.screen.move(y, x)",
"def change_cursor(self, cursor):\n self.setCursor(cursor)",
"def set_cursor_default(self, widget=None):\n logger.debug(\"Setting cursor to default. widget: %s\", widget)\n widget = self.root if widget is None else widget\n widget.config(cursor=\"\")\n widget.update_idletasks()",
"def set_cursor(self, row, col):\n self._vim.current.window.cursor = (row, col)",
"def _set_cursor(self, cursor):\n self._cursor = cursor",
"def setDefaultCursor (self,event=None):\n \n self.text.configure(cursor=\"xterm\")",
"def set_cursor( self, pos ):\n\t\tassert type( pos ) is tuple, \"pos must be a tuple (col,row)\"\n\t\trow_offsets = [ 0x00, 0x40, 0x14, 0x54 ]\n\t\trow = pos[1]\n\t\tif ( row > self.rows ):\n\t\t\trow = self.rows-1 # we count rows starting w/0\n\n\t\tself.command(LCD_SETDDRAMADDR | (pos[0] + row_offsets[row])) # Col + row offset",
"def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos",
"def setCursorToDefaultFormatting(self):\n self.__cursor.setAllPropertiesToDefault()",
"def set_cursor( self, point ):\n\t\tif 0 <= abs(point[0]) <= self.width :\n\t\t\tself.xprint = point[0]\n\t\tif 0 <= abs(point[1]) <= self.height :\n\t\t\tself.yprint = point[1]",
"def default_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:\n try:\n line_no = default_buffer.document.cursor_position_row - \\\n history_mapping.result_line_offset\n\n if line_no < 0: # When the cursor is above the inserted region.\n raise IndexError\n\n history_lineno = sorted(history_mapping.selected_lines)[line_no]\n except IndexError:\n pass\n else:\n history_buffer.cursor_position = \\\n history_buffer.document.translate_row_col_to_index(history_lineno, 0)",
"def set_cursor(self,x,y):\n if 1 <= x <= 20 and y in [1,2]:\n self.send(\"\\x1f\\x24%c%c\" % (x,y))\n else:\n raise ValueError('cursor position must be between 1,20 and 1,2')",
"def set_cursor(obj: QObject, cursor: QCursor = Qt.PointingHandCursor) -> None:\n obj.setCursor(QCursor(cursor))",
"def __setCursor(self, id=None):\n if self.__currentCursor != id: # Avoid redundant calls\n if id:\n self.drawingSurface.SetCursor(wx.StockCursor(id))\n else:\n self.drawingSurface.SetCursor(wx.NullCursor)\n self.__currentCursor = id",
"def set_cursor(self, cursor):\n for step in self.steps:\n step[1].set_cursor(cursor)\n return self",
"def setCursor(self, _name = None):\n\n\t\t_before = self._cursor\n\t\tself._cursor = _name\n\t\tif _before != _name:\n\t\t\tself._updated.append(tuple(self.rect))\n\t\t\tself.updateCursor()\n\t\t\tself._updated.append(tuple(self.rect))",
"def _set_cursor_pos(coords):\n try:\n win32api.SetCursorPos(coords)\n except pywintypes.error as exc:\n if str(exc) == \"(0, 'SetCursorPos', 'No error message is available')\":\n raise RuntimeError(\"There is no active desktop required for moving mouse cursor!\\n\")\n else:\n raise exc",
"def set_cursor_coordinates(self, x, y):\n text = self.getText()\n lines = text.split(\"\\n\")\n i = 0\n for row, line in enumerate(lines):\n if row == y:\n break\n i += len(line) + 1 # we need to include \"\\n\"\n if \"\\r\" in line: # and also \"\\r\"\n i -= 1\n pos = i + x\n if pos > len(text):\n pos = len(text)\n self.setCursorPos(pos)",
"def setPosition(position):",
"def reset_position(self):\n self.goto(STARTING_POSITION)",
"def setCursorPos(serDisplay, row, col, clearRow = False):\n offset = 127 + col\n if row == 2:\n offset = 128 + 63 + col\n elif row == 3:\n offset = 128 + 19 + col\n elif row == 4:\n offset = 128 + 83 + col\n cmd = array.array('B', (COMMAND_PREFIX, offset))\n writeToDisplay(serDisplay, cmd.tostring())",
"def cursor(self, value: int) -> None:\n\n self._cursor = max(0, min(value, real_length(self.value)))",
"def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)",
"def _set_cursor(self, enabled):\n if enabled:\n cursor = (backend_tools.Cursors.RESIZE_HORIZONTAL\n if self.direction == 'horizontal' else\n backend_tools.Cursors.RESIZE_VERTICAL)\n else:\n cursor = backend_tools.Cursors.POINTER\n\n self.ax.figure.canvas.set_cursor(cursor)",
"def resetCursor(self):\n self.personalDataList.viewport().setCursor(QtCore.Qt.ArrowCursor)\n self.personalDataList.areaClicked.disconnect()",
"def set_cursor(widget, size, hotspot, xormasks, andmasks):\n if not cursors:\n cursors.append((None, pygame.mouse.get_cursor()))\n cursors.append((widget, (size, hotspot, xormasks, andmasks)))\n pygame.mouse.set_cursor(size, hotspot, xormasks, andmasks)",
"def cursor_set(self, yes: bool = True) -> None:\n unicurses.curs_set(False)",
"def cursor_set():\n print(\"\\033[0;0H\")",
"def cursor( self, value=True ):\n\t\tif value:\n\t\t\tself._displaycontrol |= LCD_CURSORON\n\t\telse:\n\t\t\tself._displaycontrol &= (0xFF^LCD_CURSORON)\n\t\tself.command( LCD_DISPLAYCONTROL | self._displaycontrol )",
"def move_to(xy):\n (x,y) = xy\n win32api.SetCursorPos((x,y))"
]
| [
"0.7599676",
"0.75060236",
"0.73707634",
"0.7345042",
"0.73390526",
"0.73276865",
"0.71844566",
"0.7176274",
"0.70245934",
"0.69838053",
"0.69177216",
"0.69093466",
"0.68998706",
"0.6859215",
"0.6852698",
"0.6690307",
"0.66542625",
"0.6634238",
"0.66124254",
"0.65964746",
"0.65309113",
"0.65275496",
"0.65274966",
"0.65126723",
"0.64960265",
"0.6480252",
"0.64766246",
"0.64641947",
"0.6454548",
"0.6421534"
]
| 0.8522591 | 0 |
Return how many hours, rounded to 2 decimals, Python 2 has left on Planet Earth (calculated from start_date) | def py2_earth_hours_left(start_date=BITE_CREATED_DT):
td = (PY2_DEATH_DT - start_date)
return round((td.days*24 + td.seconds/3600), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def py2_earth_hours_left():\r\n left = PY2_RETIRED_DT - BITE_CREATED_DT\r\n left_earth_hours = round(left.total_seconds()/3600,2)\r\n\r\n return left_earth_hours",
"def get_time_delta_in_hours(start, end):\n dhour = end.hour - start.hour\n dmin = end.minute - start.minute\n dsec = end.second - start.second\n dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second\n # print start, end, dtime\n return float(dtime.seconds) / (60*60)",
"def calculate_hours(time):\n return int(time / 3600)",
"def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60",
"def get_time_in_round() -> int:\n # FIXME - returning negative value for projectiles\n return store.round_time",
"def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(self.date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(self.date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - self.break_hour\n self.number_of_hours_temp = diff_day",
"def hours(self):\n return int(self.minutes / 60)",
"def duration(self):\n delta = self.occurrence.end - self.occurrence.start\n real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)\n\n adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours\n\n return adjusted_hours",
"def _unit_hr(self):\n return self.time_base * 60.0",
"def unit_hr(self):\n return self.time_base * 60.0",
"def calculate_time(start_time):\r\n return round(time() - start_time, 2)",
"def get_duration(self):\n\n return self.endtime - self.starttime",
"def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25",
"def _get_number_of_hours(self, date_from, date_to, istirahat):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - istirahat\n return diff_day",
"def hours_in(sec):\r\n return int(sec//3600)",
"def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))",
"def _unit_day(self):\n return (self.time_base * 60.0) * 24.0",
"def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()",
"def how_much_hours(username, password, workers, projects, start_date, end_date):\n tt = TTrackerSession()\n tt.login(username, password)\n return tt.how_much_hours(workers, projects, start_date, end_date)",
"def date_in_hours(date):\n delta = delta_from_now(date)\n return (delta.days * 24) + delta.seconds / (60 * 60)",
"def totalHours(path):\n total = 0\n start = 0\n active = False\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[1] == 't':\n total += float(row[2])\n else:\n if active:\n if row[1] == 's':\n total += float(row[0]) - start\n active = False\n else:\n if row[1] == 'a':\n start = float(row[0])\n active = True\n final = row\n if active:\n total += time.time() - float(final[0])\n active = False\n return \"%.2f\" % (total / 3600)",
"def duration(self):\n return self.end_time - self.start_time",
"def duration(self):\r\n return (self.end_time or time.time()) - self.start_time",
"def elapsed(self, start=\"__enter__\", end=\"__exit__\"):\n hours, rem = divmod(self.elapsed_raw(start, end), 3600)\n minutes, seconds = divmod(rem, 60)\n return f\"{int(hours):0>2}:{int(minutes):0>2}:{seconds:05.2f}\"",
"def unit_day(self):\n return (self.time_base * 60.0) * 24.0",
"def time_interval( self ):\n begin = self.begin; end = self.end\n if end - begin < 600*self.hour_switch:\n return 600\n if end - begin < 86400*self.day_switch:\n return 3600\n elif end - begin < 86400*7*self.week_switch:\n return 86400\n else:\n return 86400*7",
"def get_time_round(date):\r\n return int(date / self.timeframe) * self.timeframe",
"def days(self):\n return int(self.hours / 24)",
"def getHoursOffset(self):\n return _libsbml.Date_getHoursOffset(self)",
"def duration_hours(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"duration_hours\")"
]
| [
"0.7207195",
"0.6741372",
"0.6569625",
"0.64889026",
"0.6452905",
"0.6452884",
"0.64516276",
"0.6404332",
"0.6370794",
"0.63112926",
"0.61538273",
"0.61222947",
"0.6121003",
"0.61137897",
"0.608259",
"0.60218483",
"0.60139096",
"0.6005377",
"0.59297705",
"0.58832794",
"0.5881609",
"0.58594126",
"0.5852537",
"0.5842776",
"0.583717",
"0.5825581",
"0.5812028",
"0.57996184",
"0.5787588",
"0.57859635"
]
| 0.7106655 | 1 |
function loads a random image from a random folder in our test path | def getRandomImage(path):
folders = list(filter(lambda x: os.path.isdir(os.path.join(path, x)), os.listdir(path)))
random_directory = np.random.randint(0,len(folders))
path_class = folders[random_directory]
print("Class - " + five_celeb_dict_n[str(path_class)])
file_path = path + path_class
file_names = [f for f in listdir(file_path) if isfile(join(file_path, f))]
random_file_index = np.random.randint(0,len(file_names))
image_name = file_names[random_file_index]
return cv2.imread(file_path+"/"+image_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_image():\n img_dir = \"./static\"\n img_list = os.listdir(img_dir)\n img_path = os.path.join(img_dir, random.choice(img_list))\n return img_path",
"def get_rand_img():\n import urllib\n import os\n import glob\n\n pics = glob.glob('/home/cody_techngs/PycharmProjects/ProjTest/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/rand*')\n nums = []\n\n for pic in pics:\n nums.append(int(pic.split('rand_img')[1].split('.')[0]))\n\n unique_num = False\n new_rand_num = 0\n\n while not unique_num:\n new_rand_num = random.randrange(1, 2000)\n if new_rand_num not in nums:\n unique_num = True\n\n img_name = 'rand_img{}.jpg'.format(new_rand_num)\n dl_location = os.getcwd() + '/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/' + img_name\n url = 'https://unsplash.it/400/300/?random'\n urllib.urlretrieve(url, dl_location)\n\n return 'static/images/HITs/{}'.format(img_name)",
"def random_img(path):\n fullpath = os.path.join(settings.MEDIA_ROOT, path)\n filenames = [f for f in os.listdir(fullpath) if is_image_file(f)]\n pick = random.choice(filenames)\n return posixpath.join(settings.MEDIA_URL, path, pick)",
"def get_random_image_path(imgs_path):\n img_files = os.listdir(imgs_path)\n\n if len(img_files) < 1:\n raise Exception(\"No images found pertaining to the given make and mode.\")\n\n img_path = imgs_path + \"/\" + str(img_files[random.randrange(0, len(img_files))])\n\n return img_path",
"def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images",
"def test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)",
"def get_a_picture_randomly(self):\n files = os.listdir(self.image_directory)\n if len(files) == 0:\n return None\n full_image_name = os.path.abspath(self.image_directory + random.choice(files))\n return full_image_name",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte",
"def test(train_out_dir):\n above = os.path.join(train_out_dir, '..')\n os.chdir(above)\n if not os.path.exists(\"test\"):\n os.mkdir(\"test\")\n\n for sdir in os.listdir(train_out_dir):\n cur_dir = os.path.join(train_out_dir, sdir)\n list_curr_dir = os.listdir(cur_dir)\n random.seed()\n rand_num = random.randint(0, len(list_curr_dir) - 1)\n rand_img = list_curr_dir[rand_num]\n rand_img_path = os.path.join(cur_dir, rand_img)\n dst_path = os.path.join(\"test\", sdir)\n if not os.path.exists(dst_path):\n os.mkdir(dst_path)\n\n shutil.move(rand_img_path, os.path.join(dst_path, os.path.basename(rand_img_path)))",
"def load_sample_images():\n # Try to import imread from scipy. We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)",
"def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images",
"def user_random_avatar():\n avatar_names = os.listdir(PATH)\n avatar_path = random.choice([avatar_image for avatar_image in avatar_names\n if os.path.isfile(os.path.join(PATH,avatar_image))])\n return PATH_RELATIVE+avatar_path",
"def list_images(path, use_shuffle=True):\r\n def is_image(filename):\r\n return os.path.splitext(filename)[-1][1:].lower() in ['jpg', 'png']\r\n images = list(map(lambda x: os.path.join(path, x), filter(is_image, os.listdir(path))))\r\n # Shuffle with a fixed seed without affecting global state\r\n if use_shuffle:\r\n s = random.getstate()\r\n random.seed(1234)\r\n random.shuffle(images)\r\n random.setstate(s)\r\n return images",
"def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list",
"def feed(self, reset=True): \n if self.reuse:\n image_subdirs = get_random_image_sample(IMAGE_PACKAGE_SIZE, self.image_location, [])\n else:\n image_subdirs = get_random_image_sample(IMAGE_PACKAGE_SIZE, self.image_location, self.used_images)\n if reset:\n reset_directory(self.feed_location, self.image_location)\n images = self.move_images(image_subdirs, self.feed_location, folders=True)\n self.used_images.extend(images)\n return image_subdirs",
"def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs",
"def load_path_list(image_path, gt_path, batch_size, train = True):\r\n\r\n if train:\r\n print(\"Image Load Started..\")\r\n\r\n path_list = os.listdir(gt_path)\r\n \r\n image_size = len(path_list)\r\n Train_size = image_size // batch_size * batch_size\r\n Validation_size = image_size - Train_size\r\n \r\n if Validation_size < 10:\r\n Train_size -= batch_size\r\n Validation_size += batch_size\r\n \r\n print(\"Train data size : \", Train_size)\r\n print(\"Validation data size : \", Validation_size)\r\n else:\r\n path_list = os.listdir(gt_path)\r\n Train_size = 0\r\n Validation_size = 0\r\n print(\"Test data size : \", len(path_list))\r\n\r\n rd.shuffle(path_list)\r\n\r\n\r\n return path_list, Train_size, Validation_size",
"def load_images(path, p=1, feature=None, transform=None):\n\n images = os.listdir(path)\n images = random.sample(images, math.ceil(len(images) * p))\n\n loaded = [\n load_image(\n os.path.join(path, img),\n feature=feature, transform=transform)\n for img in images]\n\n return np.array([x for x in loaded if x is not None])",
"def test_load_fail():\n parameters = {'path': 'foo.bar'}\n\n images.load(parameters)",
"def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images",
"def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')",
"def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)",
"def load_test_data(image_path):\n raw = []\n image_filename = dict()\n count = 0\n for filename in glob.glob(image_path):\n name = os.path.basename(filename)[:-4]\n try:\n im = Image.open(filename)\n im = im.convert('L')\n im = im.resize((img_rows, img_cols))\n raw.append(np.array(im))\n image_filename[count] = name\n count += 1\n im.close()\n except IOError:\n print('Error loading image ', filename)\n return [raw, image_filename]",
"def test_RawRun_imagepaths():\n p1 = r.imagepaths[0]\n path = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n assert(os.path.samefile(p1, path))\n assert_equal(len(r.imagepaths), 6)",
"def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array",
"def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)",
"def getRandomFile(path):\n files = os.listdir(path)\n index = random.randrange(0, len(files))\n return files[index]"
]
| [
"0.7551531",
"0.7279827",
"0.6884776",
"0.67798495",
"0.67052203",
"0.6687566",
"0.66579896",
"0.66472167",
"0.6636999",
"0.66174686",
"0.6566692",
"0.6547921",
"0.6532665",
"0.65265673",
"0.6523696",
"0.64311254",
"0.6420229",
"0.6414222",
"0.6388513",
"0.6381001",
"0.63773704",
"0.6368015",
"0.6362733",
"0.63579",
"0.63488984",
"0.63350886",
"0.6322836",
"0.6306654",
"0.6305076",
"0.62978876"
]
| 0.7617784 | 0 |
Given a Spotify track id, returns the audio features from the API | def get_song_features(tid):
# dictionary of features to return
spotify_track_data = SpotifyData[tid]
features = {}
features['name'] = spotify_track_data.name
features['artists'] = spotify_track_data.artists
features['popularity'] = spotify_track_data.popularity
features['album'] = spotify_track_data.album_name
features['danceability'] = spotify_track_data.danceability
features['energy'] = spotify_track_data.energy
features['key'] = spotify_track_data.key
features['loudness'] = spotify_track_data.loudness
features['mode'] = spotify_track_data.mode
features['speechiness'] = spotify_track_data.speechiness
features['acousticness'] = spotify_track_data.acousticness
features['instrumentalness'] = spotify_track_data.instrumentalness
features['liveness'] = spotify_track_data.liveness
features['valence'] = spotify_track_data.valence
features['tempo'] = spotify_track_data.tempo
features['duration_ms'] = spotify_track_data.duration_ms
features['time_signature'] = spotify_track_data.time_signature
return features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_spotify_features(search):\n\t\n\t# Configure API credentials\n\tclient_credentials_manager = SpotifyClientCredentials(client_id=config.SPOTIFY_CID, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient_secret=config.SPOTIFY_SECRET)\n\tsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\n\t\n\t# Find song ID\n\tquery = sp.search(search)\n\tsong_id = query['tracks']['items'][0]['id']\n\n\t# Use song ID to pull metadata\n\taudio_feature = sp.audio_features(song_id)[0]\n\t\n\treturn audio_feature",
"def _get_audio_features(self, sp, trackids):\n\n cols = ['acousticness', 'danceability', 'duration_ms', 'energy',\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\n 'speechiness', 'tempo', 'time_signature', 'valence', 'id']\n\n total_track = len(trackids)\n features = []\n start = 0\n while len(features) < total_track:\n end = start + 100 if start + 100 < total_track else total_track\n\n features += sp.audio_features(tracks=trackids[start: end])\n start = start + 100\n\n return pd.DataFrame.from_records(features, columns=cols)",
"def track_features(tracks, authorizer, verbose=False):\n spotify_endpoint = 'https://api.spotify.com/v1/audio-features'\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n remainder = len(tracks)\n offset = 0\n stride = 100\n features = []\n while remainder > 0:\n params = {'ids': ','.join(tracks[offset:offset + stride])} # spotify can only process 100 tracks at a time\n\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n features += response.json()['audio_features']\n offset += stride\n remainder -= stride\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return zip(tracks, features)",
"def get_song_features(self, song_id: str) -> List[float]:\n user = self.init_user()\n user.trace = True\n features = user.audio_features(song_id)[0]\n return [features['acousticness'], features['danceability'],\n features['energy'], features['duration_ms'],\n features['instrumentalness'], features['valence'],\n features['tempo'], features['liveness'],\n features['loudness'], features['speechiness'],\n features['key']]",
"def gettrackinfo(accesstoken, playlist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n offset = 0\n\n needattributes = [track.trackid for track in playlist.tracks]\n\n while offset < len(needattributes):\n params = {'ids': ','.join(needattributes[offset:100+offset])}\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n\n response = r.json()\n\n if \"audio_features\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait correct amount\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n needinfo = True\n while needinfo:\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n response = r.json()\n if \"audio_features\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print('no error response')\n return(None)\n\n for i in range(len(response[\"audio_features\"])):\n try:\n playlist.tracks[i+offset].danceability = response[\"audio_features\"][i][\"danceability\"]\n playlist.tracks[i+offset].energy = response[\"audio_features\"][i][\"energy\"]\n playlist.tracks[i+offset].key = response[\"audio_features\"][i][\"key\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].mode = response[\"audio_features\"][i][\"mode\"]\n playlist.tracks[i+offset].speechiness = response[\"audio_features\"][i][\"speechiness\"]\n playlist.tracks[i+offset].acousticness = response[\"audio_features\"][i][\"acousticness\"]\n playlist.tracks[i+offset].instrumentalness = response[\"audio_features\"][i][\"instrumentalness\"]\n playlist.tracks[i+offset].liveness = response[\"audio_features\"][i][\"liveness\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].valence = response[\"audio_features\"][i][\"valence\"]\n playlist.tracks[i+offset].tempo = response[\"audio_features\"][i][\"tempo\"]\n playlist.tracks[i+offset].duration_ms = response[\"audio_features\"][i][\"duration_ms\"]\n playlist.tracks[i+offset].time_signature = response[\"audio_features\"][i][\"time_signature\"]\n except Exception as e:\n print('error: error getting attributes from returned JSON')\n print('this piece of json looks like:\\n{}'.format(response[\"audiofeatures\"][i]))\n\n offset = offset + len(response[\"audio_features\"])\n\n\n # t.printattributes()",
"def songfeature_songid_get(songid): # noqa: E501\n query = \"SELECT * FROM SongFeatures WHERE SongID = '{}'\".format(songid)\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def audio_features(self, track=None, tracks=None, with_cache=True, **kwargs):\n if track:\n _id = self._get_track_id(track)\n # pylint: disable=no-member\n return self._get(API.AUDIO_FEATURES_SINGLE.value.format(id=_id), **kwargs)\n\n tracks = list(map(self._get_track_id, tracks or []))\n cached_tracks = []\n if with_cache:\n with db_session:\n cached_tracks = select(a for a in AudioFeatures if a.id in tracks)[:]\n tracks = list(set(tracks) - {a.id for a in cached_tracks})\n batches = [tracks[i : i + 100] for i in range(0, len(tracks), 100)]\n audio_features = [\n self._get(API.AUDIO_FEATURES_MULTIPLE.value, ids=\",\".join(t), **kwargs)\n for t in batches\n ]\n with db_session:\n audio_features = [\n AudioFeatures.from_dict(t) for t in chain.from_iterable(audio_features)\n ] + cached_tracks\n return audio_features",
"def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list",
"def append_audio_features(df,spotify_auth, return_feat_df = False):\r\n audio_features = spotify_auth.audio_features(df[\"track_id\"][:])\r\n #catch and delete songs that have no audio features\r\n if None in audio_features:\r\n NA_idx=[i for i,v in enumerate(audio_features) if v == None]\r\n df.drop(NA_idx,inplace=True)\r\n for i in NA_idx:\r\n audio_features.pop(i)\r\n assert len(audio_features) == len(df[\"track_id\"][:])\r\n feature_cols = list(audio_features[0].keys())[:-7]\r\n features_list = []\r\n for features in audio_features:\r\n try:\r\n song_features = [features[col] for col in feature_cols]\r\n features_list.append(song_features)\r\n except TypeError:\r\n pass\r\n df_features = pd.DataFrame(features_list,columns = feature_cols)\r\n df = pd.concat([df,df_features],axis = 1)\r\n if return_feat_df == False:\r\n return df\r\n else:\r\n return df,df_features",
"def get_track_info(track_id):\n items = spotify.track(track_id)\n name = items[\"name\"]\n artists_names = \", \".join([\n items[\"artists\"][x][\"name\"]\n for x in range(len(items[\"artists\"]))\n ])\n album_artists = \", \".join([\n items[\"album\"][\"artists\"][x][\"name\"]\n for x in range(len(items[\"album\"][\"artists\"]))\n ])\n album_type = items[\"album\"][\"album_type\"]\n album_name = items[\"album\"][\"name\"]\n album_release = items[\"album\"][\"release_date\"]\n album_track_number = items[\"track_number\"]\n track_duration = items[\"duration_ms\"]\n images_link = items[\"album\"][\"images\"]\n max_image_res = 0\n max_icon_size = 0\n image_link = \"\"\n icon_link = \"\"\n for image in images_link:\n if image[\"height\"] * image[\"width\"] > max_image_res:\n image_link = image[\"url\"]\n max_image_res = image[\"height\"] * image[\"width\"]\n if image[\"height\"] < 400:\n if image[\"height\"] > max_icon_size:\n max_icon_size = image[\"height\"]\n icon_link = image[\"url\"]\n track = {\"name\": name,\n \"Artist(s)\": artists_names,\n \"Album Artist(s)\": album_artists,\n \"Album Type\": album_type,\n \"Album Name\": album_name,\n \"Album Release\": album_release,\n \"Track Number\": album_track_number,\n \"Track Duration (ms)\": track_duration,\n \"Image Link\": image_link,\n \"Icon Link\": icon_link\n }\n\n for artist in artists_names.split(', '):\n \"\"\"\n Checks for lyrics with song name and artist names\n combination until one is found.\n \"\"\"\n try:\n lyrics = lyricwikia.get_lyrics(artist, name)\n track['lyrics'] = lyrics\n break\n except lyricwikia.LyricsNotFound:\n pass\n\n return track",
"def get_audio_features_of_tracks(self, playlist_items: List[Dict]):\n audio_features_vectors = []\n for track_object in playlist_items:\n track_id = _get_id(track_object)\n track_features = self.spotify_client.get_audio_features(track_id)\n audio_features_vectors.append(list(track_features.values()))\n return np.array([vec for vec in audio_features_vectors])",
"def get_track(a=None, id=0):\n return {\n 'track':'info'\n }",
"def songfeature_get(): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def get_tracklist_features(tracks):\n\n # first we construct a list of all track ids and tracknames\n track_ids = []\n track_names = []\n for collection_type in tracks:\n tid = collection_type['id']\n if tid:\n track_ids.append(collection_type['id'])\n track_name = f'{collection_type[\"artists\"][0][\"name\"]} - {collection_type[\"name\"]}'\n track_names.append(track_name)\n # we can only load data in batches\n batch_size = 50\n offset = 0\n\n features = []\n\n while offset + batch_size <= len(track_ids):\n # get one batch of tracks per iteration\n new_features = SP.audio_features(track_ids[offset:offset+batch_size])\n\n # we want to add the trackname to the dataframe\n for i, feature in enumerate(new_features):\n feature['name'] = track_names[offset+i]\n features += new_features\n\n offset += batch_size\n\n # get the remaining tracks that couldnt fill a batch\n features += SP.audio_features(track_ids[offset:])\n return pd.DataFrame(features)",
"def get_tempo(track_id: str) -> float:\n token = _get_token()\n headers = dict(Authorization=f'Bearer {token}')\n endpoint = f'https://api.spotify.com/v1/audio-features/{track_id}'\n response = requests.get(endpoint, headers=headers)\n if response.status_code == 200:\n return response.json().get('tempo')\n else:\n raise SpotifyAPIError(response.json())",
"def get_playlist_feats(playlist_id):\r\n sourcePlaylistID = playlist_id\r\n sourcePlaylist = sp.user_playlist(username, sourcePlaylistID);\r\n tracks = sourcePlaylist[\"tracks\"];\r\n songs = tracks[\"items\"];\r\n\r\n track_ids = []\r\n track_names = []\r\n track_artists = []\r\n\r\n\r\n for i in range(0, len(songs)):\r\n if songs[i]['track']['id'] != None: # Removes the local tracks in your playlist if there is any\r\n track_ids.append(songs[i]['track']['id'])\r\n track_names.append(songs[i]['track']['name'])\r\n track_artists.append(songs[i]['track']['artists'])\r\n\r\n\r\n features = []\r\n for i in range(0,len(track_ids)):\r\n audio_features = sp.audio_features(track_ids[i])[0]\r\n track_popularity = {'popularity': sp.track(track_ids[i])['popularity']}\r\n genre = {'genres': sp.artist(track_artists[i][0]['uri'])['genres']}\r\n audio_features = dict(audio_features, **track_popularity, **genre)\r\n features.append(audio_features)\r\n\r\n\r\n playlist_df = pd.DataFrame(features, index = track_names)\r\n return playlist_df",
"def get_infos(artist, track):\n params = {\n \"include\" : \"minimal\",\n \"method\" : \"track.getInfo\",\n \"api_key\" : api_key,\n \"artist\" : artist,\n \"track\" : track,\n \"format\" : \"json\",\n }\n response = requests.get(\"http://ws.audioscrobbler.com/2.0\", params = params, verify = False)\n track_infos = json.loads(response.text)\n if 'error' in track_infos:\n return None\n return track_infos",
"def get_tracks(num=1):\n pass",
"def get_audio_features(uri):\n try:\n uri = str(uri)\n res = re.findall(r':(?: *([\\w.-]+):)', uri)\n str_res = ' '.join([str(word) for word in res])\n\n if str_res in ['playlist', 'userplaylist']:\n # from the playlist get URIs for each artist\n artist_uris_total = get_artists_from(uri)\n # from artist uris get a list of album uris\n albums_uris_total = []\n for artist_uri in artist_uris_total:\n album_uris = get_albums_from(artist_uri)\n albums_uris_total.extend(album_uris)\n # from a list of albums get tracks\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'artist':\n albums_uris_total = get_albums_from(uri)\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'album':\n track_uris_total = get_tracks_from(uri)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'track':\n features_to_db(uri)\n\n except Exception as e:\n print(\"Error processing {}: {}\".format(uri, e))\n raise e\n\n else:\n DB.session.commit()",
"def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged",
"def songfeature_filter_get(songid=None, genre=None, artist=None, name=None): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n multi_flag = \"WHERE\"\n if genre and not artist:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID,\n Songs.SongName\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n AND Songs.SongGenre = '{}'\n \"\"\".format(genre)\n multi_flag = \"AND\"\n\n if artist and not genre:\n #Query too complicated, separate entity\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n \"\"\".format(artist)\n multi_flag = \"AND\"\n\n if artist and genre:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness, Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n AND Songs.SongGenre = '{}'\n \"\"\".format(artist, genre)\n\n if songid:\n query = query + \" {} SongFeatures.SongID = '{}'\".format(songid)\n\n if name:\n query = query + \" JOIN Songs ON Songs.SongID = SongFeatures.SongID WHERE Songs.SongName = '{}'\".format(name)\n\n results = query_to_dict(query)\n features_list = []\n\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def search_for_tracks(album_id):\n \n track_results = spotifyObject.album_tracks(album_id)\n track_results = track_results['items']\n ids = [track['id'] for track in track_results]\n\n return ids",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj",
"def get_tracks(search_string=None):\n if search_string is None:\n print('Please use a search string with get_tracks function')\n exit(0)\n item_type = \"tracks\"\n info_dict = spotify.search(q=search_string, limit=10, type='track')\n items = info_dict[item_type][\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"album\"][\"name\"]\n album_type = items[i][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"artists\"]))\n ])\n track_name = items[i][\"name\"]\n track_id = items[i][\"id\"]\n track_popularity = items[i][\"popularity\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": track_popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def getTracks(playlist_id):\n\n tracks = crud.getTracks(session, playlist_id)\n\n return tracks",
"def get(self, request: Request, track_id: str) -> Response:\n track = Track.objects.filter(id=track_id)\n if not track:\n return Response(\n f\"Track id '{track_id}' not found\", status=status.HTTP_404_NOT_FOUND\n )\n track_serd = TrackSerializer(track[0])\n return Response(track_serd.data, status=status.HTTP_200_OK)",
"def get_playlist_tracks(playlist_id):\n\n results = spotifyObject.playlist_tracks(playlist_id)\n tracks = results['items']\n while results['next']:\n results = spotifyObject.next(results)\n tracks.extend(results['items'])\n return tracks",
"def get_all_tracks():\n query_format = f\"track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0\n\n while search_string_letter_ids is not None:\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"tracks.json\")\n return file",
"def get_track_adapter(json):\n return TrackInfo(\n json['name'], # Track name\n json['artists'][0]['name'], # Artist\n json['album']['name'], # Album name\n json['uri'], # Track id\n int(json['duration_ms']) / 1000) # Track length in seconds",
"def get(self, track_id):\n\t\tdb = getattr(g, 'db', None)\n\n\t\twith db as cur:\n\t\t\tqry = \"SELECT title,path FROM music WHERE id=%s;\"\n\t\t\tcur.execute(qry, (track_id,))\n\t\t\tresult = cur.fetchone()\n\t\t\tif result != None:\n\t\t\t\treturn {'status':'TRACK_FOUND', 'title':result[0], 'path':result[1]}\n\n\t\treturn {'status':'TRACK_UNKNOWN'}"
]
| [
"0.7792372",
"0.71938753",
"0.7018876",
"0.6849401",
"0.6807805",
"0.66801125",
"0.65858024",
"0.6479527",
"0.6410977",
"0.64090264",
"0.63816774",
"0.6330502",
"0.6310716",
"0.6288286",
"0.62875885",
"0.62578976",
"0.60792536",
"0.60729116",
"0.6036491",
"0.5983391",
"0.59816515",
"0.592467",
"0.58677554",
"0.5779093",
"0.57753736",
"0.5745322",
"0.5711435",
"0.5686141",
"0.56750697",
"0.5659994"
]
| 0.7500363 | 1 |
Given a list of seed track ids and candidate_tids, returns a dataframe summarizing the differences between each candidate track's features and seed features. | def compute_df_features(seed_tids, candidate_tids, relevences):
seed_features = compute_seedset_features(seed_tids)
# drop candidate songs w/0 all features
candidate_df = get_features_dataframe(candidate_tids)
candidate_df['relevence'] = relevences
candidate_df.dropna(axis=0)
df = {}
df['relevence'] = candidate_df['relevence']
df['artist_overlap'] = [
1 if len(np.intersect1d(x, seed_features['artists'])) else 0
for x in candidate_df['artists']
]
df['album_overlap'] = [
1 if x in seed_features['albums'] else 0
for x in candidate_df['album']
]
numeric = [
'popularity', 'danceability', 'energy', 'key', 'loudness',
'mode', 'speechiness', 'acousticness', 'instrumentalness',
'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature'
]
for feat in numeric:
diff = (candidate_df[feat] - seed_features[feat]).abs()
df[f'{feat}_diff'] = diff
df = pd.DataFrame.from_dict(df)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_seedset_features(tids):\n seed_dataframe = get_features_dataframe(tids)\n summary_feats = {}\n summary_feats['names'] = list(seed_dataframe['name'])\n summary_feats['artists'] = list(\n itertools.chain.from_iterable(seed_dataframe['artists'])\n )\n summary_feats['albums'] = list(seed_dataframe['album'])\n numeric = [\n 'popularity', 'danceability', 'energy', 'key', 'loudness',\n 'mode', 'speechiness', 'acousticness', 'instrumentalness',\n 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature'\n ]\n for feat in numeric:\n summary_feats[feat] = seed_dataframe[feat].dropna().mean()\n return summary_feats",
"def get_features_dataframe(tids):\n\n Data = {}\n for tid in tids:\n Data[tid] = get_song_features(tid)\n return pd.DataFrame.from_dict(Data, orient='index')",
"def read_triplets(seed_candidates):\n if \"pickle\" in seed_candidates:\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n with open(seed_candidates, 'rb') as f:\n data = pickle.load(f)\n new_data = []\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n # idx = int(dd[0][10:])\n # new_data.append((idx, dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n column_names = ['evtid', 'h1', 'h2', 'h3']\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)\n new_data.append(df_seed_tmp)\n df_seed = pd.concat(new_data)\n else:\n df_seed = pd.read_csv(seed_candidates, header=None,\n names=column_names)\n return df_seed",
"def generate_features(df, suffix = '_diff_', step=1, relevant_features=[], ignore_columns=[]):\n # cols = self.get_active_columns(df, ignore_columns)\n cols = relevant_features\n deltas = {}\n for c in cols:\n deltas['%s%s'% (c, suffix)] = subtract_from_prev_val(df, c, step=step)\n df_new = pd.DataFrame(deltas)\n return df_new",
"def create_features_from_vids():\n\n dtype = get_dtype()\n feature_extractor = FeatureExtractor()\n feature_extractor.eval()\n feature_extractor.type(dtype)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n dataset = StrokesDataset('../dataset/my_dataset/patches/labels.csv', '../dataset/my_dataset/patches/',\n transform=transforms.Compose([ToTensor(), normalize]), use_features=False)\n batch_size = 32\n count = 0\n for vid in dataset:\n count += 1\n frames = vid['frames']\n print(len(frames))\n\n features = []\n for batch in frames.split(batch_size):\n batch = batch.type(dtype)\n with torch.no_grad():\n # forward pass\n batch_features = feature_extractor(batch)\n features.append(batch_features.cpu().numpy())\n\n df = pd.DataFrame(np.concatenate(features, axis=0))\n\n outfile_path = os.path.join('../dataset/my_dataset/patches/', os.path.splitext(vid['vid_name'])[0] + '.csv')\n df.to_csv(outfile_path, index=False)\n\n print(count)",
"def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df",
"def get_tracklist_features(tracks):\n\n # first we construct a list of all track ids and tracknames\n track_ids = []\n track_names = []\n for collection_type in tracks:\n tid = collection_type['id']\n if tid:\n track_ids.append(collection_type['id'])\n track_name = f'{collection_type[\"artists\"][0][\"name\"]} - {collection_type[\"name\"]}'\n track_names.append(track_name)\n # we can only load data in batches\n batch_size = 50\n offset = 0\n\n features = []\n\n while offset + batch_size <= len(track_ids):\n # get one batch of tracks per iteration\n new_features = SP.audio_features(track_ids[offset:offset+batch_size])\n\n # we want to add the trackname to the dataframe\n for i, feature in enumerate(new_features):\n feature['name'] = track_names[offset+i]\n features += new_features\n\n offset += batch_size\n\n # get the remaining tracks that couldnt fill a batch\n features += SP.audio_features(track_ids[offset:])\n return pd.DataFrame(features)",
"def _get_audio_features(self, sp, trackids):\n\n cols = ['acousticness', 'danceability', 'duration_ms', 'energy',\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\n 'speechiness', 'tempo', 'time_signature', 'valence', 'id']\n\n total_track = len(trackids)\n features = []\n start = 0\n while len(features) < total_track:\n end = start + 100 if start + 100 < total_track else total_track\n\n features += sp.audio_features(tracks=trackids[start: end])\n start = start + 100\n\n return pd.DataFrame.from_records(features, columns=cols)",
"def get_features_test(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M",
"def create_feables(matches, fifa_stats, bookkeepers, verbose=True):\n\n if verbose:\n print(\"Generating match features...\")\n start = time()\n\n # Get match features for all matches (apply to each row)\n match_stats = matches.apply(lambda match: get_match_features(match, matches), axis=1)\n\n # Create dummies for league ID feature\n # deleting this as i am only looking at EPL\n # dummies = pd.get_dummies(match_stats['league_id']).rename(columns=lambda x: 'League_' + str(x))\n # match_stats = pd.concat([match_stats, dummies], axis=1)\n match_stats.drop(['league_id'], inplace=True, axis=1)\n\n end = time()\n if verbose:\n print(\"Match features generated in {:.1f} minutes\".format((end - start) / 60))\n\n if verbose:\n print(\"Generating match labels...\")\n start = time()\n\n # Create match labels\n labels = matches.apply(get_match_label, axis=1)\n end = time()\n if verbose:\n print(\"Match labels generated in {:.1f} minutes\".format((end - start) / 60))\n\n # if verbose == True:\n # print(\"Generating bookkeeper data...\")\n # start = time()\n # Get bookkeeper quotas for all matches\n # bk_data = get_bookkeeper_data(matches, bookkeepers, horizontal=True)\n # bk_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']\n # end = time()\n # if verbose == True:\n # print(\"Bookkeeper data generated in {:.1f} minutes\".format((end - start) / 60))\n\n # Merges features and labels into one frame\n features = pd.merge(match_stats, fifa_stats, on='match_api_id', how='left')\n # features = pd.merge(features, bk_data, on='match_api_id', how='left')\n # features = match_stats\n feables = pd.merge(features, labels, on='match_api_id', how='left')\n\n # Drop NA values\n feables.dropna(inplace=True)\n\n # Return preprocessed data\n return feables",
"def add_features(df_in, rolling_win_size,columns_to_treat):\n \n av_cols = [nm+'__av' for nm in columns_to_treat]\n sd_cols = [nm+'__sd' for nm in columns_to_treat]\n min_cols =[nm+'__min' for nm in columns_to_treat]\n max_cols =[nm+ '__max' for nm in columns_to_treat]\n \n df_out = pd.DataFrame()\n \n ws = rolling_win_size\n \n #calculate rolling stats for each engine (engine.id)\n \n for m_id in pd.unique(df_in['id.engine.id']):\n \n # get a subset for each engine sensors\n df_engine = df_in[df_in['id.engine.id'] == m_id]\n df_sub = df_engine[columns_to_treat]\n\n \n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = av_cols\n \n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sd_cols\n\n # get rolling rolling max for the subset\n max = df_sub.rolling(ws, min_periods=1).max()\n max.columns = max_cols\n \n # get the rolling standard deviation for the subset\n min = df_sub.rolling(ws, min_periods=1).min().fillna(0)\n min.columns = min_cols\n \n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd,min,max], axis=1)\n \n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n \n return df_out",
"def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr",
"def generate_testing_matrix(full_df, feat_days):\n pred_ticker = full_df.ticker.unique()[0]\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n dfml.rename({'percent_change_feat': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_feat']]\n help_df.rename({'percent_change_feat': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml.drop('percent_change_pred', axis=1)",
"def create_input(ids):\n\n df = ids.copy()\n df = create_dataframes_with_features(df)\n lines = to_vwlines(df)\n\n # release memory\n df = None \n gc.collect()\n return lines",
"def sift_candidates(\n candidates: List[Union[PrecoveryCandidate, FrameCandidate]]\n) -> Tuple[List[PrecoveryCandidate], List[FrameCandidate]]:\n precovery_candidates = []\n frame_candidates = []\n for candidate in candidates:\n if isinstance(candidate, PrecoveryCandidate):\n precovery_candidates.append(candidate)\n elif isinstance(candidate, FrameCandidate):\n frame_candidates.append(candidate)\n else:\n raise TypeError(f\"Unexpected candidate type: {type(candidate)}\")\n\n precovery_candidates = sorted(\n precovery_candidates, key=lambda c: (c.mjd, c.observation_id)\n )\n frame_candidates = sorted(frame_candidates, key=lambda c: c.exposure_mjd_mid)\n\n return precovery_candidates, frame_candidates",
"def generate_training_df(df, id_csv):\n\n train_df = fetch_training_df(df)\n \n for column_name in ['song_id', 'track_id']:\n train_df[column_name] = train_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n \n train_df.drop(['year'], axis=1, inplace=True)\n train_df = merge_id_into_df(train_df, id_csv)\n train_df.drop(['song_id', 'track_id'], axis=1, inplace=True)\n\n return train_df",
"def merge_dataframes(df_metrics, df_tweets):\r\n df_tweets = df_tweets.rename(columns={'id': 'tweet_ID'})\r\n df_tweets[['tweet_ID']] = df_tweets[['tweet_ID']].astype('int64')\r\n df_metrics[['tweet_ID']] = df_metrics[['tweet_ID']].astype(\r\n \"float64\").astype('int64')\r\n ans = df_tweets.join(\r\n df_metrics.set_index('tweet_ID'), on='tweet_ID', how='inner').dropna()\r\n return ans",
"def id_measures(gtDB, trackDB, threshold):\n res_ids = np.unique(trackDB[:, 1])\n gt_ids = np.unique(gtDB[:, 1])\n\n n_ids_res = len(res_ids)\n n_ids_gt = len(gt_ids)\n\n groundtruth = [gtDB[np.where(gtDB[:, 1] == gt_ids[i])[0], :] for i in range(n_ids_gt)]\n prediction = [trackDB[np.where(trackDB[:, 1] == res_ids[i])[0], :] for i in range(n_ids_res)]\n\n cost = np.zeros((n_ids_gt + n_ids_res, n_ids_res + n_ids_gt), dtype=float)\n cost[n_ids_gt:, :n_ids_res] = sys.maxsize # float('inf')\n cost[:n_ids_gt, n_ids_res:] = sys.maxsize # float('inf')\n\n fp = np.zeros(cost.shape)\n fn = np.zeros(cost.shape)\n\n # cost matrix of all trajectory pairs\n cost_block, fp_block, fn_block = cost_between_gt_pred(groundtruth, prediction, threshold)\n\n cost[:n_ids_gt, :n_ids_res] = cost_block\n fp[:n_ids_gt, :n_ids_res] = fp_block\n fn[:n_ids_gt, :n_ids_res] = fn_block\n\n # computed trajectory match no groundtruth trajectory, FP\n for i in range(n_ids_res):\n cost[i + n_ids_gt, i] = prediction[i].shape[0]\n fp[i + n_ids_gt, i] = prediction[i].shape[0]\n\n # groundtruth trajectory match no computed trajectory, FN\n for i in range(n_ids_gt):\n cost[i, i + n_ids_res] = groundtruth[i].shape[0]\n fn[i, i + n_ids_res] = groundtruth[i].shape[0]\n try:\n matched_indices = linear_assignment(cost)\n except:\n import pdb\n pdb.set_trace()\n\n nbox_gt = sum([groundtruth[i].shape[0] for i in range(n_ids_gt)])\n nbox_st = sum([prediction[i].shape[0] for i in range(n_ids_res)])\n\n IDFP = 0\n IDFN = 0\n for matched in zip(*matched_indices):\n IDFP += fp[matched[0], matched[1]]\n IDFN += fn[matched[0], matched[1]]\n\n IDTP = nbox_gt - IDFN\n assert IDTP == nbox_st - IDFP\n\n IDP = IDTP / (IDTP + IDFP) * 100 # IDP = IDTP / (IDTP + IDFP)\n IDR = IDTP / (IDTP + IDFN) * 100 # IDR = IDTP / (IDTP + IDFN)\n # IDF1 = 2 * IDTP / (2 * IDTP + IDFP + IDFN)\n IDF1 = 2 * IDTP / (nbox_gt + nbox_st) * 100\n\n measures = edict()\n measures.IDP = IDP\n measures.IDR = IDR\n measures.IDF1 = IDF1\n measures.IDTP = IDTP\n measures.IDFP = IDFP\n measures.IDFN = IDFN\n measures.nbox_gt = nbox_gt\n measures.nbox_st = nbox_st\n\n return measures",
"def get_other_tracks(input_df: pd.DataFrame, track_id_list:List[int]) -> List[Tuple[np.ndarray, int]]:\n split_time_stamp = np.unique(input_df[\"TIMESTAMP\"].values)[19]\n others_list = []\n for track_id in track_id_list:\n track = input_df[(input_df[\"TRACK_ID\"] == track_id) & (input_df[\"OBJECT_TYPE\"] != \"AGENT\")][[\"TIMESTAMP\", \"X\", \"Y\"]]\n if len(track) > 0:\n obs = track[track[\"TIMESTAMP\"] <= split_time_stamp][[\"X\",\"Y\"]].to_numpy()\n target = track[track[\"TIMESTAMP\"] > split_time_stamp][[\"X\",\"Y\"]].to_numpy()\n if len(obs) > 0:\n others_list.append((np.concatenate((obs, target)), len(obs)))\n # agent_traj = np.column_stack((agent_x, agent_y))\n return others_list",
"def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += [(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df",
"def add_features(df_in, rolling_win_size=15):\n cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']\n other_cols = []\n for i in df_in.columns:\n if i not in cols:\n other_cols.append(i)\n all_cols = cols + other_cols\n\n df_in = df_in[all_cols]\n\n sensor_cols = []\n for i in df_in.columns[5:]:\n sensor_cols.append(i)\n\n sensor_av_cols = [nm+'_av' for nm in sensor_cols]\n sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]\n\n df_out = pd.DataFrame()\n\n ws = rolling_win_size\n\n #calculate rolling stats for each engine id\n\n for m_id in pd.unique(df_in.Turbine_ID):\n\n # get a subset for each engine sensors\n df_engine = df_in[df_in['Turbine_ID'] == m_id]\n df_sub = df_engine[sensor_cols]\n\n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = sensor_av_cols\n\n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sensor_sd_cols\n\n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd], axis=1)\n\n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )\n return df_out",
"def split(interactions: pd.DataFrame, p: float = 0.25) -> Tuple[pd.DataFrame, pd.DataFrame]:\n test = interactions.groupby('track_id').sample(frac=p)\n rows = set((a, b) for _, (a, b, _) in test.iterrows())\n train_mask = [i for i, (_, (a, b, _)) in tqdm(enumerate(interactions.iterrows()), desc=\"Constructing train-set\",\n total=len(interactions)) if (a, b) not in rows]\n train = interactions.iloc[train_mask]\n\n return train, test",
"def get_feature_vector(user_id: str, session: str) -> DataFrame:\n\n #Find the time windows during which the reader is doing the desired task\n activity_data = read_file(user_id, session, 'Activity.csv')\n task_number = mode(activity_data['TaskID'])\n task_name = task_names[(task_number - 1) % len(task_names)]\n tap_windows = get_tap_events(user_id, session)\n data = get_user_session_data(user_id, session)\n add_magnitude_columns(data)\n add_columns_for_taps(data, tap_windows)\n mark_tap_start_and_end(data, delta_in_ms = 200)\n\n column_names = get_feature_names()\n\n #A feature vector for each tap, to be filled in subsequently:\n featureVectors = pd.DataFrame(columns = column_names)\n\n for tap_file in tap_file_names:\n tap_feature = tap_file_to_feature_name[tap_file]\n print(tap_feature)\n window_start_indices = data[data[tap_feature] == 4].index\n window_end_indices = data[data[tap_feature] == 5].index\n if len(window_start_indices) == 0:\n continue\n \n for i in range(len(window_start_indices)):\n start, end = window_start_indices[i], window_end_indices[i]\n window_of_interest = data[start : end + 1]\n features = feature_list(user_id, session, tap_feature, task_name, window_of_interest)\n if features != None:\n featureVectors.loc[featureVectors.shape[0]] = features\n \n return featureVectors",
"def sample_video_df(video_data_df, video_keys):\n feature_columns = list(video_data_df.columns)[:-2]\n output_features =[]\n missing_keys = 0\n missing_key_vals = []\n for user, trial in video_keys:\n user_sample = video_data_df[\"user\"] == user\n trial_sample = video_data_df[\"trial\"] == trial\n one_data_point_df = video_data_df[(user_sample) & (trial_sample)]\n if len(one_data_point_df) > 0 :\n # print()\n features = one_data_point_df[feature_columns]\n one_data_np = features.values.astype(np.float32)\n else:\n #TODO: infer the shape from inputs\n missing_keys+=1\n missing_key_vals.append((user, trial))\n one_data_np = np.zeros((60,714)).astype(np.float32)\n output_features.append(one_data_np)\n if missing_keys > 0:\n # print(\"pct keys missing = \", missing_keys, len(video_keys), missing_key_vals)\n pass\n else:\n pass\n # print(\"NO KEYS MISSING\")\n return np.array(output_features)",
"def create_pairs_list(all_samples):\n dfs = []\n # Find match normals for tumor samples only\n tumor_samples = all_samples[all_samples.sample_type==\"Tumor\"]\n i = 0\n for index, row in tumor_samples.iterrows():\n # Find all samples from same individual (same individual_id, different sample_id)\n patient_samples = all_samples[ (all_samples['participant_id'] == row['participant_id']) \\\n & (all_samples['entity:sample_id'] != row['entity:sample_id']) ]\n\n # NOTE: If more than one match tumor tissue or match normal found, select first one found.\n # The match normal is used to compute allelic fractions in Mutect2, so for now we ignore the conditions it workspaces grown in.\n\n ######## Match normal: Add match normal\n match_normal = patient_samples[ patient_samples['sample_type'] == \"Normal\"]\n # > No match normal found\n if match_normal.empty: \n control_sample_id = \"NA\"\n control_sample_tsca_id = \"NA\"\n # > Match normal found\n elif match_normal.shape[0] > 0:\n match_normal = match_normal.iloc[0]\n control_sample_id = match_normal['entity:sample_id']\n control_sample_tsca_id = match_normal['tsca_id']\n \n # Create DF with Tumor/Normal pair set\n pair_id = \"%s_%s_TN\" % (row['entity:sample_id'], control_sample_id)\n df_dict = {'entity:pair_id': pair_id, 'case_sample_id': row['entity:sample_id'], \\\n 'control_sample_id': control_sample_id, 'participant_id': row['participant_id'], 'match_type': 'tumor_normal', \\\n 'case_sample_tsca_id': row['tsca_id'], 'control_sample_tsca_id': control_sample_tsca_id}\n dfs.append(pd.DataFrame(df_dict, index=[i], columns=df_dict.keys()))\n i+=1\n \n ######## Tumor tissue: Add primary tumor tissue\n match_primary_tumor = patient_samples[ ( patient_samples['external_id_validation'].str.contains('primary|prim|tissue|tiss|Primary|Tissue') ) & \\\n \t\t\t\t\t\t\t\t\t\t(patient_samples['sample_type'] == \"Tumor\")]\n # > No primary tumor tissue found\n if match_primary_tumor.empty:\n control_sample_id = \"NA\"\n control_sample_tsca_id = \"NA\"\n # > Sample itself is a primary tumor tissue\n elif any(substring in row['external_id_validation'] for substring in ['primary', 'prim', 'tissue', 'tiss', 'Primary', 'Tissue']):\n control_sample_id = \"NA\"\n control_sample_tsca_id = \"NA\"\n # > Tumor tissue found\n elif match_primary_tumor.shape[0] > 0:\n match_primary_tumor = match_primary_tumor.iloc[0]\n control_sample_id = match_primary_tumor['entity:sample_id']\n control_sample_tsca_id = match_primary_tumor['tsca_id']\n \n # Create DF with Tumor/Primary pair set\n pair_id = \"%s_%s_TP\" % (row['entity:sample_id'], control_sample_id)\n df_dict = {'entity:pair_id': pair_id, 'case_sample_id': row['entity:sample_id'], \\\n 'control_sample_id': control_sample_id, 'participant_id': row['participant_id'], 'match_type': 'tumor_primary', \\\n 'case_sample_tsca_id': row['tsca_id'], 'control_sample_tsca_id': control_sample_tsca_id}\n dfs.append(pd.DataFrame(df_dict, index=[i], columns=df_dict.keys()))\n i+=1\n \n return pd.concat(dfs, axis=0)",
"def add_features(df_in, rolling_win_size=15):\n\n sensor_cols = []\n index = df_in.columns.get_loc('TTF')\n for i in df_in.columns[2:index]:\n sensor_cols.append(i)\n\n sensor_av_cols = [nm+'_av' for nm in sensor_cols]\n sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]\n\n df_out = pd.DataFrame()\n\n ws = rolling_win_size\n\n #calculate rolling stats for each engine id\n\n for m_id in pd.unique(df_in.Turbine_ID):\n\n # get a subset for each engine sensors\n df_engine = df_in[df_in['Turbine_ID'] == m_id]\n df_sub = df_engine[sensor_cols]\n\n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = sensor_av_cols\n\n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sensor_sd_cols\n\n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd], axis=1)\n\n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )\n return df_out",
"def _convert_tracks_to_tabular_format(tracks: List[Track]) -> pd.DataFrame:\n track_dfs: List[pd.DataFrame] = []\n\n for track in tracks:\n track_df = pd.DataFrame()\n\n observed_states: List[bool] = []\n timesteps: List[int] = []\n positions_x: List[float] = []\n positions_y: List[float] = []\n headings: List[float] = []\n velocities_x: List[float] = []\n velocities_y: List[float] = []\n\n for object_state in track.object_states:\n observed_states.append(object_state.observed)\n timesteps.append(object_state.timestep)\n positions_x.append(object_state.position[0])\n positions_y.append(object_state.position[1])\n headings.append(object_state.heading)\n velocities_x.append(object_state.velocity[0])\n velocities_y.append(object_state.velocity[1])\n\n track_df[\"observed\"] = observed_states\n track_df[\"track_id\"] = track.track_id\n track_df[\"object_type\"] = track.object_type.value\n track_df[\"object_category\"] = track.category.value\n track_df[\"timestep\"] = timesteps\n track_df[\"position_x\"] = positions_x\n track_df[\"position_y\"] = positions_y\n track_df[\"heading\"] = headings\n track_df[\"velocity_x\"] = velocities_x\n track_df[\"velocity_y\"] = velocities_y\n\n track_dfs.append(track_df)\n\n return pd.concat(track_dfs, ignore_index=True)",
"def create_data_frame(td_tags):\n col_names = ('County Name', 'DJT Votes', 'DJT pct', 'HRC Votes', 'HRC pct',\n 'GEJ Votes', 'GEJ pct', 'JES Votes', 'JES pct', 'WRT Votes',\n 'WRT pct', 'Total Votes', 'Total Turnout')\n\n n_cols = len(col_names)\n n_tags = len(td_tags)\n n_rows = n_tags // n_cols\n\n def process_row(tags):\n return dict(zip(col_names, [tag.text.strip() for tag in tags]))\n\n data = [process_row(td_tags[i*n_cols:(i+1)*n_cols]) for i in range(n_rows)]\n df = pd.DataFrame(data)\n pct_cols = ['HRC pct', 'DJT pct', 'GEJ pct', 'JES pct']\n df[pct_cols] = df[pct_cols].applymap(lambda pct: float(pct[:-1]))\n\n return df",
"def get_train_and_test_sets(self,\n all_trades_df: pd.DataFrame,\n train_start: pd.Timestamp,\n train_end: pd.Timestamp,\n test_end: pd.Timestamp) -> pd.DataFrame:\n \n # training set\n feature_and_target_cutoff = train_end - pd.Timedelta('120D') # last 4 months of training set\n trades_features = all_trades_df[(all_trades_df['trade_date_dt'] >= train_start) & (all_trades_df['trade_date_dt'] < feature_and_target_cutoff)]\n trades_target = all_trades_df[(all_trades_df['trade_date_dt'] >= feature_and_target_cutoff) & (all_trades_df['trade_date_dt'] < train_end)]\n training_set = self.get_features_and_target(trades_features, trades_target)\n training_set['train_or_test'] = 'train'\n\n # test set\n trades_features = all_trades_df[(all_trades_df['trade_date_dt'] >= train_start) & (all_trades_df['trade_date_dt'] < train_end)]\n trades_target = all_trades_df[(all_trades_df['trade_date_dt'] >= train_end) & (all_trades_df['trade_date_dt'] < test_end)]\n test_set = self.get_features_and_target(trades_features, trades_target)\n test_set['train_or_test'] = 'test'\n \n customer_df = pd.concat([training_set, test_set], sort=False)\n customer_df.fillna(False, inplace=True)\n customer_df.index.name = 'sf_account_id'\n \n return customer_df",
"def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train"
]
| [
"0.6472781",
"0.6158789",
"0.5906265",
"0.54349333",
"0.54249525",
"0.51813656",
"0.51468873",
"0.51135445",
"0.49663937",
"0.4961182",
"0.49079064",
"0.49037227",
"0.4902718",
"0.4884296",
"0.48739198",
"0.48463288",
"0.4835363",
"0.48272344",
"0.482661",
"0.4811474",
"0.4799323",
"0.47973368",
"0.47760317",
"0.47721964",
"0.4756129",
"0.4744384",
"0.47267962",
"0.47176242",
"0.46816868",
"0.46731985"
]
| 0.74968517 | 0 |
Given an input playlist and the factors computed in stage 1, returns a DataFrame for stage 2 | def compute_df(playlist, song_factors, playlist_factors=None, method='ensemble'):
playlist = playlist.str.replace('spotify:track:', '')
playlist_set = set(playlist)
seed_ids = []
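    # Draw two distinct seed tracks that also exist in the factor model's index
    # (tid_to_idx is assumed to map track ids to model rows).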
while len(seed_ids) < 2:
rand = list(playlist.sample(n=1))[0]
if rand in tid_to_idx and rand not in seed_ids:
seed_ids.append(rand)
playlist_set.remove(seed_ids[0])
playlist_set.remove(seed_ids[1])
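    # Candidate generation: pull ~10k similar tracks from the WRMF model, either from
    # the seed tracks alone, from similar playlists, or from an ensemble of both.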
if method == 'song':
wrmf_output = wrmf_helpers.get_top_similar_from_tracks(
song_factors,
seed_ids,
n_similar_songs=10000,
verbose=False
)
elif method == 'playlist':
wrmf_output = wrmf_helpers.get_top_similar_from_playlists(
song_factors,
playlist_factors,
seed_ids,
n_similar_songs=10000,
n_similar_playlists=100
)
elif method == 'ensemble':
wrmf_output = wrmf_helpers.get_top_similar_from_ensemble(
song_factors,
playlist_factors,
seed_ids,
n_similar_songs=10000,
n_similar_playlists=100
)
else:
raise ValueError("invalid method")
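    # Split the retrieved candidates into tracks that really are in the playlist (hits)
    # and retrieved tracks that are not (misses).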
wrmf_output_set = set(wrmf_output)
true_matches = playlist_set.intersection(wrmf_output_set)
false_matches = wrmf_output_set.symmetric_difference(true_matches)
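    # Build a balanced labelled set: up to 10 playlist hits (label 1), each paired with
    # one retrieved non-member (label 0).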
X_train_ids = []
Y_train = []
for _ in range(min(len(true_matches), 10)):
X_train_ids.append(true_matches.pop())
Y_train.append(1)
X_train_ids.append(false_matches.pop())
Y_train.append(0)
return compute_df_features(seed_ids, X_train_ids, Y_train) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def plays(df):\n tp = (\n df.query('play_type in @OFFENSE_PLAY_TYPES')\n .pivot_table(index=['game_id', 'posteam'], \n columns=['play_type'], \n values=['play_id'], \n aggfunc='count',\n fill_value=0)\n .pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))\n .reset_index()\n ) \n tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)\n tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])\n tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])\n return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')",
"def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr",
"def get_playlist_feats(playlist_id):\r\n sourcePlaylistID = playlist_id\r\n sourcePlaylist = sp.user_playlist(username, sourcePlaylistID);\r\n tracks = sourcePlaylist[\"tracks\"];\r\n songs = tracks[\"items\"];\r\n\r\n track_ids = []\r\n track_names = []\r\n track_artists = []\r\n\r\n\r\n for i in range(0, len(songs)):\r\n if songs[i]['track']['id'] != None: # Removes the local tracks in your playlist if there is any\r\n track_ids.append(songs[i]['track']['id'])\r\n track_names.append(songs[i]['track']['name'])\r\n track_artists.append(songs[i]['track']['artists'])\r\n\r\n\r\n features = []\r\n for i in range(0,len(track_ids)):\r\n audio_features = sp.audio_features(track_ids[i])[0]\r\n track_popularity = {'popularity': sp.track(track_ids[i])['popularity']}\r\n genre = {'genres': sp.artist(track_artists[i][0]['uri'])['genres']}\r\n audio_features = dict(audio_features, **track_popularity, **genre)\r\n features.append(audio_features)\r\n\r\n\r\n playlist_df = pd.DataFrame(features, index = track_names)\r\n return playlist_df",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def games(self, competition_id: int, season_id: int) -> DataFrame[Any]:",
"def competitions(self) -> DataFrame[Any]:",
"def teams(self, game_id: int) -> DataFrame[Any]:",
"def create_progression_tables(self, feat_subset, time_col, patient_col, method, bl_index, skip_no_bl=False):\n\n prog_dfs = []\n\n for df in self:\n patients = df[patient_col]\n\n # create dataframe copy to keep from alternating original dataframe\n prog_df = df[feat_subset][::]\n\n for feat in feat_subset:\n\n for patient in patients.unique():\n # collect values for sinlge patient\n pat_inds = df[df[patient_col] == patient].index\n # create value series storing the values of a patient\n values = df.loc[pat_inds, feat]\n values.index = df.loc[pat_inds, time_col]\n\n # skip patient if no baseline value is present\n if skip_no_bl:\n if bl_index not in values.index:\n prog_df.loc[pat_inds, feat] = np.nan\n continue\n\n # calculate scores for patient and reindex to merge back into dataframe copy\n scores = calc_prog_scores(values, bl_index, method)\n\n # if only NaN has been returned as score set patients progression to nan at all visits\n if type(scores) != pd.Series:\n prog_df.loc[pat_inds, feat] = scores\n\n else: # input normal progression scores for visits\n scores.index = pat_inds\n prog_df.loc[pat_inds, feat] = scores\n\n # get columns from original dataframe to concatinate them to resulting DF\n concat_columns = df[[patient_col, time_col]]\n prog_df = pd.concat([concat_columns, prog_df], join=\"outer\", axis=1)\n\n # add prog_df to list\n prog_dfs.append(prog_df)\n\n # keep track of which categorical features are still in the collection\n categorical_feats = list(set(self.categorical_feats).intersection(feat_subset))\n\n return DataCollection(prog_dfs, self.df_names, categorical_feats)",
"def features(upstream, product):\n data = pd.read_parquet(str(upstream[\"get\"]))\n ft = data[\"1\"] * data[\"2\"]\n df = pd.DataFrame({\"feature\": ft, \"another\": ft**2})\n df.to_parquet(str(product))",
"def startrek_data():\n pdf = pd.DataFrame({\n 'title': ['Picard', 'TNG', 'Voyager', 'Enterprise', 'Deep Space Nine', 'Discovery'],\n 'year': [2020, 1987, 1995, 2001, 1993, 2017],\n 'seasons': [1, 7, 7, 4, 7, 2],\n 'watched': [False, True, True, True, False, True],\n 'rating': [9.3, 9.9, 7.4, 6.8, 8.9, 9.0],\n 'aired': ['20200203', '19870612', '19950101', '20011231', '19930102', '20170524']\n })\n pdf['title'] = pdf['title'].astype('string')\n pdf['seasons'] = pdf['seasons'].astype('category')\n pdf['aired'] = pd.to_datetime(pdf['aired'])\n return pdf",
"def extract_pause_features():\n input_files = sys.argv[1]\n pause_statistics = pd.DataFrame(columns=PAUSE_COLUMNS)\n for filename in os.listdir(input_files):\n if filename != '.DS_Store':\n file_pauses = pd.read_csv(input_files+filename)\n print(filename)\n\n # task duration\n cookie_duration = file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK][AUDIO_FILE_LENGTH].iloc[0]\n reading_duration = file_pauses[file_pauses[TASK] == READING_TASK][AUDIO_FILE_LENGTH].iloc[0]\n memory_duration = file_pauses[file_pauses[TASK] == MEMORY_TASK][AUDIO_FILE_LENGTH].iloc[0]\n\n # length of pauses\n cookie_pause_lengths = file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK][PAUSE_LENGTH]\n reading_pause_lengths = file_pauses[file_pauses[TASK] == READING_TASK][PAUSE_LENGTH]\n memory_pause_lengths = file_pauses[file_pauses[TASK] == MEMORY_TASK][PAUSE_LENGTH]\n\n # number of pauses\n cookie_pause_number = len(file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK].index)\n reading_pause_number = len(file_pauses[file_pauses[TASK] == READING_TASK].index)\n memory_pause_number = len(file_pauses[file_pauses[TASK] == MEMORY_TASK].index)\n\n if cookie_duration - cookie_pause_lengths.sum() < 0:\n print(\"NEGATIVE COOKIE TIME \", filename)\n print(cookie_duration, cookie_pause_lengths.sum())\n\n if reading_duration - reading_pause_lengths.sum() < 0:\n print(\"NEGATIVE READING TIME \", filename)\n print(reading_duration, reading_pause_lengths.sum())\n if memory_duration - memory_pause_lengths.sum() < 0:\n print(\"NEGATIVE MEMORY TIME \", filename)\n print(memory_duration, memory_pause_lengths.sum())\n\n pause_statistics = pause_statistics.append({\n TRANSCRIPT_ID: filename[:-4],\n COOKIE_NUMBER_OF_PAUSES: cookie_pause_number,\n COOKIE_MAXIMUM_PAUSE_DURATION: cookie_pause_lengths.max(),\n COOKIE_DURATION: cookie_duration,\n COOKIE_PHONATION_TIME: cookie_duration - cookie_pause_lengths.sum(),\n COOKIE_PROPORTION_OF_TIME_SPENT_SPEAKING: (cookie_duration -\n cookie_pause_lengths.sum()) / cookie_duration,\n COOKIE_PAUSE_RATE: cookie_pause_number/cookie_duration,\n COOKIE_MEAN_PAUSE_LENGTH: cookie_pause_lengths.mean(),\n COOKIE_STD_PAUSE_LENGTH: cookie_pause_lengths.std(),\n\n READING_NUMBER_OF_PAUSES: reading_pause_number,\n READING_MAXIMUM_PAUSE_DURATION: reading_pause_lengths.max(),\n READING_DURATION: reading_duration,\n READING_PHONATION_TIME: reading_duration - reading_pause_lengths.sum(),\n READING_PROPORTION_OF_TIME_SPENT_SPEAKING: (reading_duration -\n reading_pause_lengths.sum()) / reading_duration,\n READING_PAUSE_RATE: reading_pause_number / reading_duration,\n READING_MEAN_PAUSE_LENGTH: reading_pause_lengths.mean(),\n READING_STD_PAUSE_LENGTH: reading_pause_lengths.std(),\n\n MEMORY_NUMBER_OF_PAUSES: memory_pause_number,\n MEMORY_MAXIMUM_PAUSE_DURATION: memory_pause_lengths.max(),\n MEMORY_DURATION: memory_duration,\n MEMORY_PHONATION_TIME: memory_duration - memory_pause_lengths.sum(),\n MEMORY_PROPORTION_OF_TIME_SPENT_SPEAKING: (memory_duration -\n memory_pause_lengths.sum()) / memory_duration,\n MEMORY_PAUSE_RATE: memory_pause_number / memory_duration,\n MEMORY_MEAN_PAUSE_LENGTH: memory_pause_lengths.mean(),\n MEMORY_STD_PAUSE_LENGTH: memory_pause_lengths.std()\n }, ignore_index=True)\n pause_statistics = pause_statistics.set_index(TRANSCRIPT_ID)\n pause_statistics.to_csv('jan27_extracted_pauses.csv', sep=',', header=True)",
"def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_wf_df(dataset: Dataset) -> pd.DataFrame:\n wf_dict = generate_wf(dataset)\n return pd.DataFrame(list(wf_dict.items()), columns=[\"word\", \"freq\"])",
"def makeVideo():\n weekNumber = 11\n for _ in range(10):\n df = loadDbIntoDf2('trending')\n df_copy = df.copy()\n df_shorter = selectTop(df_copy,'week',weekNumber , 'trending')\n vid_dl = download(df_shorter,weekNumber)\n merge(vid_dl,weekNumber)\n weekNumber = weekNumber + 1",
"def get_dfs(dataset):\n test_slice = False\n if 'test_' in dataset:\n dataset = dataset.replace('test_', '')\n test_slice = True\n ratings_path = BUILTIN_DATASETS[dataset].path\n print('Path to ratings file is: {}'.format(ratings_path))\n if not os.path.isfile(ratings_path):\n download_builtin_dataset(dataset)\n if dataset == 'ml-100k':\n users_path = ratings_path.replace('.data', '.user')\n movies_path = ratings_path.replace('.data', '.item')\n dfs = movielens_to_df(ratings_path, users_path, movies_path)\n elif dataset == 'ml-1m':\n users_path = ratings_path.replace('ratings.', 'users.')\n movies_path = ratings_path.replace('ratings.', 'movies.')\n dfs = movielens_1m_to_df(ratings_path, users_path, movies_path)\n elif dataset == 'ml-20m':\n # there is no user path\n movies_path = ratings_path.replace('ratings.', 'movies.') # .../movies.csv\n dfs = movielens_20m_to_df(ratings_path, movies_path)\n else:\n raise Exception(\"Unknown dataset: \" + dataset)\n \n if test_slice:\n dfs['ratings'] = dfs['ratings'].sample(100, random_state=0)\n # print('Got dfs.\\nDataframe sizes are\\nratings:{}\\nusers:{}\\nmovies:{}'.format(\n # dfs['ratings'].memory_usage(), dfs['users'].memory_usage(), dfs['movies'].memory_usage()\n # ))\n return dfs",
"def input_as_dataframe(self, channel='training'):\n\n data_directories = {\n 'training',\n 'validation',\n 'testing'\n }\n\n if channel in data_directories:\n\n csv_files = glob.glob(os.path.join(f'/opt/ml/input/data/{channel}/*.csv'))\n print(f'Files in {channel} directory: {csv_files}')\n # loop over the list of csv files\n fileBytes = []\n for f in csv_files:\n \n # read the csv file\n df = pd.read_csv(f)\n fileBytes.append(df)\n frame = pd.concat(fileBytes, axis=0, ignore_index=True) \n return frame \n else:\n raise ValueError('Incorrect data channel type. Options are training, validation, and testing.')\n return null",
"def test_6():\n table = pandas.read_csv('data/matches.csv')\n query_result = show.show(table,\n dimensions=['toss_winner'],\n slices=[('season', Filters.EQUAL_TO, 2017)],)\n print(query_result)\n expected_result = \"\"\" toss_winner\n0 Royal Challengers Bangalore\n1 Rising Pune Supergiant\n2 Kolkata Knight Riders\n3 Kings XI Punjab\n4 Royal Challengers Bangalore\n5 Sunrisers Hyderabad\n6 Mumbai Indians\n7 Royal Challengers Bangalore\n8 Rising Pune Supergiant\n9 Mumbai Indians\n10 Kolkata Knight Riders\n11 Mumbai Indians\n12 Gujarat Lions\n13 Sunrisers Hyderabad\n14 Delhi Daredevils\n15 Mumbai Indians\n16 Royal Challengers Bangalore\n17 Delhi Daredevils\n18 Kings XI Punjab\n19 Gujarat Lions\n20 Sunrisers Hyderabad\n21 Mumbai Indians\n22 Gujarat Lions\n23 Delhi Daredevils\n24 Rising Pune Supergiant\n25 Gujarat Lions\n26 Royal Challengers Bangalore\n27 Mumbai Indians\n28 Kolkata Knight Riders\n29 Gujarat Lions\n30 Kolkata Knight Riders\n31 Kings XI Punjab\n32 Royal Challengers Bangalore\n33 Gujarat Lions\n34 Kings XI Punjab\n35 Kolkata Knight Riders\n36 Royal Challengers Bangalore\n37 Rising Pune Supergiant\n38 Delhi Daredevils\n39 Rising Pune Supergiant\n40 Delhi Daredevils\n41 Royal Challengers Bangalore\n42 Sunrisers Hyderabad\n43 Delhi Daredevils\n44 Kolkata Knight Riders\n45 Gujarat Lions\n46 Mumbai Indians\n47 Kolkata Knight Riders\n48 Delhi Daredevils\n49 Mumbai Indians\n50 Delhi Daredevils\n51 Sunrisers Hyderabad\n52 Kolkata Knight Riders\n53 Rising Pune Supergiant\n54 Royal Challengers Bangalore\n55 Mumbai Indians\n56 Kolkata Knight Riders\n57 Mumbai Indians\n58 Mumbai Indians\"\"\"\n\n\n expected_suggestions = \"[]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))",
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def find_worth_playlist(self, part_worths, song_list):\r\n history_df = self.extracter.make_history(song_list)\r\n u_df = self.analyser.process_song_df(part_worths.values[0], history_df)\r\n return u_df",
"def split_data(df):\n\n df['ranked_latest'] = df.groupby(['userId'])['timestamp'].rank(method='first', ascending=False)\n train_df = df[df['ranked_latest'] != 1]\n test_df = df[df['ranked_latest'] == 1]\n\n train_df = train_df[['userId', 'movieId', 'rating']]\n test_df = test_df[['userId', 'movieId', 'rating']]\n\n return train_df, test_df",
"def run_pipeline() -> pd.DataFrame:\n\n print('Loading data...')\n data = load_data()\n print('Stage one processing...')\n text = data.text\n text_ = stage_one_preprocessing(text)\n data_ = data.copy()\n data_.text = text_\n #print('Splitting by sentences...')\n #data_ = split_by_sentences(data_)\n print('Stage two processing...')\n text_ = stage_two_preprocessing(data_.text)\n print('Stage three processing...')\n text_ = stage_three_preprocessing(text_)\n data_.text = text_\n print('Saving file...')\n data_.to_csv(r'./data/stage_three_text.csv')\n return data_",
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def separate_file(self):\n df = pd.read_csv(\"nfl_drafts.csv\", names = ['Pick', 'Team', 'Player_name', 'POS', \n 'Age', 'Last_played', 'AP1', 'PB', 'ST', 'CarAV', 'DrAV', 'G_perS', 'PaCmp', 'PaAtt', \n 'PaYds', 'PaTD', 'Int', 'Att', 'Yds', 'RuTD', 'Rec', 'ReYds', 'ReTD', 'Solo', 'DeInt', \n 'Sk', 'Coll/Univ', 'Stat'], error_bad_lines = False)\n return df",
"def create_panda(read):\n k_values = []\n for i in range(1,len(read)+1):\n k_values.append(i)\n observed_kmers = []\n for i in k_values:\n observed_kmers.append((count_kmers_observed(read, i)))\n possible_kmers = []\n for i in k_values:\n possible_kmers.append(count_kmers_possible(read, i))\n df = pd.DataFrame(list(zip(k_values, observed_kmers, possible_kmers)), columns = ['k','observed kmers','possible kmers'])\n df.at['Total', 'observed kmers'] = df['observed kmers'].sum()\n df.at['Total', 'possible kmers'] = df['possible kmers'].sum()\n return(df)",
"def interpret_results(rules):\n df_res = rules.sort_values(by=['lift'], ascending=False)\n # df_res.head()\n return df_res",
"def construct_game_dataframe(game):\n\n # points as index\n # each stat as a column\n # columns: passes, possessions, turnovers, blocks, starting_fence, we_scored\n # from read_frame: starting_fence, ourscore_EOP, theirscore_EOP\n # to be calculated: passes, possessions, turnovers, blocks, we_scored\n\n from .models import Point, Possession\n from django_pandas.io import read_frame\n\n logger = logging.getLogger(__name__)\n\n game_points = game.points.all().order_by('point_ID')\n\n # generate initial dataframe with some columns\n df = read_frame(game_points,\n fieldnames=['startingfence', 'ourscore_EOP', 'theirscore_EOP'],\n index_col='point_ID')\n\n # assign(colname=data) ; data must be a series or series-like object\n\n # bool - did we score this point\n we_scored = pd.Series([bool_we_scored(point) for point in game_points],\n index=df.index)\n\n # goals we scored\n goals = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=GOALS).count() for point in game_points],\n index=df.index)\n\n # goals they scored\n opp_goals = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=GOALS).count() for point in game_points],\n index=df.index)\n\n # callahans we threw\n callahans_thrown = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=CALLAHANS).count() for point in game_points],\n index=df.index)\n\n # callahans we caught\n opp_callahans_thrown = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=CALLAHANS).count() for point in game_points],\n index=df.index)\n\n # number of passes we made\n passes = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=PASSES).count() for point in game_points],\n index=df.index)\n\n # number of TOTAL possessions\n total_possessions = pd.Series([point.possessions.no_cessation().all().count() for point in game_points],\n index=df.index)\n\n # number of turnovers we had\n turnovers = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=TURNOVERS).count() for point in game_points],\n index=df.index)\n\n # blocks we had\n blocks = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=BLOCKS).count() for point in game_points],\n index=df.index)\n\n # turnovers opponent had\n opp_turns = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=TURNOVERS).count() for point in game_points],\n index=df.index)\n\n # our possessions end on:\n # our goals + their blocks + our turnovers + their callahans scored (ours thrown)\n # in our stats, their blocks are not recorded only our turnovers\n possessions = goals + turnovers + callahans_thrown\n\n # their possessions end on:\n # their goals + our blocks + their turnovers + our callahans scored\n opp_possessions = opp_goals + blocks + opp_turns + opp_callahans_thrown\n\n # check our possession calculations\n pos_testframe = pd.concat([total_possessions - (possessions + opp_possessions)], axis=1)\n zeroframe = pd.DataFrame(0, index=pos_testframe.index, columns=pos_testframe.columns)\n if not zeroframe.equals(pos_testframe):\n logger.critical('error in possession calculation, below should be all zeros')\n logger.critical(pos_testframe)\n\n df = df.assign(we_scored=we_scored)\n df = df.assign(goals=goals)\n df = df.assign(passes=passes)\n df = df.assign(turnovers=turnovers)\n df = df.assign(blocks=blocks)\n df = df.assign(possessions=possessions)\n df = df.assign(callahans=opp_callahans_thrown)\n\n df = df.assign(opp_goals=opp_goals)\n df = 
df.assign(opp_turns=opp_turns)\n df = df.assign(opp_poss=opp_possessions)\n df = df.assign(opp_callahans=callahans_thrown)\n\n return df",
"def loadDbIntoDf2(content):\n #Loading data into DF\n if content == 'trending':\n file = 'dataVideo.txt'\n elif content == 'music':\n file = 'dataVideoChallenge.txt'\n else:\n file = 'dataVideo.txt'\n with open(file,'r') as f:\n videos_dict = json.load(f)\n df = pd.DataFrame.from_dict(videos_dict)\n #filter on challenge\n if content == 'music':\n df = df[df.musicId == \"6745161928949106690\"]\n return df",
"def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df"
]
| [
"0.6053288",
"0.5691462",
"0.5527055",
"0.55186474",
"0.5355124",
"0.52818215",
"0.520463",
"0.5169522",
"0.51587737",
"0.5151688",
"0.51458937",
"0.5117981",
"0.5104984",
"0.5068347",
"0.50469536",
"0.50429755",
"0.5041572",
"0.5026257",
"0.5013502",
"0.49976748",
"0.49916938",
"0.49890506",
"0.4976237",
"0.49730957",
"0.49615806",
"0.49538824",
"0.4949963",
"0.49457526",
"0.49395636",
"0.49367157"
]
| 0.6845031 | 0 |
Updates all FixedAssets data (in the FA_DATA directory) for a user-defined year and frequency | def update_all_fa(year, frequency):
failed_dict = {}
mb_remaining = 100
requests_remaining = 100
fa_table_ids = pybea.get_parameter_values(UserID, 'FixedAssets', ParameterName='TableName', ResultFormat='JSON')
tablenames = fa_table_ids['TableName'].values
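    # Request every FixedAssets table for the given year/frequency and cache it as CSV,
    # tracking data volume and request count to stay under the API's throttling limits.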
for x in tablenames:
temp = pybea.get_data(UserID, 'FixedAssets', TableName=x, Frequency=frequency, Year=year)
# Compute how many megabytes each request is
# print('This request was ', sys.getsizeof(temp) / 1000000, 'megabytes')
size = sys.getsizeof(temp) / 1000000
mb_remaining -= size
requests_remaining -= 1
# print('You have ', mb_remaining, 'more megabytes before throttling and ', requests_remaining,
# 'request/s remaining before throttling.')
temp.to_csv('../FA_DATA/{0}.csv'.format(x))
time.sleep(1)
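        # Back off before hitting either per-minute cap (the 100 MB / 100 request budgets
        # assumed by the counters above), then reset the counters.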
if mb_remaining < 5:
time.sleep(30)
mb_remaining = 100
if requests_remaining < 2:
time.sleep(45)
requests_remaining = 100
if pybea.JSON_ERROR:
failed_dict[x] = pybea.JSON_ERROR
time.sleep(.75)
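    # Any table whose request produced a pybea JSON error is reported back to the caller.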
return failed_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_fa(tablenames, frequency, year):\n failed_dict = {}\n mb_remaining = 100\n requests_remaining = 100\n\n for x in tablenames:\n print(x)\n temp = pybea.get_data(UserID, 'FixedAssets', TableName=x, Frequency=frequency, Year=year)\n size = sys.getsizeof(temp) / 1000000\n mb_remaining -= size\n requests_remaining -= 1\n print('You have ', mb_remaining, 'more megabytes before throttling and ', requests_remaining,\n 'request/s remaining before throttling.')\n temp.to_csv('../FA_DATA/{0}.csv'.format(x))\n time.sleep(1)\n if mb_remaining < 5:\n time.sleep(30)\n mb_remaining = 100\n if requests_remaining < 2:\n time.sleep(45)\n requests_remaining = 100\n if pybea.JSON_ERROR:\n failed_dict[x] = pybea.JSON_ERROR\n time.sleep(.75)\n\n return failed_dict",
"def estimate_year_data(self, years, frequency):\n data_year = self.price.index.year.unique()\n no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for\n\n if len(no_data_year) > 0:\n for yr in no_data_year:\n source_year = pd.Period(max(data_year))\n\n source_data = self.price[self.price.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.energy_growth, source_year, yr, frequency)\n self.price = pd.concat([self.price, new_data], sort=True) # add to existing\n\n source_data = self.p_regu[self.p_regu.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regu = pd.concat([self.p_regu, new_data], sort=True) # add to existing\n\n source_data = self.p_regd[self.p_regd.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regd = pd.concat([self.p_regd, new_data], sort=True) # add to existing",
"def fixed_assets(self, table_name: str, year: List[str]) -> Dict:\n\n if year != 'ALL':\n year = ','.join(year)\n\n # Define the parameters.\n params = {\n 'userid': self.api_key,\n 'method': 'GetData',\n 'datasetname': 'FixedAssets',\n 'year': year,\n 'resultformat': self._format,\n 'tablename': table_name\n }\n\n # Make the request.\n response = self._make_request(\n method='get',\n params=params\n )\n\n return response",
"def load_all_data():\r\n\r\n data = dict()\r\n for year in ['2010', '2011', '2014', '2016']:\r\n\r\n data[year] = load_data(int(year))\r\n\r\n # Calculate the dune widths\r\n data[year]['Dune Width'] = data[year]['x_heel'] - data[year]['x_toe']\r\n data[year]['Fenced Dune Width'] = data[year]['x_fence_heel'] - data[year]['x_fence_toe']\r\n data[year]['Fenced Dune System Width'] = data[year]['x_heel'] - data[year]['x_fence_toe']\r\n\r\n # For now, remove all negative widths and volumes, something went wrong with them\r\n width_condition = data[year]['Fenced Dune Width'] <= 0\r\n volume_condition = data[year]['Fenced Dune Volume'] <= 0\r\n\r\n data[year]['y_fence_crest'][width_condition] = np.nan\r\n data[year]['Fenced Dune Width'][width_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][width_condition] = np.nan\r\n\r\n data[year]['y_fence_crest'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Width'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][volume_condition] = np.nan\r\n\r\n data[year]['Fenced Dune System Width'][data[year]['Fenced Dune System Width'] <= 0] = np.nan\r\n\r\n # Remove instances where the fenced and natural dune crest are not positioned correctly\r\n crest_condition_1 = data[year]['x_fence_crest'] >= data[year]['x_crest']\r\n crest_condition_2 = data[year]['y_fence_crest'] >= data[year]['y_crest']\r\n\r\n data[year]['y_fence_crest'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_1] = np.nan\r\n\r\n data[year]['y_fence_crest'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_2] = np.nan\r\n\r\n data['Fences'] = load_fence_locations(y=0)\r\n\r\n return data",
"def update_freq_dist(filename):\r\n pass",
"def onefile(yr):\r\n\r\n global MF\r\n filename = \"{0}/yob{1:4d}.txt\".format(dirname, yr)\r\n f = open(filename, \"r\")\r\n for l in f:\r\n cols = l.strip().split(\",\")\r\n name = cols[0]\r\n gender = cols[1]\r\n cnt = float(cols[2])\r\n if not (name in MF[gender]):\r\n #print(MF)\r\n #print('this is after')\r\n MF[gender][name] = [0.0 for x in range(firstyr, lastyr)]\r\n #print(MF)\r\n #print(len(MF[gender][name]))\r\n MF[gender][name][yr-firstyr] = cnt\r\n #print(MF)\r\n f.close()",
"def __set_frequency_data(self, fdata):\n assert fdata.shape[-1] == self._nf\n self._in_freq = fdata\n self._in_time = None",
"def update_frequencies():\n pass",
"def performStats(dataArray):\n yearArray = [[0,0] for i in range(20)]\n for entry in dataArray:\n oSum = 0\n nSum = 0\n for k, v in entry.old.items():\n # print(k,v)\n oSum += v\n for k,v in entry.new.items():\n # print(k,v)\n nSum += v\n entry.oldSum = oSum\n entry.newSum = nSum\n idx = int(entry.year)%20 #0-19 index\n yearArray[idx][0] += entry.oldSum\n yearArray[idx][1] += entry.newSum\n return yearArray",
"def gbf_pub_update():\r\n LOG.info(\"Start: Update datasets in RLIDGeo warehouse.\")\r\n month_stamps = [\r\n datetime.date.today().strftime(\"%Y_%m\"),\r\n (\r\n datetime.date.today().replace(day=1)\r\n - datetime.timedelta(days=1)\r\n ).strftime(\"%Y_%m\"),\r\n ]\r\n for month_stamp in month_stamps:\r\n snapshot_db_path = SNAPSHOT_DB_PATH.format(month_stamp)\r\n if not os.path.exists(snapshot_db_path):\r\n LOG.warning(\"Snapshot database %s does not exist.\", snapshot_db_path)\r\n continue\r\n\r\n for _dataset in DATASETS:\r\n arcetl.features.update_from_dicts(\r\n dataset_path=_dataset.path(\"pub\"),\r\n update_features=source_rows(snapshot_db_path, _dataset.path(\"source\")),\r\n id_field_names=_dataset.id_field_names,\r\n field_names=_dataset.field_names,\r\n delete_missing_features=False,\r\n use_edit_session=False,\r\n )\r\n LOG.info(\"End: Update.\")",
"def update(self, ti=None, tf=None):\n if failedobspyimport:\n raise ImportError('ObsPy import failed, cannot update data.')\n\n makedir('_tmp')\n\n # default data range if not given \n ti = ti or datetime(self.tf.year,self.tf.month,self.tf.day,0,0,0)\n tf = tf or datetime.today() + _DAY\n \n ti = datetimeify(ti)\n tf = datetimeify(tf)\n\n ndays = (tf-ti).days\n\n # parallel data collection - creates temporary files in ./_tmp\n pars = [[i,ti] for i in range(ndays)]\n p = Pool(6)\n p.starmap(get_data_for_day, pars)\n p.close()\n p.join()\n\n # special case of no file to update - create new file\n if not self.exists:\n shutil.copyfile('_tmp/_tmp_fl_00000.dat',self.file)\n self.exists = True\n shutil.rmtree('_tmp')\n return\n\n # read temporary files in as dataframes for concatenation with existing data\n dfs = [self.df[datas]]\n for i in range(ndays):\n fl = '_tmp/_tmp_fl_{:05d}.dat'.format(i)\n if not os.path.isfile(fl): \n continue\n dfs.append(pd.read_csv(fl, index_col=0, parse_dates=[0,], infer_datetime_format=True))\n shutil.rmtree('_tmp')\n self.df = pd.concat(dfs)\n\n # impute missing data using linear interpolation and save file\n self.df = self.df.loc[~self.df.index.duplicated(keep='last')]\n self.df = self.df.resample('10T').interpolate('linear')\n\n # remove artefact in computing dsar\n for i in range(1,int(np.floor(self.df.shape[0]/(24*6)))): \n ind = i*24*6\n self.df['dsar'][ind] = 0.5*(self.df['dsar'][ind-1]+self.df['dsar'][ind+1])\n\n self.df.to_csv(self.file, index=True)\n self.ti = self.df.index[0]\n self.tf = self.df.index[-1]",
"def FS2Years(inputFolderPath = './FormattedFilesWithoutMissingToNextYear', outputFolderPath = './FormattedFilesWithoutMissingToNextYear'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4)+'N')]\n\t\t\t regex = re.compile(\"....N:.*\")\n\t\t\t nextYearIDs = [idx for idx, item in enumerate(header) if regex.search(item)]\n\t\t\t nextYearCount = len(nextYearIDs)\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\t# X = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\tX = dataset[:,nextYearCount:dataset.shape[1]]\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\n\t\t\tk = 40\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[nextYearCount+1:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\tprint 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('FeatureSelectionIndicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()",
"def loop(self):\n catalog_copy = self.catalog.copy() # Keep the original to apply the merge.\n self.catalog['AE'] = np.nan\n\n for year in self.unique_years:\n print(f'Merging AE data for year={year}')\n # Try to load the AE file.\n try:\n self.ae = self.load_ae(year)\n except AssertionError as err:\n if 'No AE files found.' in str(err):\n print(err)\n continue\n else:\n raise\n\n merged = pd.merge_asof(catalog_copy, self.ae, left_index=True, \n right_index=True, tolerance=pd.Timedelta(minutes=1),\n direction='nearest')\n self.catalog.update(merged)\n return",
"def update_all_fa_tag():\n failed_dict = {}\n mb_remaining = 100\n requests_remaining = 100\n\n fa_table_ids = pybea.get_parameter_values(UserID, 'FixedAssets', ParameterName='TableName', ResultFormat='JSON')\n tablenames = fa_table_ids['TableName'].values\n\n table_name_col = []\n series_code_col = []\n period_col = []\n data_val_col = []\n line_description_col = []\n\n for x in tablenames:\n temp = pybea.get_data(UserID, 'FixedAssets', TableName=x, Year='ALL')\n # Compute how many megabytes each request is\n size = sys.getsizeof(temp) / 1000000\n mb_remaining -= size\n requests_remaining -= 1\n\n table_name = temp['TableName']\n series_code = temp['SeriesCode']\n period = temp['TimePeriod']\n data_val = temp['DataValue']\n line_description = temp['LineDescription']\n\n table_name_col.extend(table_name)\n series_code_col.extend(series_code)\n period_col.extend(period)\n data_val_col.extend(data_val)\n line_description_col.extend(line_description)\n\n time.sleep(1)\n if mb_remaining < 5:\n time.sleep(55)\n mb_remaining = 100\n requests_remaining = 100\n if requests_remaining < 2:\n time.sleep(45)\n mb_remaining = 100\n requests_remaining = 100\n if pybea.JSON_ERROR:\n failed_dict[x] = pybea.JSON_ERROR\n time.sleep(1)\n\n aggregate_fa = pd.DataFrame()\n aggregate_fa['line_number'] = table_name_col\n aggregate_fa['line_name_short'] = line_description_col\n aggregate_fa['series_code'] = series_code_col\n aggregate_fa['year'] = period_col\n aggregate_fa['value'] = data_val_col\n\n aggregate_fa.to_csv('../FA_ALL/aggregate_fa.csv', index=False)\n aggregate_fa.to_csv('aggregate_fa.csv', index=False)\n\n\n return failed_dict",
"def readdata(self, fname):\n\t\tif not hasattr(self, 'sweepNumber') or not hasattr(self, 'channel'):\n\t\t\tself.sweepNumber=0\n\t\t\tself.channel=0\n\n\t\t# additional meta data\n\t\tself.fileFormat='abf'\n\n\t\tabf=pyabf.ABF(fname)\n\t\tabf.setSweep(sweepNumber=self.sweepNumber, channel=self.channel)\n\t\tscale=self._currentScale(abf)\n\n\t\t# If the Fs attribute doesn't exist set it\n\t\tif not hasattr(self, 'Fs'):\t\n\t\t\tself.Fs=abf.dataRate\n\t\t# else check if it s the same as before\n\t\telse:\n\t\t\tif self.Fs!=abf.dataRate:\n\t\t\t\traise metaTrajIO.SamplingRateChangedError(\"The sampling rate in the data file '{0}' has changed.\".format(f))\n\n\t\treturn abf.sweepY*scale",
"def update_fft(data):\n if data is None or data['rate'] is None:\n raise PreventUpdate\n x = np.fft.rfftfreq(len(data['val_list']), d=data['rate'])[10:]\n y = np.abs(np.fft.rfft(data['val_list']))[10:]\n return {'x': [x], 'y': [y]}, [0], len(y)",
"def Set_dict_DF(FD, LD):\r\n global ddf\r\n global df\r\n for d in range(FD.year-1, (LD.year + 2)):\r\n #if year isn't already in the key list of the dictionnary\r\n if(d not in ddf.keys()):\r\n #Generate a new instance of df in the dictionnary with a link year key\r\n ddf[d] = df",
"def update_grad_data():\n t_file = 'hcapgrd1_full_data_*.fits*'\n out_dir = deposit_dir + '/Grad_save/'\n tdir = out_dir + 'Gradcap/'\n#\n#--- read grad group name\n#\n gfile = house_keeping + 'grad_list'\n grad_list = mcf.read_data_file(gfile)\n\n [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)\n\n get_data(tstart, tstop, year, grad_list, out_dir)",
"def growth_rate(filenames, time_model=arai_time_model):\n # file ID\n\n print(\"storing casename and Reynolds number\\n\\n\")\n casename, Re, We = file_id(filenames[1])\n\n print(\"\\nNow calculating FFTs\\n\\n\")\n # calculating ffts\n\n t, freqs, loc0_diameter_fft, loc0_centroid_fft = fft_output(filenames[0])\n t, freqs, loc1_diameter_fft, loc1_centroid_fft = fft_output(filenames[1])\n t, freqs, loc2_diameter_fft, loc2_centroid_fft = fft_output(filenames[2])\n t, freqs, loc3_diameter_fft, loc3_centroid_fft = fft_output(filenames[3])\n t, freqs, loc4_diameter_fft, loc4_centroid_fft = fft_output(filenames[4])\n t, freqs, loc5_diameter_fft, loc5_centroid_fft = fft_output(filenames[5])\n t, freqs, loc6_diameter_fft, loc6_centroid_fft = fft_output(filenames[6])\n t, freqs, loc7_diameter_fft, loc7_centroid_fft = fft_output(filenames[7])\n t, freqs, loc8_diameter_fft, loc8_centroid_fft = fft_output(filenames[8])\n t, freqs, loc9_diameter_fft, loc9_centroid_fft = fft_output(filenames[9])\n\n # real amplitudes from morozumi equation\n\n loc0_diameter_amp = np.sqrt((4/t)*loc0_diameter_fft)\n loc0_centroid_amp = np.sqrt((4/t)*loc0_centroid_fft)\n\n loc1_diameter_amp = np.sqrt((4/t)*loc1_diameter_fft)\n loc1_centroid_amp = np.sqrt((4/t)*loc1_centroid_fft)\n\n loc2_diameter_amp = np.sqrt((4/t)*loc2_diameter_fft)\n loc2_centroid_amp = np.sqrt((4/t)*loc2_centroid_fft)\n\n loc3_diameter_amp = np.sqrt((4/t)*loc3_diameter_fft)\n loc3_centroid_amp = np.sqrt((4/t)*loc3_centroid_fft)\n\n loc4_diameter_amp = np.sqrt((4/t)*loc4_diameter_fft)\n loc4_centroid_amp = np.sqrt((4/t)*loc4_centroid_fft)\n\n loc5_diameter_amp = np.sqrt((4/t)*loc5_diameter_fft)\n loc5_centroid_amp = np.sqrt((4/t)*loc5_centroid_fft)\n\n loc6_diameter_amp = np.sqrt((4/t)*loc6_diameter_fft)\n loc6_centroid_amp = np.sqrt((4/t)*loc6_centroid_fft)\n\n loc7_diameter_amp = np.sqrt((4/t)*loc7_diameter_fft)\n loc7_centroid_amp = np.sqrt((4/t)*loc7_centroid_fft)\n\n loc8_diameter_amp = np.sqrt((4/t)*loc8_diameter_fft)\n loc8_centroid_amp = np.sqrt((4/t)*loc8_centroid_fft)\n\n loc9_diameter_amp = np.sqrt((4/t)*loc9_diameter_fft)\n loc9_centroid_amp = np.sqrt((4/t)*loc9_centroid_fft)\n\n # setting up storage array for the z_locations\n z_locations = np.zeros(10)\n\n # using filenames to ID z locations\n for i in range(len(filenames)):\n # separate into the paramaters\n underscore_split = filenames[i].split('_')\n # identify the last parameter, split by the . 
and then take the first\n # value as this will be the z_location\n z_loc = underscore_split[-1].split('.')[0]\n z_locations[i] = int(z_loc)\n\n # calculating velocity\n u = velocity_calculator(int(Re))\n\n # converting z_locations into real distances\n zs_metres = 0.02*z_locations/1000\n\n # time model can be changed as needed\n z_times = time_model(u, zs_metres, float(We))\n\n # initialising storage arrays for growth rates\n diameter_growth_rates = np.zeros((len(loc0_diameter_amp)))\n diameter_a0 = np.zeros((len(loc0_diameter_amp)))\n diameter_errs = np.zeros((len(loc0_diameter_amp)))\n\n centroid_growth_rates = np.zeros((len(loc0_centroid_amp)))\n centroid_a0 = np.zeros((len(loc0_centroid_amp)))\n centroid_errs = np.zeros((len(loc0_centroid_amp)))\n\n # performing loop to work out growth rates of diameter from curve fitting\n # various z locations (z times)\n\n print(\"\\n\\nNow calculating the diameter growth rates:\\n\\n\")\n # i is an indexer for the length of the array, equal to the frame number\n for i in range(len(loc0_diameter_amp)):\n # progress calculator\n if (i % 1000) == 0:\n print(\"Progress: {:.1f}%\".format(i*100/len(loc0_diameter_amp)))\n # assign a local array which takes the diameter amp at the current\n # index across the 10 z locations\n local_amps = np.array((loc0_diameter_amp[i], loc1_diameter_amp[i],\n loc2_diameter_amp[i], loc3_diameter_amp[i],\n loc4_diameter_amp[i], loc5_diameter_amp[i],\n loc6_diameter_amp[i], loc7_diameter_amp[i],\n loc8_diameter_amp[i], loc9_diameter_amp[i]))\n # work out the local a_0, growth rate, and error in curve fit\n # using the curve fit function defined earlier\n loc_a_0, loc_omega, loc_err = param_extractor(z_times, local_amps)\n # assign local variables to global array\n diameter_a0[i] = loc_a_0\n diameter_growth_rates[i] = loc_omega\n diameter_errs[i] = loc_err\n\n print('diameter growth rate calculation complete')\n\n print(\"\\n\\nNow calculating the centroid growth rates:\\n\\n\")\n for i in range(len(loc0_centroid_amp)):\n # progress calculator\n if (i % 1000) == 0:\n print(\"Progress: {:.1f}%\".format(i*100/len(loc0_centroid_amp)))\n # assign a local array which takes the centroid amp at the current\n # index across the 10 z locations\n local_amps = np.array((loc0_centroid_amp[i], loc1_centroid_amp[i],\n loc2_centroid_amp[i], loc3_centroid_amp[i],\n loc4_centroid_amp[i], loc5_centroid_amp[i],\n loc6_centroid_amp[i], loc7_centroid_amp[i],\n loc8_centroid_amp[i], loc9_centroid_amp[i]))\n # work out the local a_0, growth rate, and error in curve fit\n # using the curve fit function defined earlier\n loc_a_0, loc_omega, loc_err = param_extractor(z_times, local_amps)\n # assign local variables to global array\n centroid_a0[i] = loc_a_0\n centroid_growth_rates[i] = loc_omega\n centroid_errs[i] = loc_err\n\n # create filename by taking the first portion of the input filename\n output_filename = casename[0:-12] + '_fft.csv'\n\n # stack the arrays together so they can be saved as a single file along\n # the first axis\n output_arr = np.stack((freqs, diameter_a0, diameter_growth_rates,\n diameter_errs, centroid_a0, centroid_growth_rates,\n centroid_errs), axis=1)\n\n # save the array with a header that is for user experience, this is\n # ignored by numpy.loadtxt\n np.savetxt(output_filename, output_arr,\n fmt='%f', delimiter=',',\n header='freqs, diameter_a0, diameter_growth_rates,\\\n diameter_errs, centroid_a0, centroid_growth_rates,\\\n centroid_errs')\n\n # POST PROCESSING TESTING, NOT FOR DEPLOYMENT\n\n fig, ax = plt.subplots()\n 
ax.plot(freqs, diameter_growth_rates, '.', color='yellow')\n ax.set_xlim(0, 1000)\n ax.set_ylim(0, 150)\n ax.set_title(\"Growth rates vs frequencies\")\n ax.set_xlabel(\"Frequencies\")\n ax.set_ylabel(\"Growth rates\")\n\n print(\"minimum error is:\", diameter_errs.min())\n\n minimum_location = diameter_errs.argmin()\n print(minimum_location)\n print(\"minimum error frequency:\", freqs[minimum_location])\n\n # 1253 is the location of 290.04 Hz\n amps_reg = np.array([loc0_diameter_amp[minimum_location],\n loc1_diameter_amp[minimum_location],\n loc3_diameter_amp[minimum_location],\n loc2_diameter_amp[minimum_location],\n loc4_diameter_amp[minimum_location],\n loc5_diameter_amp[minimum_location],\n loc6_diameter_amp[minimum_location],\n loc7_diameter_amp[minimum_location],\n loc8_diameter_amp[minimum_location],\n loc9_diameter_amp[minimum_location]])\n\n amps = amps_reg/diameter_a0[minimum_location]\n\n fig1, ax1 = plt.subplots()\n ax1.plot(z_times, amps, 'o', label='Experimental amplitudes')\n\n modelling_ts = np.linspace(0, 0.02, 1000)\n modelamps_r = (model_growth_rate(modelling_ts,\n diameter_a0[minimum_location],\n diameter_growth_rates[minimum_location]))\n model_amps = modelamps_r/diameter_a0[minimum_location]\n\n ax1.plot(modelling_ts, model_amps,\n label='Curve fit ($\\\\zeta = \\\\zeta_0e^{\\\\omega t}$)')\n ax1.set_xlabel(\"Modelled time (seconds)\", fontsize=12)\n ax1.set_ylabel('$\\\\frac{\\\\zeta}{\\\\zeta_0}$', fontsize=16)\n ax1.set_xlim(0, 0.0125)\n ax1.set_ylim(1, 3)\n ax1.grid()\n ax1.legend()\n ax1.tick_params(axis='both', labelsize=8)\n fig1.set_size_inches(5.5, 4)\n fig1.savefig(fname='curve_fit_example.pgf', bbox_inches='tight')\n\n fig2, ax2 = plt.subplots()\n ax2.plot(freqs, diameter_errs, '.')\n ax2.set_xlim(0, 1000)\n ax2.set_title('Errors')\n ax2.set_xlabel(\"Frequencies\")\n ax2.set_ylabel(\"Standard deviation of curve fit\")\n\n print(freqs[600])\n\n w = savgol_filter(diameter_growth_rates, 1001, 2)\n fig5, ax5 = plt.subplots()\n ax5.plot(freqs, w)\n ax5.set_title('Savitzky-Golay filter')\n ax5.set_xlim(0, 5000)\n ax5.set_xlabel('Frequencies')\n ax5.set_ylabel('Growth rate')\n\n ax.plot(freqs, w, label='Savitzky-Golay', color='red')\n ax.legend()\n\n zero_crossings_w = np.where(np.diff(np.signbit(w)))[0]\n\n print(\"Zeros savgol\", freqs[zero_crossings_w])\n\n Ks = []\n delx = 1/27000\n for i in range(len(loc0_diameter_amp)):\n k = i*(2*np.pi)/(delx*116495)\n Ks.append(k*1e-3)",
"def load_all_archived_data(years=default.arch_data_years):\n\tdataframes = []\n\tdrive_arr = []\n\tplay_arr = []\n\tfor year in years:\n\t\tdir_ = os.path.join('data', 'archived_data', str(year))\n\t\tfname = os.path.join(dir_, 'team-game-statistics.csv')\n\t\ttmp_df = pd.read_csv(fname)\n\t\t# Append season\n\t\tseason = [year for _ in range(tmp_df.shape[0])]\n\t\ttmp_df['Season'] = pd.Series(season, index=tmp_df.index)\n\t\tdataframes.append(tmp_df)\n\t\t# Read in plays and drives\n\t\tdrive_arr.append(pd.read_csv(os.path.join(dir_, 'drive.csv')))\n\t\tplay_arr.append(pd.read_csv(os.path.join(dir_, 'play.csv')))\n\tall_data = pd.concat(dataframes)\n\tdrives = pd.concat(drive_arr)\n\tplays = pd.concat(play_arr)\n\t# Add dates\n\tdates_raw = [d%1e8 for d in all_data['Game Code']]\n\tdates = [datetime.datetime(year=int(d/1e4), month=int((d/1e2)%1e2), day=int(d%1e2))\n\t\tfor d in dates_raw]\n\tall_data['DateUtc'] = pd.Series(dates, index=all_data.index)\n\t# Add total 1st downs\n\ttot_first_down = (all_data['1st Down Pass'] + \n\t\tall_data['1st Down Rush'] + all_data['1st Down Penalty'])\n\tall_data['1st Downs'] = tot_first_down\n\t# Add conversion pct\n\tthird_down_conv = all_data['Third Down Conv'] / all_data['Third Down Att']\n\tall_data['3rd Down Conv'] = third_down_conv.replace(np.nan, 0.)\n\tfourth_down_conv = all_data['Fourth Down Conv'] / all_data['Fourth Down Att']\n\tall_data['4th Down Conv'] = fourth_down_conv.replace(np.nan, 0.)\n\t# Add special teams / defensive TDs\n\tall_data['DEF TDs'] = all_data['Fum Ret TD'] + all_data['Int Ret TD']\n\tall_data['Special Teams TDs'] = all_data['Kickoff Ret TD'] + all_data['Punt Ret TD']\n\t# Total yards\n\tall_data['Total Yards'] = all_data['Pass Yard'] + all_data['Rush Yard']\n\t# Total drives and plays\n\tnDrives = []\n\tnPlays = []\n\tfor row, game in all_data.iterrows():\n\t\t# Get matching games then matching drives\n\t\tdr_games = drives[drives['Game Code'] == game['Game Code']]\n\t\tpl_games = plays[plays['Game Code'] == game['Game Code']]\n\t\tdr_match = dr_games[dr_games['Team Code'] == game['Team Code']]\n\t\tpl_match = pl_games[pl_games['Offense Team Code'] == game['Team Code']]\n\t\tnDrives.append(dr_match.shape[0])\n\t\tnPlays.append(pl_match.shape[0])\n\tall_data['Total Drives'] = pd.Series(nDrives, index=all_data.index)\n\tall_data['Total Plays'] = pd.Series(nPlays, index=all_data.index)\n\t# Yards per\n\tall_data['Yards Per Pass'] = (all_data['Pass Yard'] / all_data['Pass Att']).replace(np.nan, 0.)\n\tall_data['Yards Per Play'] = (all_data['Total Yards'] / all_data['Total Plays']).replace(np.nan, 0.)\n\tall_data['Yards per Rush'] = (all_data['Rush Yard'] / all_data['Rush Att']).replace(np.nan, 0.)\n\t# Is home\n\thome_codes = (all_data['Game Code'].values / 1e12).astype(int)\n\tall_data['is_home'] = np.array(all_data['Team Code'] == home_codes).astype(int)\n\t# Total turnovers\n\tall_data['Turnovers'] = all_data['Pass Int'] + all_data['Fumble Lost']\n\t# Other (calc later)\n\tall_data['conferenceId'] = 0\n\tfor field in default.this_elo_fields:\n\t\tall_data[field[5:]] = 0\n\t# Rename fields and ids to match new data\n\tall_data = rename_fields(all_data)\n\tall_data = map_team_conf_fields(all_data)\n\tall_data = combine_games(all_data)\n\tall_data = remove_unknown_teams(all_data)\n\treturn all_data",
"def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure",
"def readdata(self, prefix, date, county):\n print('F30A: prefix and date', prefix, date)\n\n filetoread = open(prefix + 'EL30A')\n\n lines = []\n for line in filetoread:\n lines.append(line)\n\n linelimit = len(lines)\n# print('LINELIMIT', linelimit)\n linesub = 0\n while linesub < linelimit:\n line = lines[linesub]\n\n if len(line) < 4:\n linesub += 1\n continue\n if len(line.split()) < 2:\n linesub += 1\n continue\n possiblepctnumber = line[0:4]\n if (possiblepctnumber >= '0000') and (possiblepctnumber <= '9999'):\n self._pctnumber = possiblepctnumber\n self._pctname = line[4:].strip()\n if self._pctnumber not in globalpcts:\n print('NEW PCT %s %s' % (self._pctnumber, self._pctname))\n onepct = OnePct(self._pctnumber, self._pctname, '30A')\n else:\n onepct = globalpcts[self._pctnumber]\n\n # here's the new 12 June 2018 kluge to get the numbers\n if 'BALLOTS CAST - TOTAL' in line:\n line = line.replace('.', ' ')\n line = line.replace('BALLOTS CAST - TOTAL', ' ')\n line = line.replace(',', '') # commas in numbers\n\n# linesplit = line.split()\n self._ballots = []\n for item in line.split():\n self._ballots.append(int(item))\n if (self._pctnumber >= '0000') and (self._pctnumber <= '9999'):\n onepct.updatefrom30a(self._registered, self._ballots, county)\n globalpcts[self._pctnumber] = onepct\n linesub += 1",
"def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()",
"def save_fy(app, fiscal_year, casc_model):\n # casc_model = app.casc.query.filter_by(id=casc_model).first()\n fy = app.db.session.query(app.FiscalYear).filter(\n app.FiscalYear.sb_id == fiscal_year.ID).first()\n # fy = app.FiscalYear.query.filter_by(sb_id=fiscal_year.ID).first()\n if fy is None: # The Fiscal Year was not found in the db\n print(\"---------SQL--------- [FiscalYear] Could not find \" +\n \"{} in database...\".format(fiscal_year.name.encode('utf-8')))\n fy = app.FiscalYear(sb_id=fiscal_year.ID,\n url=fiscal_year.URL,\n name=fiscal_year.name,\n total_data=fiscal_year.total_fy_data,\n # Backrefs (need db model):\n casc_id=casc_model.id)\n app.db.session.add(fy)\n else:\n print(\"---------SQL--------- [FiscalYear] Found {} in database...\"\n .format(fiscal_year.name.encode('utf-8')))\n if fy.sb_id != fiscal_year.ID:\n fy.sb_id = fiscal_year.ID\n if fy.name != fiscal_year.name:\n fy.name = fiscal_year.name\n if fy.url != fiscal_year.URL:\n fy.url = fiscal_year.URL\n if fy.total_data != fiscal_year.total_fy_data:\n fy.total_data = fiscal_year.total_fy_data\n # Backrefs (need db model):\n if fy.casc_id != casc_model.id:\n fy.casc_id = casc_model.id\n\n # Add new timestamp\n fy.timestamp = datetime.utcnow()\n\n app.db.session.commit()\n print(\"---------SQL--------- [FiscalYear] Done with {}.\"\n .format(fiscal_year.name.encode('utf-8')))\n return fy",
"def FS1Year(inputFolderPath = './Formatted Files Without Missing', outputFolderPath = './Feature Selection'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t# print files\n\t\t\t# call([\"java\",\"-jar\",\"MINE.jar\",\"./New Formatted Files/\"+files[0],str(targetList[i]+1),\"cv=0.5\"])\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\t# dataset = np.loadtxt('./New Formatted Files/'+files[0], delimiter=\",\", skiprows=1, usecols=tuple(range(1,3240)))\n\t\t\t# dataset = np.genfromtxt('./New Formatted Files/'+files[0], delimiter=\",\", names=True, autostrip=True, max_rows=10, missing_values=np.nan, usecols=tuple(range(1,30)))\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4))]\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\tX = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t# print tuple(range(1,3240))\n\t\t\t# print dataset.dtype.names[0]\n\t\t\t# print dataset.dtype.names[-1]\n\t\t\t# print dataset[0]\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\t\t\t# print dataset[0]\n\t\t\t# print (imputedX.shape, y.shape)\n\t\t\t# print (imputedX.shape, deleteMissingY.shape)\n\t\t\t# print (np.any(np.isnan(imputedX)), np.all(np.isfinite(imputedX)))\n\t\t\t# imputedX_new = SelectKBest(chi2, k=10).fit_transform(imputedX, y)\n\t\t\tk = 30\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t# print (len(selection.get_support()), len(header[1:target+1]+header[target+2:]))\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[1:target+1]+header[target+2:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t# for sf in selectedFeatures:\n\t\t\t# \tprint sf\n\t\t\t# print selection.scores_\n\t\t\t# print selection.get_support()\n\t\t\t# print 
(imputedX_new.shape, y.shape)\n\t\t\t# print (imputedX_new.shape, deleteMissingY.shape)\n\t\t\t# print imputedX[0,1994]\n\t\t\t# print dataset['3137_Estimates_and_projections_of_the_total_population_by_sex_age_and_rural__urban_areasSexTotal_10year_age_bands__2534_Geographical_coverage__National_Thousands_Persons__ILO']\n\t\t\t# print dataset\n\t\t\t# separate the data from the target attributes\n\t\t\t# X = np.concatenate((imputedDataset[:,0:7],imputedDataset[:,0:7]),axis=1)\n\t\t\t# y = imputedDataset[:,8]\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\t# print 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('Indicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()",
"def Field_data(fdpath, den=\"sDens2017Ls\"):\n\t# ========== Load in the relevant data ==========\n\tfsum = pd.read_csv(\"./data/field/RF_catsum.csv\")\n\tfsum.sort_values(by=[\"sn\"],inplace=True) \n\tfcut = fsum[fsum.sn<64]\n\t# fd18 = pd.read_csv(fdpath)\n\tfd17 = pd.read_csv(\"./data/field/2017data/siteDescriptions.csv\")\n\n\t# ========== Create and Ordered Dict for important info ==========\n\tinfo = OrderedDict()\n\tinfo[\"sn\"] = fd17[\"site number\"]\n\tinfo[\"lat\"] = fd17.strtY\n\tinfo[\"lon\"] = fd17.strtX\n\t\n\t# ========== function to return nan when a value is missing ==========\n\tdef _missingvalfix(val):\n\t\ttry:\n\t\t\treturn float(val)\n\t\texcept Exception as e:\n\t\t\treturn np.NAN\n\n\tdef _fireyear(val):\n\t\ttry:\n\t\t\tyear = float(val)\n\t\t\tif (year <= 2018):\n\t\t\t\treturn year\n\t\t\telse:\n\t\t\t\treturn np.NAN\n\t\texcept ValueError: #not a simple values\n\t\t\ttry:\n\t\t\t\treturn float(str(val[0]).split(\" and \")[-1])\n\t\t\texcept Exception as e:\n\t\t\t\tipdb.set_trace()\n\t\t\t\tprint(e)\n\t\t\t\treturn np.NAN\n\n\tinfo[den] = [_missingvalfix(\n\t\tfcut[fcut.sn == sn][den].values) for sn in info['sn']]\n\n\tinfo[\"RF17\"] = [_missingvalfix(\n\t\tfcut[fcut.sn == sn][\"RF2017\"].values) for sn in info['sn']]\n\t\n\t\t\n\tinfo[\"fireyear\"] = [_fireyear(\n\t\tfd17[fd17[\"site number\"] == sn][\"estimated fire year\"].values) for sn in info['sn']]\n\t# ========== Convert to dataframe and replace codes ==========\n\tRFinfo = pd.DataFrame(info)\n\tRFinfo[\"RF17\"].replace(0.0, \"AR\", inplace=True)\n\tRFinfo[\"RF17\"].replace(1.0, \"RF\", inplace=True)\n\tRFinfo[\"RF17\"].replace(2.0, \"IR\", inplace=True)\n\tRFinfo[\"YearsPostFire\"] = 2017.0 - RFinfo.fireyear\n\treturn RFinfo",
"def getCurrentData(self, entries):\n self.ListOfZipFiles.clear()\n #gets actual year\n year = date.today().year\n firstTimeFail = True\n #cycles until all years have been filtered\n while(1):\n firstTime = True\n zipFile = \"\"\n for entry in entries:\n index = 0\n if re.search(\"{}\".format(year), entry):\n if re.search(\".*rok.*\", entry):\n zipFile = entry\n break\n else:\n for char in entry:\n if char.isdigit():\n break\n index+=1\n if not entry[index:index+3].isdigit():\n if firstTime:\n zipFile = entry\n firstTime = False\n else:\n if zipFile[index:index+2] < entry[index:index+2]:\n zipFile = entry\n else:\n zipFile = entry\n break\n #if the new year don't have any data, tries to catch another one\n if zipFile==\"\":\n if firstTimeFail:\n firstTimeFail = False\n else:\n break\n else:\n #saves zip file name into list of filtered files\n self.ListOfZipFiles.append(zipFile)\n year-=1",
"def ANdatefixer(years):\n\n\n\t# ========== create the new dates ==========\n\t# year = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , 6, 30) for year in years]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates",
"def filename_to_year_set(filename, freq):\n year = year_from_filename(filename)\n if year % freq == 0:\n return int(year / freq)\n else:\n return int(year / freq) + 1",
"def update_cfd_portfolios():\n print(\"Processing CFD portfolios\")\n portfolio_mapping = {\"40675\": \"PB_CFD_SLMSEC_CR\"}\n portfolios = [acm.FPhysicalPortfolio[pname]\n for pname in portfolio_mapping]\n for portfolio in portfolios:\n target_portfolio_name = portfolio_mapping[portfolio.Name()]\n target_portfolio = acm.FPhysicalPortfolio[target_portfolio_name]\n save_additional_info(portfolio,\n \"PS_MirrorCRBook\",\n target_portfolio)"
]
| [
"0.6209893",
"0.6189374",
"0.60861397",
"0.5450324",
"0.54325956",
"0.5320851",
"0.5301557",
"0.5278848",
"0.52665955",
"0.5241639",
"0.5227907",
"0.5178437",
"0.51680624",
"0.5150038",
"0.50933963",
"0.5090828",
"0.50715846",
"0.5067478",
"0.50668526",
"0.503126",
"0.50204515",
"0.5009886",
"0.4999938",
"0.49992087",
"0.49732772",
"0.4963939",
"0.4958189",
"0.4957374",
"0.4935724",
"0.49353534"
]
| 0.6789207 | 0 |
Calculates the dependecy matrix, which is | def calc_dependency_matrix(encoder: nn.Module, latents: torch.Tensor) -> torch.Tensor:
# calculate the jacobian
jacob = jacobian(encoder.forward, latents)
# take the absolute value
return jacob.abs() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system",
"def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)",
"def get_element_density(mt):\r\n fraction_matrix = zeros(100)\r\n \r\n composition = Composition(mt['pretty_formula'])\r\n \r\n for element in composition:\r\n fraction = composition.get_atomic_fraction(element) # get the atomic fraction.\r\n fraction_matrix[element.Z] = fraction\r\n \r\n return fraction_matrix",
"def _core_calc_design(self,prof) :\n\t\tlp_list,ld_list = [],[]\n\t\tcp_list,cd_list = [],[]\n\t\t\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\t\n\t\t\tLp = np.ones(prof.n_sample)\n\t\t\tLd = np.ones(prof.n_sample)\n\t\t\n\t\t\t# Get regressor values\n\t\t\tX_p = [np.log(prof.var[:,reg-1]) for reg in reg_p ]\n\t\t\tX_d = [np.log(prof.var[:,reg-1]) for reg in reg_d ]\n\t\t\t\n\t\t\tLp = np.vstack((Lp,np.array(X_p))).T\n\t\t\tLd = np.vstack((Ld,np.array(X_d))).T\t\t\t\n\n\t\t\t# Calculate Cp\n\t\t\tCp = np.dot(LA.inv(np.dot(Lp.T,Lp)),Lp.T)\n\t\t\tCd = np.dot(LA.inv(np.dot(Ld.T,Ld)),Ld.T)\n\t\t\t# Append Lp,Ld,Cp and Cd to relevant lists\n\t\t\tlp_list.append(Lp)\n\t\t\tld_list.append(Ld)\n\t\t\tcp_list.append(Cp)\n\t\t\tcd_list.append(Cd)\t\t\t\n\t\treturn (lp_list,ld_list,cp_list,cd_list)",
"def _designMatrix_(self, p, tracker, is_X=False):\n temp1 = np.zeros([1, np.sum(tracker)])\n for pv in range(p):\n temp2 = self.data['days'][tracker].reshape(1, np.sum(tracker))**pv\n temp1 = np.vstack([temp1, temp2])\n if is_X: # if it is the design matrix for X, removes intercept\n temp1 = temp1[2:, ]\n else:\n temp1 = temp1[1:, ]\n return temp1.T",
"def assemble_Poisson_6th_order_FD_solver_matrices(Nx, BC):\n\n Poisson_6th_order_FD_solver_matrices = {}\n\n # Nx is the number of active nodes in configuration\n if BC['phi']['x']['type'] == 'PBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0: # first row\n D[i,i] = -2\n D[i,i+1] = 1\n D[i,-1] = 1\n\n elif i == Nx - 1: # last row\n D[i,i] = -2\n D[i,i-1] = 1\n D[i,0] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n if i == 0: # first row\n B[i,-2] = -1/240.\n B[i,-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == 1: # second row\n B[i,-1] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif 1 < i < (Nx - 2): # 2 <= row <= third before last\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == (Nx - 2): # second before last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,0] = -1/240.\n\n elif i == (Nx - 1): # last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,0] = 1/10.\n B[i,1] = -1/240.\n\n\n elif BC['phi']['x']['type'] == 'LDBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0 or i == Nx - 1: # last row\n D[i,i] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n\n # redundant, included for transparency\n if i == 0 or i == Nx - 1:\n B[i,i] = 0\n\n elif i == 1:\n B[i,i-1] = 3/40.\n B[i,i] = 209/240.\n B[i,i+1] = 1/60.\n B[i,i+2] = 7/120.\n B[i,i+3] = -1/40.\n B[i,i+4] = 1/240.\n\n elif i == Nx-1:\n B[i,i] = 0\n\n elif 1 < i < Nx-2:\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n B[i,i-4] = 1/240.\n B[i,i-3] = -1/40.\n B[i,i-2] = 7/120.\n B[i,i-1] = 1/60.\n B[i,i] = 209/240.\n B[i,i+1] = 3/40.\n\n elif BC['phi']['x']['type'] == 'LNBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LNBC row\n D[0,0] = -97/10.\n D[0,1] = 16.\n D[0,2] = -10\n D[0,3] = 5.\n D[0,4] = -3/2.\n D[0,5] = 1/5.\n\n # UDBC row\n D[-1,-1] = 1.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 317 / 240.\n B[i,i+1] = -133/120.\n B[i,i+2] = 187 / 120.\n B[i,i+3] = -23 / 20.\n B[i,i+4] = 109 / 240.\n B[i,i+5] = -3/40.\n\n elif i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n # else i == Nx-1: row of zeros\n\n elif BC['phi']['x']['type'] == 'LDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # UDBC row\n D[0,0] = 1.\n\n # LNBC row\n D[-1,-1] = -97/10.\n 
D[-1,-2] = 16.\n D[-1,-3] = -10\n D[-1,-4] = 5.\n D[-1,-5] = -3/2.\n D[-1,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n # i == 0 row contains all zeros\n\n if i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n if i == Nx-1:\n B[i,i-5] = -3/40.\n B[i,i-4] = 109 / 240.\n B[i,i-3] = -23 / 20.\n B[i,i-2] = 187 / 120.\n B[i,i-1] = -133/120.\n B[i,i] = 317 / 240.\n\n elif BC['phi']['x']['type'] == 'LDBC_LNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row 0)\n D[0,0] = 1.\n\n # LNBC row, (row 1)\n D[1,0] = -97/10.\n D[1,1] = 16.\n D[1,2] = -10\n D[1,3] = 5.\n D[1,4] = -3/2.\n D[1,5] = 1/5.\n\n # Poisson's equation rows\n for i in range(2,Nx):\n D[i,i-2] = 1\n D[i,i-1] = -2\n D[i,i] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(1,B.shape[0]):\n # if i == 0: row of zeros, density is not involved (corresponds to DBC)\n\n if i == 1:\n B[i,i-1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i+1] = 187 / 120.\n B[i,i+2] = -23 / 20.\n B[i,i+3] = 109 / 240.\n B[i,i+4] = -3/40.\n\n if i == 2:\n B[i, i-2] = 3 / 40.\n B[i, i-1] = 209 / 240.\n B[i,i] = 1 / 60.\n B[i,i+1] = 7 / 120.\n B[i,i+2] = -1 / 40.\n B[i,i+3] = 1 / 240.\n\n elif 3 <= i <= Nx-2:\n B[i,i-3] = -1/240.\n B[i,i-2] = 1/10.\n B[i,i-1] = 97/120.\n B[i,i] = 1/10.\n B[i,i+1] = -1/240.\n\n elif i == Nx-1:\n B[i,i-5] = 1/240.\n B[i,i-4] = -1/40.\n B[i,i-3] = 7/120.\n B[i,i-2] = 1/60.\n B[i,i-1] = 209/240.\n B[i,i] = 3/40.\n\n elif BC['phi']['x']['type'] == 'UDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row Nx-1)\n D[-1,-1] = 1.\n\n # LNBC row, (row Nx-2)\n D[-2,-1] = -97/10.\n D[-2,-2] = 16.\n D[-2,-3] = -10\n D[-2,-4] = 5.\n D[-2,-5] = -3/2.\n D[-2,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(Nx-2):\n D[i,i] = 1\n D[i,i+1] = -2\n D[i,i+2] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 3/40.\n B[i,i+1] = 209/240.\n B[i,i+2] = 1/60.\n B[i,i+3] = 7/120.\n B[i,i+4] = -1/40.\n B[i,i+5] = 1/240.\n\n if 1 <= i < Nx-3:\n B[i,i-1] = -1/240.\n B[i,i] = 1/10.\n B[i,i+1] = 97/120.\n B[i,i+2] = 1/10.\n B[i,i+3] = -1/240.\n\n elif i == Nx-3:\n B[i,i-3] = 1/240.\n B[i,i-2] = -1/40.\n B[i,i-1] = 7/120.\n B[i,i] = 1/60.\n B[i,i+1] = 209/240.\n B[i,i+2] = 3/40.\n\n elif i == Nx-2:\n B[i,i+1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i-1] = 187 / 120.\n B[i,i-2] = -23 / 20.\n B[i,i-3] = 109 / 240.\n B[i,i-4] = -3/40.\n\n # else i == Nx - 1: row of zeros, density is not involved (corresponds to DBC)\n\n Poisson_6th_order_FD_solver_matrices['D'] = D\n Poisson_6th_order_FD_solver_matrices['B'] = B\n\n return Poisson_6th_order_FD_solver_matrices",
"def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)",
"def y34(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['y_34'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[11 * ny + 2 * nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.pro_int.y34_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output",
"def design_matrix(x, basis, degree=0):\n # TO DO:: Compute desing matrix for each of the basis functions\n if basis == 'polynomial':\n result=None\n for i in range(1,degree+1):\n newMatrix=np.power(x,i)\n if result is None:\n result=newMatrix\n else:\n result=np.hstack((result,newMatrix))\n #initialize a column of ones to concat to final result\n res_rows=result.shape[0]\n ones_col=np.ones((res_rows,1))\n phi=np.hstack((ones_col,result))\n #phi=result[...,2:]\n elif basis == 'ReLU':\n result=None\n newMatrix=np.negative(x)\n newMatrix=np.add(newMatrix,5000)\n\n reLUtrix=np.maximum(newMatrix,0,newMatrix)\n if result is None:\n result=reLUtrix\n else:\n result=np.hstack((result,reLUtrix))\n res_rows=result.shape[0]\n ones_col=np.ones((res_rows,1))\n phi = np.hstack((ones_col,result))\n # Debug statement feel free to comment out\n #print(\"Value of phi\",phi)\n else:\n assert(False), 'Unknown basis %s' % basis\n\n return phi",
"def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad",
"def y3(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['y_3'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[8 * ny + 2 * nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.pro_int.y3_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n return output",
"def get_linear_discrete_matrices(self):\n\n vf_op, vb_op = get_operating_point_inputs(self.operating_point[1], self.model_type)\n if self.model_type == ModelType.EASY:\n At, Bt, Vf_op, Vd_op = getLinearizedMatrices(self.model_type, self.operating_point[0:6], vf_op, vb_op)\n elif self.model_type == ModelType.GYROMOMENT:\n At, Bt = get_gyro_matrices(self.operating_point, self.operating_point[6] / mc.K_f, vf_op, vb_op)\n if self.model_type == ModelType.EASY:\n Ct = np.array([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0]])\n elif self.model_type == ModelType.GYROMOMENT:\n if self.nOutputs == 3:\n Ct = np.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0]])\n elif self.nOutputs == 5:\n Ct = np.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0]])\n\n Ak, Bk, Ck, Dk = discretize_linear_state_space(At, Bt, Ct, Dt, self.timeStep)\n return Ak, Bk, Ck, Dk",
"def getMatrix(self) -> CMatrix4:\n ...",
"def Dmat(self):\n return self._Dmat_cache",
"def det_matrix(self):\n return np.linalg.det(self.take_matrix())",
"def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i",
"def DY(self,X,parameterValues,independentValues):\n jac=numpy.array([])\n if self.dYdp is not None: \n dYdp = self.dYdp\n exec(self.parameterNames + \"=parameterValues\")\n exec(self.independentNames + \"= independentValues\")\n exec(self.Xname+\"=X\")\n if self.WscaledName:\n exec(self.WscaledName + '=' +self.scalingW)\n if self.XscaledName:\n exec(self.XscaledName + \"=\"+self.scalingX)\n if self.Jac_dict:#this is for the common factors to precalculate\n for name in self.Jac_dict.keys():\n if name!='prior' and name!='priorvalues':\n exec(name + \"=\" + self.Jac_dict[name]) \n exec(\"jac=numpy.array(\"+dYdp+\")\")\n return jac",
"def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]",
"def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres",
"def _dmatrix(kn_u, kn_d):\n d = np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv",
"def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)",
"def y21(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['y_21'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[5 * ny + nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.aer_int.y21_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output",
"def adjoint(self):\n return self.cofactorMatrix().transpose()",
"def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on",
"def computeTransitionMatrix(self, mu = False):\n\n available_strategies = self.getAvailableStrategies()\n t_matrix = np.matrix([[0. for strategy in available_strategies] for strategy in available_strategies])\n\n for i, resident in enumerate(available_strategies):\n for j, invader in enumerate(available_strategies):\n if (i != j):\n fixation_proba = self.rho(resident, invader)\n if(mu):\n fixation_proba *= self.getExplorationRate()\n t_matrix[i, j] = 1./(len(available_strategies) - 1) * fixation_proba\n\n # calculate diagonal elements\n for i in range(len(available_strategies)):\n t_matrix[i, i] = 1 - t_matrix[i].sum(axis = 1)\n\n return t_matrix",
"def y31(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['y_31'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[9 * ny + 2 * nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.pro_int.y31_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output",
"def adjoint(self):\n data = []\n for i in range(1, self.rows + 1):\n for j in range(1, self.columns + 1):\n data.append(self._cofactor(i, j))\n\n mat = Matrix(self.rows, self.columns, data)\n return mat.transpose()",
"def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp",
"def DME(nodes, elements):\r\n\r\n nels = elements.shape[0]\r\n IELCON = np.zeros([nels, 2], dtype=np.integer)\r\n DME_mat = np.zeros([nels, 6], dtype=np.integer)\r\n neq, IBC = eqcounter(nodes)\r\n nnodes = 2\r\n for i in range(nels):\r\n for j in range(nnodes):\r\n IELCON[i, j] = elements[i, j+3]\r\n kk = IELCON[i, j]\r\n for l in range(3):\r\n DME_mat[i, 3*j+l] = IBC[kk, l]\r\n return DME_mat, IBC, neq",
"def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot"
]
| [
"0.6056143",
"0.586802",
"0.58234936",
"0.5677216",
"0.56426907",
"0.56066704",
"0.56030965",
"0.55682796",
"0.5565402",
"0.55227166",
"0.5503907",
"0.54893047",
"0.5426093",
"0.5422756",
"0.541555",
"0.54099625",
"0.5407855",
"0.5392639",
"0.53858066",
"0.5381336",
"0.53622663",
"0.535344",
"0.5350836",
"0.5340039",
"0.5338162",
"0.53337485",
"0.5325254",
"0.53169954",
"0.5315706",
"0.5302837"
]
| 0.63176143 | 0 |
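A brief illustrative aside on the positive document in the row above: it builds a dependency matrix as the absolute Jacobian of an encoder's outputs with respect to its latent inputs. The sketch below is a minimal, hedged reconstruction of that idea — it assumes `jacobian` refers to `torch.autograd.functional.jacobian` (the row does not show its imports), and the toy two-layer `encoder`, the latent size of 4, and the output size of 3 are invented purely for demonstration.

```python
# Hedged sketch (not a dataset row): illustrates the Jacobian-based dependency
# matrix from the document above. The encoder architecture and tensor sizes are
# arbitrary assumptions; `jacobian` is assumed to be torch.autograd.functional.jacobian.
import torch
import torch.nn as nn
from torch.autograd.functional import jacobian

encoder = nn.Sequential(nn.Linear(4, 8), nn.Tanh(), nn.Linear(8, 3))
latents = torch.randn(4)  # a single latent vector

# jacobian() returns d(encoder(latents))_i / d(latents)_j with shape (3, 4);
# taking the absolute value gives the unsigned dependency strength of each
# output dimension on each latent dimension, as in the document above.
dependency = jacobian(encoder.forward, latents).abs()
print(dependency.shape)  # torch.Size([3, 4])
```

Entry (i, j) of the result is |∂ output_i / ∂ latent_j|, so larger values indicate a stronger dependence of output dimension i on latent dimension j.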
Nonnested view of the unfaceted_array_of_objects field | def non_nested_array_of_objects(self, unfaceted_array_of_objects):
return unfaceted_array_of_objects | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def objectFields(self):\n raise NotImplementedError",
"def iflatten(self):\n return _((e for es in self.array for e in es))",
"def _get_embedded_objects(self):\n return [getattr(self, name) for name, field in self._get_fields().items() if isinstance(field, fields.Object)]",
"def do_flatten(obj):\n if type(obj) == list:\n return np.array(obj).flatten()\n return obj.flatten()",
"def objects_rst(self):\n return [_.as_rst for _ in self.objects]",
"def obj_list(self):\n return self._obj_list",
"def serialize_list(self, obj):\n return self.serialize_tuple(obj)",
"def __array__(self):\n return dict2rec(self)",
"def fields(self):",
"def __array__(self):\n return self.array",
"def unindexed_properties(self):\n return getattr(self, '_Entity__unindexed_properties', [])",
"def list(self):\n return self.cell.objects+self.cell.tempObjects",
"def deconstruct(self):\n name, path, args, kwargs = super(DateTimeListField, self).deconstruct()\n kwargs['objects'] = self.objects\n return name, path, args, kwargs",
"def array(self):",
"def displayable_items(self):\r\n return [self]",
"def objects(self):",
"def __array__(self):\n return self.to_array()",
"def scorable_fields(self):\r\n return [i for i, field in enumerate(self) if field.scorable]",
"def __repr__(self):\n return \"[\" + \", \".join([str(member) for member in self.table]).rstrip(\",\") + \"]\"",
"def toarray(self, object_):\n\n raise NotImplementedError",
"def get_objects_data(self):\n pass",
"def as_list(self):\n return self._flattened_inputs",
"def displayable_items(self):\r\n return [self.descriptor]",
"def raw_fields(self):\n pass",
"def to_representation(self, value):\n return [\n {\n \"color\": tag.color,\n \"level\": tag.level,\n \"tag_id\": tag.tag_id,\n \"name\": tag.name,\n \"order_num\": tag.order_num,\n }\n for tag in value.all()\n ]",
"def raw_fields(self):\n list_fields = ['BAROR', None, None] + self.x.tolist() + [self.offt]\n return list_fields",
"def tolist(self, flat=0):\n pass",
"def to_list_flat(self):\n return self.rep.entries()",
"def _metatize_ndarray(obj):\n return metatize(obj.view(HashableNDArray))",
"def dataObjects(self):\n\t\treturn self._objects"
]
| [
"0.6074154",
"0.5800832",
"0.5692128",
"0.5534284",
"0.54768306",
"0.54336363",
"0.54329735",
"0.54007703",
"0.538946",
"0.53849775",
"0.5372095",
"0.53356856",
"0.53252435",
"0.5293318",
"0.52790385",
"0.5278998",
"0.5253122",
"0.5250943",
"0.5238906",
"0.5227962",
"0.52091753",
"0.5203838",
"0.51973957",
"0.5178476",
"0.5171266",
"0.5162739",
"0.5155336",
"0.514038",
"0.51320624",
"0.51234764"
]
| 0.77682173 | 0 |
Load image from file and perform preprocessing. Args | def _load_preprocess_image(self, image_file):
image_raw = tf.io.read_file(image_file)
image = self._preprocess_image(image_raw)
return image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image",
"def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)",
"def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)",
"def process_path(file_path: str):\r\n img = tf.io.read_file(file_path)\r\n img = tf.image.decode_jpeg(img, channels=3)\r\n img = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])\r\n return tf.keras.applications.efficientnet.preprocess_input(img) # Shape: IMG_SIZE x IMG_SIZE x 3\r",
"def load_image(path, preprocess=True):\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n return x",
"def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')",
"def train_image_parse_function(filename, *argv):\n image = read_image(filename)\n image = tf.image.random_flip_left_right(image)\n\n if FLAGS.augmentation:\n print('data augmentation')\n resized_image = resize_and_random_crop_image(image)\n else:\n resized_image = resize_image(image)\n resized_image = scale_image_value(resized_image)\n\n if len(argv) == 1:\n return resized_image, argv[0]\n elif len(argv) == 2:\n return resized_image, argv[0], argv[1]\n else:\n return resized_image",
"def loadRaw(self, path, preprocfunc=None):\n # Only for 8 and 32 bit images\n depth = self.getDepth()\n if depth==1:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_DEPTH)\n \n # Loading the file\n f = file(path, 'rb')\n data = f.read()\n f.close()\n \n # Preprocessing the data if a function was given\n if preprocfunc:\n data = preprocfunc(data)\n \n # Verification over data size\n (w,h) = self.getSize()\n im_size = w*h*(depth/8)\n assert(len(data)==im_size*self.length)\n \n # Loading the data\n for i,im in enumerate(self.seq):\n err = mambaCore.MB_Load(im.mbIm, data[i*im_size:(i+1)*im_size], im_size)\n mamba.raiseExceptionOnError(err)\n self.name = path",
"def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")",
"def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image",
"def openAndPreProcessImage(path, copyOrig=False, preproc={}):\n try:\n im = Image.open(path).convert('L') #Open as a uint8 image\n except FileNotFoundError:\n print(f'Error: {path} not found')\n return\n except OSError:\n print(f'Error: Cannot open {path}, please check image formats supported by PIL.Image')\n return\n im = np.asarray(im)#[125:375,125:375] #Take a smaller region for speed\n \n # Also return an unprocessed copy of original image, if required\n im_orig = im.copy() if copyOrig else None\n \n return preProcessImage(im, **preproc), im_orig",
"def load_and_preprocess_image(path):\n\n img = cv2.imread(path, 0) # Load image into greyscale\n img = cv2.equalizeHist(img) # Histogram equilization\n return img",
"def preprocess(file_path, model_preprocess_function):\n img = image.load_img(file_path, target_size=(224, 224))\n x = image.img_to_array(img)\n # x = np.expand_dims(x, axis=0)\n x = model_preprocess_function(x)\n return x",
"def preprocess(path, img_w, img_h):\n #print(path)\n img = cv2.imread(path)\n #print(img.shape)\n #resizing the image to particular size (64, 128, 3)\n img = fix_size(img, img_w, img_h)\n #print(img.shape)\n \n #assigining values less than zero to zer0 and greater than zero to 1\n img = np.clip(img, 0, 255)\n\n #changing the interger to more useful and complex integer\n img = np.uint8(img)\n\n #convert an image to one color space to another\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n #chaging the values datatype to float\n img = img.astype(np.float32)\n\n #normalization\n img /= 255\n return img",
"def process_image(self):\n pass",
"def load_image(self, **kwargs):\n ...",
"def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_",
"def load_and_preprocess_image(path, max_dim=512):\n f = tf.io.read_file(path)\n img = tf.io.decode_image(f)\n img = resize_min(img, max_dim)\n img = tf.expand_dims(img, axis=0)\n img = vgg_preprocess_input(img)\n return img",
"def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data",
"def process(self, image):",
"def process_image_file(self, image_file):\n image = image_util.load_image_from_file(image_file)\n return self.process_image(image)",
"def img_preprocessing(save_path,img_path, filename):\n\n\tsave_path_filename = save_path + filename\n\n\t#Check if file exits\n\tif not os.path.exists(img_path + filename):\n\t\tlogger.error(\" image path {} does not exit\".\n\t\t\t\t\t\t\t\tformat(img_path + filename))\n\n\timage = plt.imread(img_path + filename)\n\n\timg_gray_orig_0 = rgb2gray(image)\n\n\timg_gray_orig = img_resize(img_gray_orig_0, 2*IMG_SIZE)\n\n\timg_just_bone = img_preprocess_core(img_gray_orig)\n\n\ttry:\n\t img_bone = img_pad_resize(img_just_bone, 2*IMG_SIZE) \n\t #Second iteration of image segmentation\n\t img_just_bone = img_preprocess_core(img_bone)\n\t img_bone = img_pad_resize(img_just_bone, IMG_SIZE)\n\n\t plt.imsave(save_path_filename, img_bone)\n\t \n\texcept ValueError:\n\t\tlogger.error(\"Unable to run 2nd interaton for {}\".format(filename))",
"def process_image(self):\n\n detect.main(self.nn_args)",
"def load_image(file_name):\n image = Image.open(file_name)\n # im = numpy.array(image)\n normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t_list = [transforms.Resize(256), transforms.CenterCrop(224)]\n t_end = [transforms.ToTensor(), normalizer]\n transform = transforms.Compose(t_list + t_end)\n return transform(image)",
"def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot",
"def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)",
"def load_image(self):\n if isinstance(self.filename, str):\n self.image = np.asarray(PIL.Image.open(self.filename))\n elif isinstance(self.filename, np.ndarray):\n self.image = np.asarray(self.filename)\n if self.image.ndim < 3:\n self.bw = True\n if self.image.ndim < 2:\n self.image = None\n print(\"file {} is not an appropriate format.\".format(\n self.filename))\n if self.image.ndim == 3:\n if self.image.shape[-1] == 1:\n self.image = np.squeeze(self.image)\n elif self.image.shape[-1] > 3:\n self.image = self.image[..., :-1]\n if (self.image[..., 0] == self.image.mean(-1)).mean() == 1:\n self.image = self.image[..., 0]\n self.bw = True\n return self.image",
"def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label",
"def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)"
]
| [
"0.7445202",
"0.7336954",
"0.732776",
"0.68389344",
"0.67588747",
"0.6732957",
"0.6699492",
"0.6671857",
"0.6556289",
"0.6554106",
"0.65509486",
"0.64734775",
"0.64721584",
"0.64554745",
"0.6452393",
"0.6435409",
"0.6434359",
"0.6419279",
"0.6392069",
"0.63866806",
"0.636376",
"0.634322",
"0.6338036",
"0.6327303",
"0.63267696",
"0.6323581",
"0.6321191",
"0.63162947",
"0.63141584",
"0.6307517"
]
| 0.7423453 | 1 |
Generate the training/holdout folds from a set of splits. Args | def _get_folds(self, splits):
train = [splits.copy() for i in range(self.n_splits)]
holdout = [train[i].pop(i) for i in range(self.n_splits)]
train_flat = [list(chain(*row)) for row in train]
return list(zip(train_flat, holdout)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_folds(train_path,n_splits):\n df = pd.read_csv(train_path, sep = \",\") \n\n df[\"kfold\"] = -1\n\n df = df.sample(frac = 1).reset_index(drop = True)\n\n kf = model_selection.StratifiedKFold(n_splits = n_splits)\n\n for fold,(trn_,val_) in enumerate(kf.split(X = df,y = df.Prediction.values)):\n df.loc[val_,\"kfold\"] = fold \n \n df.to_csv(\"../input/train_folded.csv\", index = False)",
"def generate_folds(n_samples, n_folds=10, train_fraction=.8, len_blocks=5,\n overlapping=True):\n if overlapping:\n splits = np.array_split(np.arange(n_samples), n_folds)\n\n folds = []\n for s in range(n_folds):\n if s == 0:\n training = np.concatenate(splits[(s + 1):], axis=0)\n elif s == n_folds - 1:\n training = np.concatenate(splits[:s], axis=0)\n else:\n fold_splits = splits[:s] + splits[(s + 1):]\n training = np.concatenate(fold_splits, axis=0)\n validation = splits[s]\n folds.append((training, validation))\n else:\n folds = aone.utils.generate_trnval_folds(\n N=n_samples,\n sampler='cv',\n nfolds=n_folds,\n testpct=(1 - train_fraction),\n nchunks=len_blocks,\n )\n folds = [(training, validation) for training, validation in folds]\n return folds",
"def _generate_validation_fold(self):\n\n for offset in range(self.nb_folds):\n # Load all the data from cache (do this to save memory)\n with open(self.data_cache_path_str + \"data_cache.pkl\", \"rb\") as f:\n data_df, target_df = pickle.load(f)\n\n # Generate train and test sets\n data_dates_lst = data_df[\"date\"].drop_duplicates().sort_values().tolist()\n train_start_day = len(data_dates_lst) - ((self.nb_folds - offset) * self.test_nb_days + self.train_nb_days)\n train_end_day = train_start_day + self.train_nb_days\n test_start_day = train_end_day\n test_end_day = test_start_day + self.test_nb_days\n\n train_dates_lst = data_dates_lst[train_start_day:train_end_day]\n test_dates_lst = data_dates_lst[test_start_day:test_end_day]\n\n # Generate train and test labels\n training_set_df = data_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n testing_set_df = data_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n new_target_df = target_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n truth_df = target_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n\n # Reduce memory usage\n del data_df, target_df\n gc.collect()\n\n # Return result\n yield (training_set_df, testing_set_df, new_target_df, truth_df)",
"def fold(self, fold, shuffle = True):\n testData = self.mergefolds[fold]\n trainData = sum(self.mergefolds[:fold] +\n self.mergefolds[fold + 1:], [])\n if shuffle:\n random.shuffle(testData)\n random.shuffle(trainData)\n return trainData, testData",
"def get_splits(data, fold, n_folds=10):\n assert fold < n_folds, f'fold value must be less than n_folds value ({n_folds:d})'\n from sklearn.model_selection import StratifiedKFold\n skf = StratifiedKFold(n_splits=n_folds, random_state=0, shuffle=True)\n all_folds = [{'train_ind': train_ind, 'val_ind': val_ind} for train_ind, val_ind in skf.split(\n X=np.zeros(shape=data['cluster'].shape), y=data['cluster'])]\n return all_folds[fold]['train_ind'], all_folds[fold]['val_ind']",
"def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))",
"def assignFolds(folds):\r\n training = []\r\n testing = []\r\n k = len(folds)\r\n fold=0\r\n while fold < k:\r\n testing.append(folds.pop(fold))\r\n training.append([y for x in folds for y in x])\r\n folds.insert(fold, testing[fold])\r\n fold = fold+1\r\n return training, testing",
"def kfold_cross_validation(X, n_splits=5):\n\n #Define variables\n X_train_folds = []\n X_test_folds = []\n indexes = list(range(len(X)))\n index = 0\n\n #Create folds\n for i in range(n_splits):\n test = []\n train = []\n #Determine how many to put in test\n if((len(X) % n_splits) > i):\n numTest = len(X) // n_splits +1\n else:\n numTest = len(X) // n_splits\n for j in range(numTest):\n if(index < len(X)):\n test.append(index)\n indexes.pop(indexes.index(index))\n index = index + 1\n for index1 in indexes:\n train.append(index1)\n X_test_folds.append(test)\n X_train_folds.append(train)\n indexes = list(range(len(X)))\n\n return X_train_folds, X_test_folds",
"def split_cv(length, num_folds):\n splits = [SplitIndices([], []) for _ in range(num_folds)]\n indices = list(range(length))\n random.shuffle(indices)\n fold_length = (int)(length / num_folds)\n\n for y in range(1, num_folds + 1):\n fold_n = 1\n counter = 0\n for x in indices:\n if fold_n == y:\n splits[y - 1].test.append(x)\n else:\n splits[y - 1].train.append(x)\n counter = counter + 1\n if counter % fold_length == 0:\n fold_n = fold_n + 1\n return splits",
"def cv_folds(bco_splits, resampling_ratio):\n folds = resample_bco_splits(bco_splits, resampling_ratio)\n # for each step, choose (n-1) train folds and 1 test fold\n n_folds = len(folds)\n cv_folds = []\n for i in range(n_folds):\n if n_folds > 1:\n fold_indices = np.delete(np.arange(n_folds), i)\n # select & concatenate folds[indices]\n indices_train = combine_folds(folds, fold_indices)\n indices_test = folds[i]\n else:\n indices_train = folds[i]\n indices_test = []\n cv_folds.append((indices_train, indices_test))\n\n return cv_folds",
"def __init__(self, splits):\n\t\tself.kfold = KFold(splits)",
"def make_splits(input_pkl, test_split=0.1, val_split=0.1):\n if (test_split > 1) or (val_split > 1) or (test_split + val_split > 1) or (test_split <= 0) or (val_split <= 0):\n logging.warning('Check the input for make splits, quitting')\n exit()\n\n main_dict = load_pickle(input_pkl)\n data, labels = main_dict['data'], main_dict['labels']\n idx_arr = np.random.choice(len(data), len(data))\n data, labels = data[idx_arr], labels[idx_arr]\n print(len(data[0][-1]))\n # Find the split sizes\n val_split = int(len(data) * val_split)\n test_split = val_split + int(len(data) * test_split)\n\n # Make and save the splits\n save_pickle({'data': data[:val_split], 'labels': labels[:val_split]}, 'data/val.pkl')\n save_pickle({'data': data[val_split:test_split], 'labels': labels[val_split:test_split]}, 'data/test.pkl')\n save_pickle({'data': data[test_split:], 'labels': labels[test_split:]}, 'data/train.pkl')",
"def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds",
"def split_data(y, num_folds=10):\r\n print(f\"Creating splits...\", end=\"\")\r\n\r\n fold_dict = dict()\r\n start_index = 0\r\n # if the number of proteins is not evenly divisible by the number of folds, the last samples are distributed\r\n # evenly across folds\r\n fold_size = math.floor(len(y) / num_folds)\r\n for fold in range(num_folds):\r\n fold_dict[fold] = list(range(start_index, start_index + fold_size))\r\n start_index += fold_size\r\n\r\n # distributing samples which are left over (due to the number of samples not being divisible by the number of folds)\r\n # evenly across folds\r\n fold = 0\r\n while start_index < len(y):\r\n fold_dict[fold] += [start_index]\r\n start_index += 1\r\n fold += 1\r\n\r\n # sanity check that we did not loose any samples while splitting\r\n assert sum([len(fold) for fold in fold_dict.values()]) == len(y), \"Number of samples after splitting does not \" \\\r\n \"match number of samples before splitting.\"\r\n\r\n additional_text = \"\" if len(y) % num_folds == 0 else f\" with {len(y) % num_folds} left over samples \" \\\r\n f\"being distributed evenly among folds\"\r\n print(f\"done! Created {num_folds} splits of size {fold_size}{additional_text}.\")\r\n\r\n # TODO: use the results of this to determine if we should proceed with the current folds\r\n test_stratification(fold_dict, y)\r\n\r\n return fold_dict",
"def _split_sets(X, y, folds, ind=-1, sample_counter=0):\n\n fold = folds.pop(ind) - sample_counter\n X_test = X[fold, ...]\n y_test = y[fold, ...]\n X_train = np.delete(X, fold, axis=0)\n y_train = np.delete(y, fold, axis=0)\n test_fold = fold + sample_counter\n # return X_train, np.squeeze(y_train), X_val, np.squeeze(y_val)\n return X_train, y_train, X_test, y_test, test_fold",
"def kfold_cross_validation(X, n_splits=5):\r\n X_train_folds = []\r\n X_test_folds = []\r\n\r\n x_len = len(X)\r\n \r\n fold_modulus = x_len % n_splits\r\n \r\n start_idx = 0\r\n for fold in range(n_splits): \r\n\r\n if fold < fold_modulus:\r\n fold_size = x_len // n_splits + 1\r\n else:\r\n fold_size = x_len // n_splits\r\n\r\n fold_end = (start_idx + fold_size) - 1\r\n\r\n tmp = []\r\n for i in range(start_idx, fold_end + 1):\r\n tmp.append(i)\r\n X_test_folds.append(tmp)\r\n\r\n tmp = []\r\n for i in range(0, x_len):\r\n if i not in X_test_folds[fold]:\r\n tmp.append(i)\r\n X_train_folds.append(tmp)\r\n\r\n start_idx = fold_size + start_idx \r\n\r\n return X_train_folds, X_test_folds",
"def final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, sanity_check=False, oversampling=False):\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # stack the holdout feature vectors on the feature vectors of all folds\n X_all = np.concatenate([X_train, X_holdout], axis=0)\n y_all = np.concatenate([y_train, y_holdout], axis=0)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder for the specific classifer\n scorer_folder_name = scorer_type+\"_final\"\n save_folder = get_save_folder(parent_folder, scorer_folder_name+\"_new\")\n\n # get classifier and only pass a save folder if the classifier should be saved\n clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n #perform oversampling if selected\n if oversampling == True:\n if 'f_ext' in scorer_type:\n print(\"Oversampling not defined for LSTM\")\n exit()\n\n import datetime\n start = datetime.datetime.now().time()\n print(\"Started oversampling/undersampling at: \" + str(start))\n # uncomment following lines for the different sampling methods #####\n # Oversampling\n from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler\n print(\"Oversampling data\")\n #kind = ['regular', 'borderline1', 'borderline2', 'svm']\n #sm = SMOTE(kind='regular',)\n #X_res, y_res = sm.fit_sample(X_all, y_all)\n\n #ros = RandomOverSampler()\n #X_res, y_res = ros.fit_sample(X_all, y_all)\n\n #ada = ADASYN()\n #X_res, y_res = ada.fit_sample(X_all, y_all)\n\n ######################################################\n # Undersampling\n from imblearn.under_sampling import TomekLinks, EditedNearestNeighbours, CondensedNearestNeighbour, \\\n NeighbourhoodCleaningRule, InstanceHardnessThreshold\n # remove Tomek links\n tl = TomekLinks(return_indices=True)\n X_res, y_res, idx_resampled = tl.fit_sample(X_all, y_all)\n\n #enn = EditedNearestNeighbours(random_state=0)\n #X_res, y_res = enn.fit_sample(X_all, y_all)\n\n #cnn = CondensedNearestNeighbour(random_state=0)\n #X_res, y_res = cnn.fit_sample(X_all, y_all)\n\n #ncr = NeighbourhoodCleaningRule(random_state=0)\n #X_res, y_res = ncr.fit_sample(X_all, y_all)\n\n #iht = InstanceHardnessThreshold(random_state=0, estimator=clf)\n #X_res, y_res = iht.fit_sample(X_all, y_all)\n\n\n ##################\n # Combination of Undersampling and oversampling\n\n from imblearn.combine import SMOTEENN, SMOTETomek\n #smote_enn = SMOTEENN(random_state=0)\n #X_res, y_res = smote_enn.fit_sample(X_all, y_all)\n\n #smote_tomek = SMOTETomek(random_state=0)\n #X_res, y_res = smote_tomek.fit_sample(X_all, y_all)\n\n end = datetime.datetime.now().time()\n print(\"Ended oversampling/undersampling at: \" + str(end))\n\n clf.fit(X_res, y_res)\n else: # if oversampling is false\n import datetime\n # fit the final classifier\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n clf.fit(X_train, 
y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n clf.fit(X_all, y_all)\n\n # save the model\n filename = scorer_folder_name + \".sav\"\n save_model(clf, save_folder, filename) # save model with filename to specific folder\n\n # predict on the data the classifier was trained on => should give near perfect score\n if sanity_check == True:\n # get predicted and actual labels\n y_predicted = clf.predict(X_all)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_all]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy, f1 macro\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_all, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_all, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_all, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_all, stance=False)\n\n # printout results\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(\"SANITY CHECK (predict on train data):\")\n print(printout)\n return clf",
"def __init__(self, splits):\n\t\tself.kfold = StratifiedKFold(splits)",
"def validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features, features_dir,\n scorer_type, feat_indices, result_string, learning_rate_string, features):\n # define folder to save the classifier and create it if not existing\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder\n save_folder = get_save_folder(parent_folder, scorer_type+\"_new\")\n\n # only pass a save folder if the classifier should be saved\n best_clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # concat non-bleeding features\n X_train, X_holdout, feat_indices_holdout = concat_non_bleeding_features(\n X_train, X_holdout,\n non_bleeding_features, features_dir, 'holdout')\n\n # test for oversampling: fits the current classifier, oversampled with a given\n # method and checks the score on the holdout set\n use_over_sampling = False\n if use_over_sampling == True:\n from imblearn.over_sampling import SMOTE\n kind = ['regular', 'borderline1', 'borderline2', 'svm']\n for m in kind:\n sm = SMOTE(kind=m)\n X_res, y_res = sm.fit_sample(X_train, y_train)\n best_clf.fit(X_res, y_res)\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n print(\"Score \" + m + \":\" + str(score))\n\n\n #Taken from Benjamins LSTM\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n best_clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n best_clf.fit(X_train, y_train)\n\n # predict labels\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n\n # calc FNC score\n fold_score, cm = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy for related/unrelated and stances\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_holdout, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_holdout, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_holdout, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_holdout, stance=False)\n\n # prepare printout for final results of holdout set\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related, f1_stance, score)\n printout += printout_manager.calculate_confusion_matrix(cm)\n print(printout) # print holdout results\n result_string += printout + \"\\n\"# add results to string that is going to be saved into a file\n\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n 
printout_manager.save_file(result_string, result_file_folder + \"/fnc_results_holdout.txt\", \"a+\")\n\n #aligned printout for ablation:\n summary = printout_manager.get_holdout_ablation_printout(features, score,f1_stance,save_folder)\n printout_manager.save_file(summary, result_file_folder + \"/fnc_results_holdout_summary.txt\", \"a+\")\n\n # test saving and restoring model\n #filename = scorer_type + \".sav\"\n #save_model(best_clf, save_folder,filename)\n #load_clf = load_model(parent_folder + scorer_type + \"_new_0/\", filename) # the 0th folder should always exist\n #print_score_from_restored_model(load_clf, X_holdout, y_holdout)\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(best_clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += best_clf.get_learning_rates('holdout') + \"\\n\"\n\n # print feature importances\n if scorer_type == 'randomforest':\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n importances = best_clf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in best_clf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n feat_indices.append(feat_indices_holdout)\n\n feat_importance_string = str(feat_indices) + \"\\n\"\n for i in indices:\n feat_importance_string += str(i) + \";\" + str(importances[i]) + \";\" + str(std[i]) + \"\\n\"\n\n # save feature importances as file\n printout_manager.save_file(feat_importance_string, result_file_folder + \"/feat_importance_rf.txt\", \"a+\")\n\n return result_string, learning_rate_string",
"def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))",
"def stratified_kfold_cross_validation(X, y, n_splits=5):\n\n #Define variables\n X_train_folds = []\n X_test_folds = []\n\n #Create dictionary \n y_dict = myutils.group_by(y)\n\n #Split data\n folds = [[] for _ in range(n_splits)]\n for category in y_dict.keys():\n index = y_dict[category]\n for i in range(len(index)):\n folds[i % n_splits].append(index[i])\n\n #Add data to train and testing sets\n for i in range(n_splits):\n train = []\n for j in range(n_splits):\n if i != j:\n for item in folds[j]:\n train.append(item)\n test = folds[i]\n X_train_folds.append(train)\n X_test_folds.append(test)\n \n return X_train_folds, X_test_folds",
"def gen_splits(n_splits, test_size, X, Y, groups=None, random_state=0):\n from sklearn.model_selection import GroupShuffleSplit\n\n gss = GroupShuffleSplit(\n n_splits=n_splits, test_size=test_size, random_state=random_state\n )\n train_test_splits = list(gss.split(X, Y, groups=groups))\n split_indices = list(range(n_splits))\n return train_test_splits, split_indices",
"def get_folds(X, y, k):\n # temporarily change the 1/-1 nature of y to 1/0\n _y = (y + 1) / 2\n # partition the examples into postive and negative sets\n positive_indices = np.where(_y)[0]\n negative_indices = np.where(_y - 1)[0]\n assert len(positive_indices) + len(negative_indices) == len(y)\n\n # shuffle both lists\n np.random.shuffle(positive_indices)\n np.random.shuffle(negative_indices)\n\n # create k buckets of indices of (approximately) equal size\n positive_folds_indices = \\\n np.array(np.array_split(positive_indices, k))\n negative_folds_indices = \\\n np.array(np.array_split(negative_indices, k))\n\n train_X, train_y, test_X, test_y = [], [], [], []\n for i in range(k):\n train_folds = np.concatenate((np.arange(0, i), np.arange(i+1, k)))\n pos_train_indices = np.concatenate(positive_folds_indices[train_folds])\n neg_train_indices = np.concatenate(negative_folds_indices[train_folds])\n pos_test_indices = positive_folds_indices[i]\n neg_test_indices = negative_folds_indices[i]\n\n train_X.append(\n np.concatenate((X[pos_train_indices], X[neg_train_indices]))\n )\n train_y.append(\n np.concatenate((y[pos_train_indices], y[neg_train_indices]))\n )\n test_X.append(\n np.concatenate((X[pos_test_indices], X[neg_test_indices]))\n )\n test_y.append(\n np.concatenate((y[pos_test_indices], y[neg_test_indices]))\n )\n\n return zip(train_X, train_y, test_X, test_y)",
"def get_from_folds(X_vals, y_vals, train_folds, test_folds):\n X_train = []\n y_train = []\n for row in train_folds:\n for i in row:\n X_train.append(X_vals[i])\n y_train.append(y_vals[i])\n\n X_test = []\n y_test = []\n for row in test_folds:\n for i in row:\n X_test.append(X_vals[i])\n y_test.append(y_vals[i])\n\n return X_train, y_train, X_test, y_test",
"def stratified_kfold_cross_validation(X, y, n_splits=5):\r\n indices = [x for x in range(0, len(X))]\r\n labels = []\r\n uniq_feat = []\r\n\r\n for idx,clss in enumerate(y):\r\n\r\n if clss in uniq_feat:\r\n labels[uniq_feat.index(clss)].append(indices[idx])\r\n else:\r\n labels.append([indices[idx]])\r\n uniq_feat.append(clss)\r\n \r\n index = 0\r\n X_test_folds = [[] for _ in range(0, n_splits)]\r\n\r\n for label in labels:\r\n for val in label:\r\n fold_idx = index%n_splits\r\n X_test_folds[fold_idx].append(val)\r\n index += 1\r\n \r\n X_train_folds = [[] for _ in range(0, n_splits)]\r\n\r\n for i in range(0, len(X)):\r\n for j in range(0, n_splits):\r\n if i not in X_test_folds[j]:\r\n X_train_folds[j].append(i)\r\n \r\n return X_train_folds, X_test_folds",
"def train_validation_split(self, threshold=None):\n for train, validation in self._get_k_folds(5, threshold):\n train_provider = train\n validation_provider = validation\n break\n return train_provider, validation_provider",
"def construct_folds(protein=0, fingerprint=4, n_folds=10, seed=0):\n np.random.seed(seed)\n X, Y = load_svmlight_file(os.path.join(c[\"DATA_DIR\"], \\\n proteins[protein]+\"_\"+fingerprints[fingerprint]+\".libsvm\"))\n\n\n skf = sklearn.cross_validation.StratifiedKFold(Y, n_folds=n_folds, random_state = seed)\n folds = []\n for tr_id, ts_id in skf:\n folds.append({\"train_id\":tr_id, \"test_id\":ts_id})\n\n return folds",
"def generate_folds(self) -> list[dict]:\n pass",
"def fold(nb_splits, dataset):\r\n index = np.arange(np.shape(dataset)[0])\r\n splits = np.split(index, nb_splits)\r\n\r\n index = []\r\n\r\n for n_fold in np.arange(nb_splits):\r\n index.append((splits[n_fold].tolist(),(np.concatenate([x for i,x in enumerate(splits) if i!=n_fold])).tolist()))\r\n\r\n return index",
"def make_train_val_test_split_inchikey_lists(train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n holdout_inchikey_list=None,\n splitting_type='random'):\n if not np.isclose([sum(train_val_test_split_fractions)], [1.0]):\n raise ValueError('Must specify train_val_test_split that sums to 1.0')\n\n if holdout_inchikey_list:\n # filter out those inchikeys that are in the holdout set.\n train_inchikey_list = [\n ikey for ikey in train_inchikey_list\n if ikey not in holdout_inchikey_list\n ]\n\n if splitting_type == 'random':\n return get_random_inchikeys(train_inchikey_list,\n train_val_test_split_fractions)\n else:\n # Assume that splitting_type is the name of a structure family.\n # get_inchikeys_by_family will throw an error if this is not supported.\n return get_inchikeys_by_family(\n train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n family_name=splitting_type,\n exclude_from_train=True)"
]
| [
"0.68464065",
"0.661796",
"0.64245856",
"0.6398819",
"0.63746804",
"0.6372801",
"0.63497585",
"0.6347038",
"0.634233",
"0.6329367",
"0.63166386",
"0.6315036",
"0.63112",
"0.6283015",
"0.6247967",
"0.6247136",
"0.62002194",
"0.6170478",
"0.6168151",
"0.61529356",
"0.615237",
"0.61395204",
"0.611404",
"0.61120677",
"0.60986525",
"0.60942304",
"0.60909194",
"0.6049838",
"0.6015096",
"0.60130113"
]
| 0.7535942 | 0 |
Display sorted results for each metric. Args | def display_results(self, metrics):
for k, v in self.cv_results_.items():
# sorted_results = sort_results(v)
print(f'Results for {k} metric:')
print()
print(v.sort_values(by=['Metric mean'], ascending=False))
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort_results(self):\n pass",
"def display_metric(metrics_to_print, results, num_refs, args):\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))",
"def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')",
"def fetch_sorted_metric(self, *args, **kwargs):\n return sorted(self.fetch_metric(*args, **kwargs).items(),\n key=lambda x: float(x[0]))",
"def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')",
"def display_results(results, sizes):\r\n plot.xlabel('Array size')\r\n plot.ylabel('Time')\r\n plot.title('Sorting algorithms comparison')\r\n for name, result in results.items():\r\n plot.plot(sizes, result, label=name)\r\n plot.grid(True)\r\n plot.legend()\r\n plot.show()",
"def displaySorted(self):\r\n os.system('cls')\r\n for i in self.sortedList:\r\n print(str(i[2]) + \": \" + i[0].showRule())",
"def sort_results(metric_results):\n\n means, stds, params_list = metric_results\n dtype = [('index', int), ('params_list', object), ('std', float), ('mean', float)]\n\n #Sort will fail when attempting to rank based on the\n #dictionary 'params_list' when encountering identical mean and\n #standard deviations. To avoid this, use a list of distinct\n #integers to break the tie.\n values = zip(range(len(means)), params_list, stds, means)\n\n a = np.sort(np.array(list(values), dtype=dtype),\n kind='mergesort', order=['mean', 'std', 'index'])\n return np.flip(a, axis=-1)",
"def display_results():\n pass",
"def print_scores(result_collector):\n # print(\"\\n# Metric: Cohen's kappa\")\n # result_collector.set_metric(['k_cohen', 'k'])\n # result_collector.print_all_results()\n print(\"\\n# Metric: Macro avg. F1\")\n result_collector.set_metric([\"macro_avg\", \"fscore\"])\n # result_collector.print_all_results()\n result_collector.print_result_for_level(\"cc\")\n result_collector.print_result_for_level(\"ro\", print_header=False)\n result_collector.print_result_for_level(\"fu\", print_header=False)\n result_collector.print_result_for_level(\"at\", print_header=False)\n\n # print(\"\\nMetric: Positive attachment F1\")\n # result_collector.set_metric(['classwise', '1', 'fscore'])\n # result_collector.print_result_for_level('at')\n print(\"\\n# Metric: Labelled attachment score\")\n result_collector.set_metric([\"accuracy\"])\n result_collector.print_result_for_level(\"lat\")",
"def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)",
"def _sort_results(self, results: dict) -> List:\n return [results[url][\"display_name\"] for url in self.urls_list]",
"def format_results(requests):\n\n keys = get_sorted_keys(requests)\n index = 1\n \n print \"\\nResults:\"\n for key in keys:\n\tfor request in requests[key]:\n\t print \"%s %s\" % (index, request.print_result())\n\t index += 1",
"def results_summary(self, num_models=10, sort_metric=None):\n if self.state.dry_run:\n info(\"Dry-Run - no results to report.\")\n return\n\n # FIXME API documentation\n _results_summary(input_dir=self.state.host.results_dir,\n project=self.state.project,\n architecture=self.state.architecture,\n num_models=num_models,\n sort_metric=sort_metric)",
"def output_results(self, model, limit):\n out = []\n for key in self.results[model].keys():\n qId = key\n count = 1\n sorted_docs = sorted(self.results[model][key].items(), key=operator.itemgetter(1), reverse=True)[:limit] \n for val in sorted_docs:\n dId = val[0]\n dScore = val[1]\n dRank = count\n out.append(str(qId) + \" \" + \"Q0\" + \" \" + str(dId) + \" \" + str(dRank) + \" \" + str(dScore) + \" \" + \"Exp\")\n count += 1\n return out",
"def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()",
"def plot_sorted_accuracies(results):\n ###TODO\n #print(results)\n \n #step 1 -> sort accuracies and get x and y\n # x = setting\n # y = sorted list of accuracies\n #results.sort(key=lambda x:(x['accuracy'])) \n # don't use it ->it will change results from main as well\n \n #print(results)\n\n acc = []\n \n x = list(range(len(results)))\n \n for d in results:\n #print('dict=',d)\n acc.append(d['accuracy'])\n \n acc.sort(key=lambda x:(x))\n #print('acc = ',acc)\n \n #step 2 -> plot figure\n fig1 = plt.figure(1) \n plt.plot(x,acc)\n plt.ylabel('accuracy')\n plt.xlabel('settings')\n \n plt.show()\n \n fig1.savefig('accuracies.png')",
"def get_results(cls):\n cls.all_hoechstzahls.sort(key=attrgetter('value', 'topic.category.weight'), reverse=True)\n for hoechstzahl in cls.all_hoechstzahls:\n yield hoechstzahl",
"def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")",
"def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")",
"def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()",
"def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()",
"def plot_sorted_accuracies(results):\n acc = []\n for comb in results:\n acc.append(comb[\"accuracy\"])\n sorted_list = sorted(acc)\n plt.plot(range(42),sorted_list,'bo-')\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Setting\")\n plt.savefig(\"accuracies.png\")",
"def printResults(items, rules):\n for item, support in sorted(items, key=lambda (item, support): support):\n print \"item: %s , %.3f\" % (str(item), support)\n\n print \"\\n------------------------ RULES:\"\n for rule, confidence, support in sorted(rules, key=lambda (rule, confidence, support): confidence):\n pre, post = rule\n print \"Rule: %s ==> %s , %.3f, %.3f\" % (str(pre), str(post), confidence, support)",
"def print_perf_results(results, cpu_info):\n if not results:\n return\n\n print('CPU Info: {}'.format(cpu_info['cpu_info']))\n print('Num cores: {}'.format(cpu_info['num_cpus']))\n print('Frequency: {} MHz'.format(cpu_info['mhz_per_cpu']))\n\n metrics = None\n row_format = None\n\n for name, res_dict in results.items():\n if not metrics:\n metrics = res_dict.keys()\n row_format = '{:<25}' + '{:^15}' * len(metrics)\n\n # print the header.\n print(row_format.format('', *metrics))\n\n results = [res_dict[m] for m in metrics]\n print(row_format.format(name, *results))",
"def get_results(self):\n self.roll_dem_bones.setDisabled(True)\n self.update_stats()\n results = [self.roll_dice(s) for s in (\n 'first', 'second', 'third', 'fourth', 'fifth', 'sixth'\n )]\n results.sort(reverse = True)\n time.sleep(.5)\n qtw.qApp.processEvents()\n self.main_display.append(\n f'\\nYour results sorted highest to lowest are: {results}\\n\\n'\n f'Taking into account the {self.race} bonuses, the stats display '\n 'to the left has been updated. Feel free to adjust priorities/'\n 'race and roll again.\\n'\n )\n stats_dict = {}\n for i,s in enumerate((self.stats)):\n stats_dict[s] = results[i]\n for s in self.bonuses:\n stats_dict[s] = stats_dict[s] + self.bonuses[s]\n self.str_box.setText(f'{stats_dict[\"Strength\"]}')\n self.dex_box.setText(f'{stats_dict[\"Dexterity\"]}')\n self.con_box.setText(f'{stats_dict[\"Constitution\"]}')\n self.int_box.setText(f'{stats_dict[\"Intelligence\"]}')\n self.wis_box.setText(f'{stats_dict[\"Wisdom\"]}')\n self.cha_box.setText(f'{stats_dict[\"Charisma\"]}')\n self.roll_dem_bones.setEnabled(True)",
"def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))",
"def sort_results(boxes):\n return sorted(results[k], key=lambda x : x['score'], reverse=True)",
"def _print_results(results, title=''):\n pstr = '[' + title + ']: ' if title else ''\n for k, v in results.items():\n pstr += '\\t{}: {}'.format(k, v)\n print(pstr)",
"def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')"
]
| [
"0.70512533",
"0.67428845",
"0.6740514",
"0.66059077",
"0.6381608",
"0.63613796",
"0.6359567",
"0.62953067",
"0.6252458",
"0.6167347",
"0.61356187",
"0.6116705",
"0.6112208",
"0.60608053",
"0.6045988",
"0.60451716",
"0.60379475",
"0.6036549",
"0.60357326",
"0.60313785",
"0.6023213",
"0.5998529",
"0.59945536",
"0.59805506",
"0.59581715",
"0.58855075",
"0.58505166",
"0.584714",
"0.5822641",
"0.5803998"
]
| 0.8140085 | 0 |
Builds a dense network. Args | def _build_network(self,
input_dim,
dense_layers,
nodes_per_layer=None,
hidden_act='relu',
output_act='sigmoid',
dropout_layers=None):
if nodes_per_layer is None:
nodes = [10] * dense_layers
else:
nodes = nodes_per_layer
if dropout_layers is None:
do_layers = [0] * dense_layers
else:
do_layers = dropout_layers
self.model.add(Dense(nodes[0], input_dim=input_dim,
activation=hidden_act))
if dense_layers > 1:
for l in range(1, dense_layers - 1):
if do_layers[l - 1] != 0:
self.model.add(Dropout(do_layers[l - 1]))
self.model.add(Dense(nodes[l], activation=hidden_act))
self.model.add(Dense(1, activation=output_act)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_network(self, inputs, targets, training=False):\n raise NotImplementedError",
"def build_dense(self): # Pass state_size and action_size\n model = Sequential()\n model.add(Dense(24, input_dim = grid_size*grid_size+2, activation = 'relu'))\n model.add(Dense(24, activation = 'relu'))\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n model.compile(loss = 'mse', optimizer = RMSprop(lr = alpha))\n\n return model",
"def build_dense_network(data, hidden_layers, **kwargs):\n # Input layer\n with tf.variable_scope(\"layer_1\"): \n weights = tf.get_variable(\"weights\", shape = [input_shape[-1] + 1\n , hidden_layers[0]], initializer = tf.variance_scaling_initializer())\n\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([data, tf.ones(dtype = tf.float32\n , shape = (tf.shape(data)[0], 1))], axis = 1) # concat\n , weights, name = \"multiply\") # matmul\n , α, name = \"output\") # leaky relu\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Hidden layers 1 to len(hidden_layers) - 1\n for i in range(2, len(hidden_layers)-1+2):\n\n with tf.variable_scope(f\"layer_{i}\"):\n n_nodes = hidden_layers[i-1]\n\n weights = tf.get_variable(\"weights\", shape = [hidden_layers[i-2]+1, hidden_layers[i-1]], initializer = tf.variance_scaling_initializer())\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), α, name = \"output\")\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Output layer\n with tf.variable_scope(f\"layer_{len(hidden_layers)+1}\"):\n\n weights = tf.get_variable(\"weights\", shape = (hidden_layers[1]+1, n_summaries), initializer = tf.variance_scaling_initializer())\n output = tf.identity(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), name = \"output\")\n # NO DROP-OUT in the last layer\n\n\n return output",
"def build_dense_network(model,\n input_dim,\n dense_layers,\n nodes_per_layer=None,\n hidden_act='relu',\n output_act='sigmoid',\n dropout_layers=None):\n\n if nodes_per_layer is None:\n nodes = [10] * dense_layers\n else:\n nodes = nodes_per_layer\n\n if dropout_layers is None:\n do_layers = [0] * dense_layers\n else:\n do_layers = dropout_layers\n\n model.add(Dense(nodes[0], input_dim=input_dim,\n activation=hidden_act))\n\n if dense_layers > 1:\n for l in range(1, dense_layers - 1):\n if do_layers[l - 1] != 0:\n model.add(Dropout(do_layers[l - 1]))\n\n model.add(Dense(nodes[l], activation=hidden_act))\n\n model.add(Dense(1, activation=output_act))\n\n return model",
"def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_gen_celebA(nz=nz)\n\t\tdis = get_dis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_gen_mnist(nz=nz)\n\t\tdis = get_dis_mnist(nz=nz)\n\n\treturn gen, dis",
"def build_dnn_model(mode, inputs, columns, config):\r\n features = inputs['features']\r\n\r\n num_ps_replicas = config['dnn_model'].get('num_ps_replicas',0)\r\n input_layer_partitioner = (\r\n partitioned_variables.min_max_variable_partitioner(\r\n max_partitions=num_ps_replicas,\r\n min_slice_size=64 << 20))\r\n \r\n \r\n # parse configurations\r\n units = int(config['dnn_model'].get('units', 1))\r\n dnn_hidden_units = [int(n) for n in config['dnn_model'].get('hiden_units', '512,128,64').split(',')]\r\n dnn_activation_fn = tf.nn.relu\r\n if config['dnn_model'].get('activation_fn', None) is not None:\r\n dnn_activation_fn = eval(config['dnn_model']['activation_fn'])\r\n dnn_dropout = None\r\n if config['dnn_model'].get('dropout', None) is not None:\r\n dnn_dropout = float(config['dnn_model']['dropout'])\r\n batch_norm = False\r\n if config['dnn_model'].get('batch_norm', '').lower() == 'true':\r\n batch_norm = True\r\n \r\n # build dnn part\r\n dnn_logit_fn = dnn._dnn_logit_fn_builder(\r\n units=units,\r\n hidden_units=dnn_hidden_units,\r\n feature_columns=columns,\r\n activation_fn=dnn_activation_fn,\r\n dropout=dnn_dropout,\r\n input_layer_partitioner=input_layer_partitioner\r\n )\r\n\r\n dnn_logits = dnn_logit_fn(features=features, mode=mode) \r\n\r\n return dnn_logits",
"def build_densenet(l_in, input_var=None, first_output=64, growth_rate=32, num_blocks=4, dropout=0):\n \n\n nb_layers = [6, 12, 32, 32] # For DenseNet-169\n nb_layers = [6, 12, 24, 16] # For DenseNet-121\n # initial convolution\n network = Conv2DLayer(l_in, first_output, filter_size=7, stride=2, pad='same',\n W=lasagne.init.HeNormal(gain='relu'),\n b=None, nonlinearity=None, name='pre_conv')\n network = BatchNormLayer(network, name='pre_bn', beta=None, gamma=None)\n network = ScaleLayer(network, name='pre_scale')\n network = BiasLayer(network, name='pre_shift')\n network = dnn.MaxPool2DDNNLayer(network, pool_size=3, stride=2) \n # note: The authors' implementation does *not* have a dropout after the\n # initial convolution. This was missing in the paper, but important.\n # if dropout:\n # network = DropoutLayer(network, dropout)\n # dense blocks with transitions in between\n\n for b in range(num_blocks):\n network = dense_block(network, nb_layers[b], growth_rate, dropout,\n name_prefix='block%d' % (b + 1))\n if b < num_blocks - 1:\n network = transition(network, dropout,\n name_prefix='block%d_trs' % (b + 1))\n # post processing until prediction\n network = ScaleLayer(network, name='post_scale')\n network = BiasLayer(network, name='post_shift')\n network = NonlinearityLayer(network, nonlinearity=rectify, name='post_relu')\n\n return network",
"def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()",
"def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_wgen_celebA(nz=nz)\n\t\tdis = get_wdis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_wgen_mnist(nz=nz)\n\t\tdis = get_wdis_mnist(nz=nz)\n\n\treturn gen, dis",
"def _build_network(self):\n pass",
"def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)",
"def build_net(nz=100):\n\tif opts.celeba:\n\t\tinput_gen, gen = get_bigan_gen_celebA(nz = nz)\n\t\tinput_enc, enc = get_bigan_enc_celebA(nz = nz)\n\t\tz_dis, x_dis, dis = get_bigan_dis_celebA(nz = nz)\n\n\tif opts.mnist:\n\t\tinput_gen, gen = get_bigan_gen_mnist(nz = nz)\n\t\tinput_enc, enc = get_bigan_enc_mnist(nz = nz)\n\t\tz_dis, x_dis, dis = get_bigan_dis_mnist(nz = nz)\n\n\treturn input_gen, gen, input_enc, enc, dis, z_dis, x_dis",
"def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer",
"def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer",
"def _build_network(self, h_size=16, l_rate=0.001):\n with tf.variable_scope(self.net_name):\n self._X = tf.placeholder(tf.float32, [None, self.input_size], name=\"input_x\")\n net = self._X\n\n net = tf.layers.dense(net, h_size, activation=tf.nn.relu)\n net = tf.layers.dense(net, self.output_size)\n self._Qpred = net\n\n self._Y = tf.placeholder(tf.float32, shape=[None, self.output_size])\n self._loss = tf.losses.mean_squared_error(self._Y, self._Qpred)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=l_rate)\n self._train = optimizer.minimize(self._loss)",
"def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))",
"def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)",
"def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')",
"def dense(in_layer):\n return Dense(neurons,\n kernel_initializer=initializer())(in_layer)",
"def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network",
"def _build_model(self, name, hidden_layers, nodes):\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, self.state_size], name='inputs')\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, self.action_size)\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n self.layers = list()\n self.layers.append(fully_connected(\"hidden1\", self.inputs_, nodes))\n for layer in range(hidden_layers):\n self.layers.append(fully_connected(f\"hidden{layer+2}\", self.layers[layer], nodes))\n self.output = fully_connected(\"output\", self.layers[-1], self.action_size, activation=None)\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)",
"def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()",
"def create_network(layers):\r\n return NeuronNetwork(layers)",
"def __init__(self, incoming, n_units, flatten_input=False, W=tf.zeros, b=tf.zeros, a=tf.sigmoid, name='DenseLayer'):\n super(DenseLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n if (len(self.incoming_shape) > 2) and flatten_input:\n incoming_shape = [self.incoming_shape[0], np.prod(self.incoming_shape[1:])]\n elif len(self.incoming_shape) == 4:\n incoming_shape = [self.incoming_shape[0], np.prod(self.incoming_shape[1:])]\n elif len(self.incoming_shape) >= 5:\n incoming_shape = [self.incoming_shape[0], self.incoming_shape[1], np.prod(self.incoming_shape[2:])]\n else:\n incoming_shape = self.incoming_shape\n \n # Set init for W\n W = tofov(W, shape=[incoming_shape[-1], n_units], var_params=dict(name='W_dense'))\n \n # Set init for b\n if b is not None:\n b = tofov(b, [n_units], var_params=dict(name='b_dense'))\n \n self.a = a\n self.b = b\n self.W = W\n \n self.n_units = n_units\n self.flatten_input = flatten_input\n self.incoming_shape = incoming_shape\n \n self.out = tf.zeros(self.get_output_shape())\n self.name = name",
"def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)",
"def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError",
"def build_2net(input_size, output_size, n_hidden=[5, 3]):\n\t# Create network and modules\n\tnet = FeedForwardNetwork()\n\tinp = LinearLayer(input_size)\n\th1 = SigmoidLayer(n_hidden[0])\n\th2 = TanhLayer(n_hidden[1])\n\toutp = LinearLayer(output_size)\n\t# Add modules\n\tnet.addOutputModule(outp)\n\tnet.addInputModule(inp)\n\tnet.addModule(h1)\n\tnet.addModule(h2)\n\t# Create connections\n\tnet.addConnection(FullConnection(inp, h1, inSliceTo=6))\n\tnet.addConnection(FullConnection(inp, h2, inSliceFrom=6))\n\tnet.addConnection(FullConnection(h1, h2))\n\tnet.addConnection(FullConnection(h2, outp))\n\t# Finish up\n\tnet.sortModules()\n\treturn net",
"def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. 
does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim",
"def __init__(self, num_lemmas, num_pos, num_dep, num_directions=5, n_epochs=10, num_relations=2,\n alpha=0.01, lemma_embeddings=None, dropout=0.0, use_xy_embeddings=False, num_hidden_layers=0):\n self.n_epochs = n_epochs\n self.num_lemmas = num_lemmas\n self.num_pos = num_pos\n self.num_dep = num_dep\n self.num_directions = num_directions\n self.num_relations = num_relations\n self.alpha = alpha\n self.dropout = dropout\n self.use_xy_embeddings = use_xy_embeddings\n self.num_hidden_layers = num_hidden_layers\n self.update = True\n\n self.lemma_vectors = None\n if lemma_embeddings is not None:\n self.lemma_vectors = lemma_embeddings\n self.lemma_embeddings_dim = lemma_embeddings.shape[1]\n else:\n self.lemma_embeddings_dim = LEMMA_DIM\n\n # Create the network\n print 'Creating the network...'\n self.builder, self.model, self.model_parameters = create_computation_graph(self.num_lemmas, self.num_pos,\n self.num_dep, self.num_directions,\n self.num_relations,\n self.lemma_vectors,\n use_xy_embeddings,\n self.num_hidden_layers,\n self.lemma_embeddings_dim)\n print 'Done!'",
"def nn(data):\n training_set = SupervisedDataSet*\n\n\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)"
]
| [
"0.6853862",
"0.68205714",
"0.67849225",
"0.6764053",
"0.67236805",
"0.66235286",
"0.66105014",
"0.6556568",
"0.65352154",
"0.6469878",
"0.6422112",
"0.63993216",
"0.6319492",
"0.63183075",
"0.63123035",
"0.62499654",
"0.6205989",
"0.62024623",
"0.6172296",
"0.61618066",
"0.616158",
"0.61387426",
"0.61047405",
"0.609905",
"0.6094224",
"0.60921925",
"0.60800934",
"0.606159",
"0.60479003",
"0.6031809"
]
| 0.6921859 | 0 |
Load image from file and perform preprocessing. Args | def _load_preprocess_image(self, image_file):
image_raw = tf.io.read_file(image_file)
image = self._preprocess_image(image_raw)
return image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image",
"def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)",
"def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)",
"def process_path(file_path: str):\r\n img = tf.io.read_file(file_path)\r\n img = tf.image.decode_jpeg(img, channels=3)\r\n img = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])\r\n return tf.keras.applications.efficientnet.preprocess_input(img) # Shape: IMG_SIZE x IMG_SIZE x 3\r",
"def load_image(path, preprocess=True):\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n return x",
"def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')",
"def train_image_parse_function(filename, *argv):\n image = read_image(filename)\n image = tf.image.random_flip_left_right(image)\n\n if FLAGS.augmentation:\n print('data augmentation')\n resized_image = resize_and_random_crop_image(image)\n else:\n resized_image = resize_image(image)\n resized_image = scale_image_value(resized_image)\n\n if len(argv) == 1:\n return resized_image, argv[0]\n elif len(argv) == 2:\n return resized_image, argv[0], argv[1]\n else:\n return resized_image",
"def loadRaw(self, path, preprocfunc=None):\n # Only for 8 and 32 bit images\n depth = self.getDepth()\n if depth==1:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_DEPTH)\n \n # Loading the file\n f = file(path, 'rb')\n data = f.read()\n f.close()\n \n # Preprocessing the data if a function was given\n if preprocfunc:\n data = preprocfunc(data)\n \n # Verification over data size\n (w,h) = self.getSize()\n im_size = w*h*(depth/8)\n assert(len(data)==im_size*self.length)\n \n # Loading the data\n for i,im in enumerate(self.seq):\n err = mambaCore.MB_Load(im.mbIm, data[i*im_size:(i+1)*im_size], im_size)\n mamba.raiseExceptionOnError(err)\n self.name = path",
"def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")",
"def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image",
"def openAndPreProcessImage(path, copyOrig=False, preproc={}):\n try:\n im = Image.open(path).convert('L') #Open as a uint8 image\n except FileNotFoundError:\n print(f'Error: {path} not found')\n return\n except OSError:\n print(f'Error: Cannot open {path}, please check image formats supported by PIL.Image')\n return\n im = np.asarray(im)#[125:375,125:375] #Take a smaller region for speed\n \n # Also return an unprocessed copy of original image, if required\n im_orig = im.copy() if copyOrig else None\n \n return preProcessImage(im, **preproc), im_orig",
"def load_and_preprocess_image(path):\n\n img = cv2.imread(path, 0) # Load image into greyscale\n img = cv2.equalizeHist(img) # Histogram equilization\n return img",
"def preprocess(file_path, model_preprocess_function):\n img = image.load_img(file_path, target_size=(224, 224))\n x = image.img_to_array(img)\n # x = np.expand_dims(x, axis=0)\n x = model_preprocess_function(x)\n return x",
"def preprocess(path, img_w, img_h):\n #print(path)\n img = cv2.imread(path)\n #print(img.shape)\n #resizing the image to particular size (64, 128, 3)\n img = fix_size(img, img_w, img_h)\n #print(img.shape)\n \n #assigining values less than zero to zer0 and greater than zero to 1\n img = np.clip(img, 0, 255)\n\n #changing the interger to more useful and complex integer\n img = np.uint8(img)\n\n #convert an image to one color space to another\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n #chaging the values datatype to float\n img = img.astype(np.float32)\n\n #normalization\n img /= 255\n return img",
"def process_image(self):\n pass",
"def load_image(self, **kwargs):\n ...",
"def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_",
"def load_and_preprocess_image(path, max_dim=512):\n f = tf.io.read_file(path)\n img = tf.io.decode_image(f)\n img = resize_min(img, max_dim)\n img = tf.expand_dims(img, axis=0)\n img = vgg_preprocess_input(img)\n return img",
"def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data",
"def process(self, image):",
"def process_image_file(self, image_file):\n image = image_util.load_image_from_file(image_file)\n return self.process_image(image)",
"def img_preprocessing(save_path,img_path, filename):\n\n\tsave_path_filename = save_path + filename\n\n\t#Check if file exits\n\tif not os.path.exists(img_path + filename):\n\t\tlogger.error(\" image path {} does not exit\".\n\t\t\t\t\t\t\t\tformat(img_path + filename))\n\n\timage = plt.imread(img_path + filename)\n\n\timg_gray_orig_0 = rgb2gray(image)\n\n\timg_gray_orig = img_resize(img_gray_orig_0, 2*IMG_SIZE)\n\n\timg_just_bone = img_preprocess_core(img_gray_orig)\n\n\ttry:\n\t img_bone = img_pad_resize(img_just_bone, 2*IMG_SIZE) \n\t #Second iteration of image segmentation\n\t img_just_bone = img_preprocess_core(img_bone)\n\t img_bone = img_pad_resize(img_just_bone, IMG_SIZE)\n\n\t plt.imsave(save_path_filename, img_bone)\n\t \n\texcept ValueError:\n\t\tlogger.error(\"Unable to run 2nd interaton for {}\".format(filename))",
"def load_image(file_name):\n image = Image.open(file_name)\n # im = numpy.array(image)\n normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t_list = [transforms.Resize(256), transforms.CenterCrop(224)]\n t_end = [transforms.ToTensor(), normalizer]\n transform = transforms.Compose(t_list + t_end)\n return transform(image)",
"def process_image(self):\n\n detect.main(self.nn_args)",
"def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot",
"def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)",
"def load_image(self):\n if isinstance(self.filename, str):\n self.image = np.asarray(PIL.Image.open(self.filename))\n elif isinstance(self.filename, np.ndarray):\n self.image = np.asarray(self.filename)\n if self.image.ndim < 3:\n self.bw = True\n if self.image.ndim < 2:\n self.image = None\n print(\"file {} is not an appropriate format.\".format(\n self.filename))\n if self.image.ndim == 3:\n if self.image.shape[-1] == 1:\n self.image = np.squeeze(self.image)\n elif self.image.shape[-1] > 3:\n self.image = self.image[..., :-1]\n if (self.image[..., 0] == self.image.mean(-1)).mean() == 1:\n self.image = self.image[..., 0]\n self.bw = True\n return self.image",
"def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label",
"def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)"
]
| [
"0.74256855",
"0.73369807",
"0.73286986",
"0.6840995",
"0.67607754",
"0.67328",
"0.6700069",
"0.6671392",
"0.65570325",
"0.6555786",
"0.6551207",
"0.6473794",
"0.6472941",
"0.6457876",
"0.64540195",
"0.6435529",
"0.64323664",
"0.6421467",
"0.6393144",
"0.638709",
"0.63641036",
"0.634462",
"0.63387656",
"0.6329635",
"0.63265485",
"0.6321966",
"0.6321396",
"0.6315886",
"0.6315865",
"0.6308784"
]
| 0.74474293 | 0 |
Convert raw binary to float64 and scale the pixel values. Args | def _preprocess_image(self, image_raw):
image = tf.io.decode_raw(image_raw, tf.float64)
return image * self.rescale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE",
"def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n \n if self.rescale is not None:\n image_out = image * self.rescale\n else:\n image_out = image\n\n return image_out",
"def int2float(img_int):\n img = img_int.astype(np.float64)\n img /= (MAX_VALUE - 1)\n return img",
"def img_as_float64(image, force_copy=False): # reliably restored by inspect\n pass",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data",
"def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img",
"def normalize_real_image(real_data, normalize_with_sigmoid=True):\n if normalize_with_sigmoid:\n return (real_data / 255.0)\n else:\n return (real_data / 127.5 - 1.).astype(np.float32)",
"def _scale_to_zero_one(img):\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n return np.multiply(img, 1.0 / 255.0)\n else:\n print(\"image values already seem to be float\")\n return img",
"def rescale_toa(arr, dtype=np.float32):\n # First look at raw value dists along bands\n\n arr_trans = np.subtract(arr, arr.min(axis=(1, 2))[:, np.newaxis, np.newaxis])\n arr_rs = np.divide(arr_trans, arr_trans.max(axis=(1, 2))[:, np.newaxis, np.newaxis])\n if dtype == np.uint8:\n arr_rs = np.array(arr_rs*255, dtype=np.uint8)\n return arr_rs",
"def imReadAndConvert(filename: str, representation: int) -> np.ndarray:\r\n return normalize(imgRead(filename,representation)).astype(np.float)",
"def to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n x = x.data.numpy()\n x = ((x +1)*255 / (2)).astype(np.uint8) # rescale to 0-255\n return x",
"def imread_float(infile):\n return img_as_float(imread(infile))",
"def _image_to_vector(image):\n return image.flatten().astype(float)",
"def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)",
"def convert_image_to_floats(image):\n\n if np.max(image) <= 1.0:\n return image\n else:\n return image / 255.0",
"def normalize_for_rgb(raw_frame):\n return tf.cast(raw_frame, tf.float32) / 255.0",
"def preprocess_image(x, mode='caffe'):\n # mostly identical to \"https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py\"\n # except for converting RGB -> BGR since we assume BGR already\n\n # covert always to float32 to keep compatibility with opencv\n x = x.astype(np.float32)\n \n if mode == 'tf':\n x /= 127.5\n x -= 1.\n elif mode == 'caffe':\n x[..., 0] -= 103.939\n x[..., 1] -= 116.779\n x[..., 2] -= 123.68\n\n return x",
"def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image",
"def postprocess_img(img):\n img = img.transpose((1, 2, 0))\n img += 1.0\n img = (img * 128.0).astype(np.uint8)\n return img",
"def read_as_float_matrix(path):\n\n bgr_image = cv2.imread(path)\n return bgr_image.astype(np.float32) / 255.",
"def bin_to_float(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack('>d', bf)[0]",
"def convert2int(img):\n if img.min() == 0:\n return img\n if img.dtype == \"uint8\":\n return img - 2**8 / 2\n elif img.dtype == \"uint16\":\n return img - 2**16 / 2\n elif img.dtype == \"uint32\":\n return img - 2**32 / 2\n else:\n return img",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def normalize(image):\r\n return image / 127.5 - 1.",
"def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))",
"def convert_stream(self, stream):\n return np.fromstring(stream, \"Float32\")",
"def normalise(image):",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")"
]
| [
"0.66381794",
"0.6144738",
"0.60239863",
"0.59767365",
"0.59652585",
"0.59652585",
"0.582018",
"0.58109397",
"0.57753944",
"0.5748591",
"0.5741926",
"0.5717114",
"0.56944793",
"0.56926674",
"0.56763875",
"0.56397724",
"0.56142133",
"0.5569439",
"0.55578226",
"0.5540915",
"0.54875875",
"0.5477325",
"0.54642045",
"0.54500073",
"0.54491746",
"0.54478943",
"0.54443276",
"0.54442066",
"0.5443124",
"0.5442667"
]
| 0.62331146 | 1 |
Get a Path object for an image file. Args | def get_path_image(path_data, label, filename):
return path_data.joinpath(f'label_{label}', filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_path_to_image():\n file_types = [\n (\"JPEG Image\", '*.jpeg; *jpg'),\n (\"PNG Image\", '*.png'),\n (\"BPM Image\", '*.bmp'),\n (\"Netpbm Image\", '*.ppm; *.pgm; *.pbm; *pnm')\n ]\n\n GlobalVar.file_path = filedialog.askopenfilename(filetypes=file_types)\n GlobalVar.name_original = GlobalVar.file_path.split('/')[-1]\n GlobalVar.is_open_image = True\n\n read_image(GlobalVar.file_path)",
"def path_image(image):\n return bpy.path.abspath(image.filepath, library=image.library).replace(\"\\\\\", \"/\")\n # .replace(\"\\\\\",\"/\") to get only forward slashes as it's what POV prefers,\n # even on windows",
"def imagePath(self):\n return self.path",
"def get_image_path(source_path):\n\n split = source_path.split('\\\\')\n # get filename\n filename = split[-1].lstrip()\n # get folder name\n folder = split[-3]\n # get full data path\n current_path = folder + '/IMG/' + filename\n return current_path",
"def get_path(filename: str = None, folder: str = None) -> str:\n return IMAGE_SET.path(filename, folder)",
"def get_image(image_path):\r\n\r\n return Image.open(image_path)",
"def get_img_file(image, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img')\n # get location title.\n loc_id = db.get_img_loc(int(image))\n if loc_id == None:\n raise ValueError('The image %s could not be found' % image)\n loc = db.get_location(loc_id)\n title = loc['title']\n # add to file name\n img_dir = path.join(img_dir, title, str(image) + '.jpg')\n return img_dir",
"def get_image_path(self) -> Optional[str]:\n if not self.image or not self.image.file_path:\n return None\n return self.image.file_path",
"def get_image_by_path(image_path, target_size=None):\n img = image.load_img(image_path, target_size=target_size)\n return img",
"def file_path(self) -> global___Expression:",
"def get_pathname(self):\n return self.image_data.path",
"def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)",
"def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path",
"def getImagePath():\n currentPath = os.path.dirname(__file__)\n resourcesPath = os.path.join(currentPath, \"Resources\")\n imagesPath = os.path.join(resourcesPath, \"Images\")\n return imagesPath",
"def get_image_path(self):\n\t\treturn call_sdk_function('PrlVmDev_GetImagePath', self.handle)",
"def get_image_path(self) -> Optional[str]:\n try:\n return self.localised_faces.all()[0].get_image_path()\n except IndexError:\n logging.exception(\"Failed to find an image for %s\", self)\n return None",
"def get_file(file_info):\n if session_vars.filepath == file_info['filepath']:\n img_file = session_vars.img_file\n else:\n print('loading', file_info['filepath'])\n if file_info['ext']=='fits':\n print('Detected fits image type')\n pyfits = import_fits()\n img_file = pyfits.open(file_info['filepath'])\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img_file = Image.open(file_info['filepath'])\n session_vars.filepath = file_info['filepath']\n session_vars.img_file = img_file\n return img_file",
"def image_path_from_index(self, index):\n assert self.image_set_index is not None, \"Dataset not initialized\"\n name = self.image_set_index[index]\n image_file = os.path.join(self.image_dir, 'images', name)\n assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)\n return image_file",
"def FindImage(image_path):\n\n if os.path.isdir(image_path):\n # Assume base image.\n image_file = os.path.join(image_path, constants.BASE_IMAGE_NAME + '.bin')\n if not os.path.exists(image_file):\n raise ValueError('Cannot find base image %s' % image_file)\n elif os.path.isfile(image_path):\n image_file = image_path\n else:\n raise ValueError('%s is neither a directory nor a file' % image_path)\n\n return image_file",
"def path(sc, file_path):\n path_class = sc._gateway.jvm.org.apache.hadoop.fs.Path\n path_obj = path_class(file_path)\n return path_obj",
"def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None",
"def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])",
"def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])",
"def path(self):\n\n if self.file_func:\n path = self.file_func(self.lookup_obj, **self.pattern_params)\n return FilePath(path=path)\n return FilePath(path=\"\")",
"def get_image_path(instance, filename):\n filename, file_extension = path.splitext(filename)\n return path.join(str(uuid4()) + file_extension)",
"def image_path_at(self, i):\n image_path = os.path.join(self._image_path, self._image_index[i])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path",
"def get_input(path):\n img = imread(path)\n return img",
"def get_image(path):\n\n # Check if the picture exists or not.\n if not os.path.isfile(path):\n print('Cannot open the image. Please try again!')\n exit(1)\n\n try:\n # Open the image.\n image = Image.open(path)\n\n # If everything is okay return it.\n return image\n # If an error occurred.\n except Exception as err:\n print('Error occurred while trying to open the image:', err, 'Please try again!')\n exit(1)",
"def real_image_path(self, index):\r\n\r\n index = index.replace(\"\\\\\", \"/\")\r\n\r\n if not os.path.exists(index):\r\n image_file = os.path.join(self.prefix_path, index)\r\n else:\r\n image_file = index\r\n if not image_file.endswith('.jpg'):\r\n image_file = image_file + '.jpg'\r\n assert os.path.exists(\r\n image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file",
"def imagePath(image):\n return os.path.join(\":/images\", image)"
]
| [
"0.68334705",
"0.67055607",
"0.6676877",
"0.6605143",
"0.65452087",
"0.6487829",
"0.6407208",
"0.63472",
"0.63395727",
"0.63245505",
"0.62733346",
"0.62279886",
"0.62067974",
"0.6199286",
"0.61989886",
"0.6193733",
"0.6192277",
"0.61828023",
"0.61645263",
"0.61601126",
"0.61222005",
"0.61214226",
"0.61214226",
"0.6119973",
"0.61078626",
"0.61053187",
"0.6092135",
"0.6083034",
"0.6078688",
"0.6078217"
]
| 0.6797976 | 1 |
Sort metric performance results by the mean. Args | def sort_results(metric_results):
means, stds, params_list = metric_results
dtype = [('index', int), ('params_list', object), ('std', float), ('mean', float)]
#Sort will fail when attempting to rank based on the
#dictionary 'params_list' when encountering identical mean and
#standard deviations. To avoid this, use a list of distinct
#integers to break the tie.
values = zip(range(len(means)), params_list, stds, means)
a = np.sort(np.array(list(values), dtype=dtype),
kind='mergesort', order=['mean', 'std', 'index'])
return np.flip(a, axis=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_sorted_metric(self, *args, **kwargs):\n return sorted(self.fetch_metric(*args, **kwargs).items(),\n key=lambda x: float(x[0]))",
"def display_results(self, metrics):\n\n for k, v in self.cv_results_.items():\n# sorted_results = sort_results(v)\n print(f'Results for {k} metric:')\n print()\n print(v.sort_values(by=['Metric mean'], ascending=False))\n print()",
"def test_result_group_sorts_by_first_metric(self, result_group, index, score):\n assert result_group.results[index].metrics.score == score",
"def __ui_statistics_sort_avg(self, discipline_name):\n try:\n sorted_list = self.__grade_controller.get_averages_at_discipline_sorted_descending(discipline_name)\n if len(sorted_list) == 0:\n print(\"There is no student graded at the given discipline!\")\n return\n\n for student in sorted_list:\n print(str(student) + \"\\n\")\n\n except GradeException as ge:\n print(ge)\n return",
"def sort_results(self):\n pass",
"def mean_average_precision(sort_data):\n count_1 = 0\n sum_precision = 0\n for index in range(len(sort_data)):\n if sort_data[index][1] == 1:\n count_1 += 1\n sum_precision += 1.0 * count_1 / (index + 1)\n return sum_precision / count_1",
"def compute_means(runtimes):\n# tmp = runtimes[kernel_name]\n tmp_ = [ (int(key), float(np.mean(val)))\n for key, val in runtimes.iteritems()\n ]\n return sort_fst(tmp_)",
"def run_sort_home_by_score(self):\n self.homes = self.python_sort(self.homes)",
"def display_contestants_in_decreasing_order_of_average_score(listOfContestants):\r\n\r\n listOfContestants.sort(reverse = True, key = get_average_score_of_contestants)\r\n display_all_contestants(listOfContestants)",
"def sort_results(boxes):\n return sorted(results[k], key=lambda x : x['score'], reverse=True)",
"def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: tup[1]))[:self._top_values]",
"def top_students(mongo_collection):\n all_items = mongo_collection.find({})\n for item in all_items:\n count = 0\n new_topics = item\n for sta in item.get(\"topics\"):\n count += sta.get(\"score\")\n averageScore = count/len(item.get(\"topics\"))\n\n myquery = {\"name\": item.get(\"name\")}\n newvalues = {\"$set\": {\"averageScore\": averageScore}}\n mongo_collection.update_many(myquery, newvalues)\n\n order = mongo_collection.find().sort(\"averageScore\", DESCENDING)\n\n return order",
"def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())",
"def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]",
"def sort(self):\r\n\t\treturn sorted(self.sample)",
"def analyze_all(q: int = 100, n: int = 75000):\n total_start_time = time.time()\n sort_correct, sort_results = bucket_sort_general(q, n)\n print('sort_correct')\n sort_sorted_list = bucket_sort_sorted_list(q, n)\n print('sort_sorted_list')\n sort_reversed_list = bucket_sort_reversed_list(q, n)\n print('sort_reversed_list')\n sort_unique_list = bucket_sort_unique_list(q, n)\n\n headers = ['Type', 'Avg', 'Min', 'Max', 'Std']\n table = [['Bucket sort normal', sum(sort_results) / len(sort_results), min(sort_results), max(sort_results),\n pstdev(sort_results)],\n ['Bucket sort sorted list', sum(sort_sorted_list) / len(sort_sorted_list), min(sort_sorted_list),\n max(sort_sorted_list), pstdev(sort_sorted_list)],\n ['bucket sort reversed list', sum(sort_reversed_list) / len(sort_reversed_list), min(sort_reversed_list),\n max(sort_reversed_list), pstdev(sort_reversed_list)],\n ['bucket sort unique values', sum(sort_unique_list) / len(sort_unique_list), min(sort_unique_list),\n max(sort_unique_list), pstdev(sort_unique_list)]]\n\n print(f'Running all the metrics took {time.time() - total_start_time} seconds')\n print(f'Bucket sort correct = {sort_correct}')\n print(f'Each metric is calculated with a population of {q} and a list length of {n}')\n print(tabulate(table, headers=headers))\n return table",
"def sort_entries(self):\n if not len(self.student_list):\n print('There is no contents to sort')\n return\n\n opt = self.input_options(['n', 'a', 'g'], 1, 'Sort by name(n) or average(a) or grade(g)')\n if opt.upper() == 'N':\n self.print_dataframe(self.student_list.sort_values(by=['name', 'average'], ascending=[True,False]))\n elif opt.upper() == 'A' or opt.upper() == 'G':\n self.print_dataframe(self.student_list.sort_values(by=['average', 'name'], ascending=[False,True]))",
"def organizeM():\n scores = []\n today_listM = strainer('name', 'sort', 'event')\n today_listM.extend(strainer('name', 'sort', 'todo'))\n data = list(today_listM)\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores",
"def sortby(self):\n ...",
"def addOverallMeans(results, fieldNames, fields):\n # Work out what the values we already have look like\n meanValues = [\"Overall Mean\"]\n geoMeanValues = [\"Overall Geometric Mean\"]\n for name in fieldNames[1:]:\n if name in fields:\n values = [r.__dict__[name] for r in results]\n geoMeanValues.append(geomean(values))\n meanValues.append(mean(values))\n else:\n geoMeanValues.append(0)\n meanValues.append(0)\n\n results.append(measurement(fieldNames, meanValues))\n results.append(measurement(fieldNames, geoMeanValues))\n return results",
"def print_avg():",
"def sort_animals(all_animals):\n def get_key(a):\n return a.row + 0.001 * a.col\n\n all_animals.sort(key=get_key)",
"def _agg_by_mean(self):\n return self._data_grouped_by_manufacturer.agg('mean')[['car_value']]",
"def main_sort():\n \n input_1 = [7, 6, 5, 4, 3, 2, 1]\n print find_median(input_1)\n \n input_2 = [5, 4, 3, 2, 1, 6, 7]\n print find_median(input_2)\n \n input_3 = [1, 2, 3, 4, 5, 7, 6]\n print find_median(input_3)\n \n input_4 = [1, 1, 3, 3, 2, 2, 4]\n print find_median(input_4)",
"def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs",
"def get_average_scores(directors: Dict[str, List[Movie]]) -> List[Tuple[str, float]]:\n res = []\n for director_name, movies in directors.items():\n if len(movies) >= MIN_MOVIES:\n avg_score = calc_mean_score(movies)\n info = (director_name, avg_score)\n res.append(info)\n\n return sorted(res, key=lambda x: x[1], reverse=True)",
"def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res",
"def __init__(self):\n super().__init__()\n self.metric = 'AVGDIST'",
"def extract_metric(summary, metric):\n\n output = summary[summary.index.str.contains(metric)].T\n output.columns = output.columns.str.replace(f'test_{metric}_', '')\n output.sort_values(by='mean', ascending=False, inplace=True)\n output['lower'] = output['mean'] - 2*output['std']\n output['upper'] = output['mean'] + 2*output['std']\n return output",
"def sort_by_parser_scores(self):\n self.parses.sort(key=lambda parse: -parse.parser_score)"
]
| [
"0.66437477",
"0.6032373",
"0.5961712",
"0.5948859",
"0.5929985",
"0.5761439",
"0.575289",
"0.5731947",
"0.5665318",
"0.5618689",
"0.55005497",
"0.54784197",
"0.5470458",
"0.5452049",
"0.5449651",
"0.5446144",
"0.54368585",
"0.54318",
"0.54064316",
"0.540437",
"0.54033154",
"0.53926945",
"0.5384345",
"0.53806955",
"0.53557616",
"0.5354447",
"0.5333133",
"0.5328572",
"0.53163797",
"0.5314817"
]
| 0.6717352 | 0 |
Task state handler that timestamps new states and logs the duration between state changes using the task's logger. | def timestamper(task, old_state, new_state):
new_state.timestamp = pendulum.now("utc")
if hasattr(old_state, "timestamp"):
duration = (new_state.timestamp - old_state.timestamp).in_seconds()
task.logger.info(
"{} seconds passed in between state transitions".format(duration)
)
return new_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_state(cls, t_state, t_msg):\n cls.state['TASK_STATE'] = t_state\n cls.state['TASK_INFO'] = t_msg\n\n print str(t_state) + ': ' + str(t_msg)\n\n if cls.USE_CELERY:\n # send message to the front end\n current_task.update_state(state=t_state, meta={'result': t_msg})\n\n state = cls.state['TASK_STATE']\n info = cls.state['TASK_INFO']\n ret = {'state': state, 'info': info}\n current_task.info = json.dumps(ret)\n\n print '%s: %s' % (t_state, t_msg)",
"def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))",
"def task_changed(old_task, diff, now_task):",
"def change_state(self, timestamp, state):\n\t\tself.timestamp = timestamp\n\t\tself.state = state",
"def _set_timed_state(self, state_attribute_name, start_time_attribute_name, execution_time_attribute_name,\n new_state):\n current_state = getattr(self, state_attribute_name)\n if current_state == STATE_NOT_STARTED and new_state == STATE_RUNNING:\n setattr(self, start_time_attribute_name, datetime.utcnow())\n\n if current_state == STATE_RUNNING and new_state in [STATE_COMPLETE, STATE_FAILED]:\n execution_time = datetime.utcnow() - getattr(self, start_time_attribute_name)\n execution_time = (execution_time.days * 3600 * 24) + \\\n execution_time.seconds\n setattr(self, execution_time_attribute_name, execution_time)\n\n setattr(self, state_attribute_name, new_state)",
"def celery_task_run_output_updated(self, logger):",
"def update_job_state(self, *tasks, **extra_args):\n pass",
"def __setstate__(self, state: Dict[str, Any]) -> None:\n self.name = state[\"name\"]\n self.logger = _create_logger(self.name)",
"def task_instance_pre_save_handler(instance, **_):\n if instance.state in (SUCCESSFUL, FAILED):\n instance.datetime_finished = timezone.now()",
"def change_task_state(self, new_state):\n self.task_state = new_state",
"def state_changed(target, new_value, old_value, initiator):\n\n if (new_value == _WorkState.RUNNING and\n (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or\n target.time_started == None)):\n target.time_started = datetime.utcnow()\n target.time_finished = None\n\n elif new_value in (_WorkState.DONE, _WorkState.FAILED):\n target.time_finished = datetime.utcnow()",
"def _update_handler(self, state):\n self._schedule_remaining_events()",
"def log_and_dispatch(self, state_manager, state_change):\n state_change_id = self.raiden.transaction_log.log(state_change)\n events = self.dispatch(state_manager, state_change)\n self.raiden.transaction_log.log_events(\n state_change_id,\n events,\n self.raiden.get_block_number()\n )",
"def stream_state_changed(self,state,arg):\n if opts.verbose:\n print \"*** State changed: %s %r ***\" % (state,arg)\n else:\n pass",
"def task_time_handler(task, time_spent):\n tm = format_time(time_spent)\n msg = \"{task} finished; time took: {time}\".format(task=task, time=tm)\n message_kafka(\"Task Finished\", task, msg)",
"def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))",
"def _state(self, session):\n return self._dostate(session)",
"def set_task_state(self, task, state):\n self._write_transaction(tx.set_task_state, task=task, state=state)",
"def on_task_output(cls, task: Task, config: dict) -> None:",
"def on_state_transition(self, data, suffix=''):\n self._log(\n \"Recording the following state transition for exploration %s: \"\n \"'%s' to '%s'\" % (\n self.oppiaid, data['oldStateName'], data['newStateName']))",
"def transition_to_state(state):\n\n # Clear our \"time-in-state\" counter.\n m.d.ss += cycles_in_state.eq(0)\n\n # If we have any additional entry conditions for the given state, apply them.\n if state in tasks_on_entry:\n m.d.ss += tasks_on_entry[state]\n\n m.next = state",
"def update(self, t):\n self.state.send(t)",
"def log_and_dispatch_to_all_tasks(self, state_change):\n state_change_id = self.raiden.transaction_log.log(state_change)\n manager_lists = self.raiden.identifier_to_statemanagers.itervalues()\n\n for manager in itertools.chain(*manager_lists):\n events = self.dispatch(manager, state_change)\n self.raiden.transaction_log.log_events(\n state_change_id,\n events,\n self.raiden.get_block_number()\n )",
"def model_state_update(model, time_t, state_controller, input_f16):\n pass",
"async def update_state(self, state: dict):\n self.last_update = current_time() * 1000\n self.last_position = state.get('position', 0)\n self.position_timestamp = state.get('time', 0)\n\n try:\n await self.update_title()\n except Exception: # I don't want the task to finish because of a stupid thing\n pass\n\n event = PlayerUpdateEvent(\n self, self.last_position, self.position_timestamp)\n await self.node._dispatch_event(event)",
"def _update_state(context, node, instance, state):\n values = {'task_state': state}\n if not instance:\n values['instance_uuid'] = None\n values['instance_name'] = None\n db.bm_node_update(context, node['id'], values)",
"def internal_event (self):\n self.clock_time += 1\n self.log()",
"def __setstate__(self, state):\n self.__dict__ = state\n self._logger = logging.getLogger(self._name)\n self.level = self._level",
"def time_in(self):\n if self.is_logged():\n self.time_out()\n else:\n TaskLog.objects.create(task=self)",
"def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')"
]
| [
"0.5885212",
"0.58848715",
"0.58534354",
"0.56856",
"0.5671338",
"0.5634042",
"0.56279546",
"0.55943364",
"0.5561087",
"0.55303365",
"0.5510022",
"0.5481808",
"0.5466947",
"0.54583615",
"0.54421115",
"0.5429091",
"0.5358745",
"0.5344362",
"0.53066605",
"0.53047794",
"0.5298846",
"0.5281387",
"0.52804595",
"0.5264981",
"0.52645296",
"0.5222571",
"0.52162474",
"0.5210314",
"0.5204715",
"0.5186306"
]
| 0.73996735 | 0 |
Gathers paths and converts to data frames per the arguments provided. Multiple checks are in place to ensure data exists prior to processing. | def gather_data(instance):
paths = [instance.get('file1'), instance.get('file2')]
if Path(instance.get('file1')).parents[0].is_dir() is True and Path(
instance.get('file2')).parents[0].is_dir() is True:
files = [f for f in paths if os.path.isfile(f)]
if len(files) == 0:
raise Exception('The files you passed do not exist!')
dfs = []
for file in files:
try:
if file.endswith('.csv'):
dfs.append(pd.read_csv(file))
else:
raise Exception('Please pass a file ending in .csv')
except Exception as exc:
formatted = "Unable to locate files! Please ensure you have provided accurate file paths. {}".format(
repr(exc))
raise Exception(formatted)
return dfs, instance
else:
raise Exception('Please pass a valid file path.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Collect1DResults(Path, FolderNames, Left, Right, SavePath, OneD,\n fromf='', tof='', FilterbyName = False):\n\n second = \"=pd.DataFrame()\"\n if fromf == '':\n fromf = 0\n\n for i in range(len(FolderNames)):\n print(str(i) + \"-\" + FolderNames[i])\n\n if tof == '':\n tof = len(os.listdir(Path +\"/\" + FolderNames[i]))\n\n FileList = os.listdir(Path +\"/\" + FolderNames[i])[fromf:tof]\n # tof is only renewed if it is equal to ''\n tof = ''\n if FilterbyName == True:\n filter1 = int(FolderNames[i].split('(')[1].split('-')[0])\n filter2 = int(FolderNames[i].split('(')[1].split('-')[1].split(')')[0])\n\n for j in range(len(FileList)):\n\n go = False\n\n if Left and FileList[j].split('.')[0].endswith(\"_left\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n # create data frame for the sub-basin\n first = \"L\" + FileList[j].split('.')[0]\n go = True\n\n elif Right and FileList[j].split('.')[0].endswith(\"_right\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n first = \"R\" + FileList[j].split('.')[0]\n go = True\n\n ## try to get the integer of the file name to make sure that it is\n ## one of the 1D results file\n elif OneD and not FileList[j].split('.')[0].endswith(\"_right\") and not FileList[j].split('.')[0].endswith(\"_left\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n # create data frame for the sub-basin\n first = \"one\" + FileList[j].split('.')[0]\n go = True\n\n if go == True:\n # get the updated list of variable names\n variables = locals()\n\n # read the file\n try:\n temp_df = pd.read_csv(Path + \"/\" + FolderNames[i] + \"/\" + FileList[j],header = None,\n delimiter = r'\\s+')\n\n if FilterbyName == True:\n temp_df = temp_df[temp_df[0] >= filter1]\n temp_df = temp_df[temp_df[0] <= filter2]\n # check whether the variable exist or not\n # if this is the first time this file exist\n if not first in variables.keys():\n # create a datafame with the name of the sub-basin\n total = first+ second\n exec(total)\n\n # concatenate the\n exec(first + \"= pd.concat([\" + first+ \", temp_df])\")\n except:\n continue\n\n # Save files\n variables = list(locals().keys())\n # get sub-basins variables (starts with \"One\")\n for i in range(len(variables)):\n var = variables[i]\n if var.endswith(\"_left\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n path = SavePath + '/' + var[1:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")\n elif var.endswith(\"_right\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n path = SavePath + '/' + var[1:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")\n elif var.startswith(\"one\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n print(\"Saving \" + var[3:]+ '.txt')\n path = SavePath + '/' + var[3:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")",
"def __init__(self, animals_data=\"animals.json\", food_data=\"food.json\", zookeeper_data=\"zookeeper.json\"):\n if type(animals_data) is pd.DataFrame:\n self.animals_df = animals_data\n else:\n self.animals_df = pd.read_json(animals_data)\n\n if type(food_data) is pd.DataFrame:\n self.food_df = food_data\n else:\n self.food_df = pd.read_json(food_data)\n\n if type(zookeeper_data) is pd.DataFrame:\n self.zookeeper_df = zookeeper_data\n else:\n self.zookeeper_df = pd.read_json(zookeeper_data)",
"def get_data(paths, df_names, categorical_feats, groupby=None, exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)",
"def _determine_dataset_parts(fs, paths, gather_statistics, filters, dataset_kwargs):\n parts = []\n if len(paths) > 1:\n if gather_statistics is not False:\n # This scans all the files\n dataset = pq.ParquetDataset(\n paths, filesystem=fs, filters=filters, **dataset_kwargs\n )\n else:\n base, fns = _analyze_paths(paths, fs)\n relpaths = [path.replace(base, \"\").lstrip(\"/\") for path in paths]\n if \"_metadata\" in relpaths:\n # We have a _metadata file, lets use it\n dataset = pq.ParquetDataset(\n base + fs.sep + \"_metadata\",\n filesystem=fs,\n filters=filters,\n **dataset_kwargs,\n )\n else:\n # Rely on metadata for 0th file.\n # Will need to pass a list of paths to read_partition\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, **dataset_kwargs)\n parts = [base + fs.sep + fn for fn in fns]\n else:\n if fs.isdir(paths[0]):\n # This is a directory, check for _metadata, then _common_metadata\n allpaths = fs.glob(paths[0] + fs.sep + \"*\")\n base, fns = _analyze_paths(allpaths, fs)\n relpaths = [path.replace(base, \"\").lstrip(\"/\") for path in allpaths]\n if \"_metadata\" in relpaths and \"validate_schema\" not in dataset_kwargs:\n dataset_kwargs[\"validate_schema\"] = False\n if \"_metadata\" in relpaths or gather_statistics is not False:\n # Let arrow do its thing (use _metadata or scan files)\n dataset = pq.ParquetDataset(\n paths, filesystem=fs, filters=filters, **dataset_kwargs\n )\n else:\n # Use _common_metadata file if it is available.\n # Otherwise, just use 0th file\n if \"_common_metadata\" in relpaths:\n dataset = pq.ParquetDataset(\n base + fs.sep + \"_common_metadata\",\n filesystem=fs,\n **dataset_kwargs,\n )\n else:\n dataset = pq.ParquetDataset(\n allpaths[0], filesystem=fs, **dataset_kwargs\n )\n parts = [base + fs.sep + fn for fn in fns]\n else:\n # There is only one file to read\n dataset = pq.ParquetDataset(paths, filesystem=fs, **dataset_kwargs)\n return parts, dataset",
"def read_by_paths(path_list):\n # create empty df to concatenate to\n base_df = pd.DataFrame(data=None, columns=['timestamp', 'seq', 'accel_x', 'accel_y', 'accel_z', 'accel_magnitude',\n 'accel_pca', 'accel_x_smooth', 'accel_x_lp', 'accel_x_hp',\n 'accel_x_grad', 'accel_x_doublegrad', 'accel_y_smooth', 'accel_y_lp',\n 'accel_y_hp', 'accel_y_grad', 'accel_y_doublegrad', 'accel_z_smooth',\n 'accel_z_lp', 'accel_z_hp', 'accel_z_grad', 'accel_z_doublegrad',\n 'accel_magnitude_smooth', 'accel_magnitude_lp', 'accel_magnitude_hp',\n 'accel_magnitude_grad', 'accel_magnitude_doublegrad',\n 'accel_pca_smooth', 'accel_pca_lp', 'accel_pca_hp', 'accel_pca_grad',\n 'accel_pca_doublegrad', 'subject', 'activity', 'correctness'])\n\n activity_name_dict = get_activity_name_dict()\n\n for path in tqdm.tqdm(path_list):\n subject, activity_name, correctness, _ = path.split('/')[-1].split('_')\n\n df = read_single_path(path, keep_axes=True)\n df['subject'] = subject\n df['activity'] = activity_name_dict[activity_name]\n df['correctness'] = correctness.lower()\n\n # concatenate to base\n base_df = pd.concat([base_df, df])\n base_df.reset_index(drop=True, inplace=True)\n\n return base_df",
"def build_df(path_orig = r'.\\chest_xray', orig_file_ext = 'jpeg', path_seg = r'.\\segmentation', seg_file_ext = 'png', save_path = '.\\df_all.csv'):\n \n read_df = 'C'\n list_df = [] \n \n if os.path.exists(save_path):\n read_df = input('DataFrame was found, would you like to read it (R) or recreate it (C) (default Read)?\\n') or 'R'\n if read_df == 'R':\n df = pd.read_csv(save_path, index_col = 0)\n return df\n \n if read_df == 'C':\n for dirname, _, filenames in os.walk(path_orig):\n for filename in tqdm(filenames, disable=len(filenames)==0):\n if ('.' + orig_file_ext) in filename:\n list_val = []\n list_val.append('PNEUMONIA' if 'PNEUMONIA' in dirname else 'NORMAL')\n list_val.append(1 if 'PNEUMONIA' in dirname else 0)\n list_val.append('bacteria' if 'bacteria' in filename.lower() else 'virus' if 'virus' in filename.lower() else 'normal')\n list_val.append(1 if 'bacteria' in filename.lower() else 2 if 'virus' in filename.lower() else 0)\n list_val.append(filename)\n list_val.append(os.path.join(dirname, filename)) \n list_val.append(filename.replace(orig_file_ext, seg_file_ext))\n list_val.append(os.path.join(dirname.replace(path_orig, path_seg), filename.replace(orig_file_ext, seg_file_ext)))\n list_df.append(list_val)\n\n df = pd.DataFrame(list_df, columns = ['Label_name', 'Label_int', 'Label_pathology', 'Label_pathology_int', 'Filename_orig', 'Filepath_orig', 'Filename_seg', 'Filepath_seg'])\n df.to_csv(save_path)\n \n print('Done')\n \n return df",
"def get_data_paths(directory: Optional[str] = None) -> DataPaths:\n if directory is None:\n directory = DATA_DIRECTORY\n\n os.makedirs(directory, exist_ok=True)\n\n node_data_path = os.path.join(directory, 'nodes.tsv')\n if not os.path.exists(node_data_path):\n logger.info(f'downloading {NODE_DATA_URL}')\n urlretrieve(NODE_DATA_URL, node_data_path)\n\n edge_data_path = os.path.join(directory, 'edges.sif.gz')\n if not os.path.exists(edge_data_path):\n logger.info(f'downloading {EDGE_DATA_URL}')\n urlretrieve(EDGE_DATA_URL, edge_data_path)\n\n transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')\n if not os.path.exists(transformed_features_path):\n logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')\n urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)\n\n validate_data_path = os.path.join(directory, 'validation-statuses.tsv')\n if not os.path.exists(validate_data_path):\n logger.info(f'downloading {VALIDATE_DATA_URL}')\n urlretrieve(VALIDATE_DATA_URL, validate_data_path)\n\n symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')\n if not os.path.exists(symptomatic_data_path):\n logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')\n urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)\n\n repurpose_data_path = os.path.join(directory,'repurpose_overlap.json')\n if not os.path.exists(repurpose_data_path):\n logger.info(f'downloading {REPURPOSE_DATA_URL}')\n urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)\n\n repo_data_path = os.path.join(directory, 'repo_data.csv')\n if not os.path.exists(repo_data_path):\n logger.info(f'downloading {REPO_DATA_URL}')\n urlretrieve(REPO_DATA_URL, repo_data_path)\n\n permutation_directory = os.path.join(directory, \"permutations\")\n os.makedirs(permutation_directory, exist_ok=True)\n\n permutation_paths = []\n for i in range(5):\n permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))\n if not os.path.exists(permutation_data_path):\n url = PERMUTATION_DATA_URL_FMT.format(i + 1)\n logger.info(f'downloading {url}')\n urlretrieve(url, permutation_data_path)\n permutation_paths.append(permutation_data_path)\n data_edge2vec_path = os.path.join(directory, 'data_edge2vec')\n\n return DataPaths(\n node_data_path=node_data_path,\n edge_data_path=edge_data_path,\n transformed_features_path=transformed_features_path,\n validate_data_path=validate_data_path,\n symptomatic_data_path=symptomatic_data_path,\n permutation_paths=permutation_paths,\n data_edge2vec_path=data_edge2vec_path,\n repurpose_data_path = repurpose_data_path,\n repo_data_path = repo_data_path\n )",
"def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):\n print('Converting directories in {}'.format(base_dir))\n\n dirs = glob.glob(os.path.join(base_dir, '*'))\n dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}\n if not dirs:\n raise RuntimeError('No direcotries found matching known data.')\n\n store = pd.HDFStore(\n hdf_name, mode='w', complevel=complevel, complib=complib)\n\n for dirpath in dirs:\n dirname = os.path.basename(dirpath)\n\n print(dirname)\n df = cache_to_df(dirpath)\n\n if dirname == 'travel_data':\n keys = ['from_zone_id', 'to_zone_id']\n elif dirname == 'annual_employment_control_totals':\n keys = ['sector_id', 'year', 'home_based_status']\n elif dirname == 'annual_job_relocation_rates':\n keys = ['sector_id']\n elif dirname == 'annual_household_control_totals':\n keys = ['year']\n elif dirname == 'annual_household_relocation_rates':\n keys = ['age_of_head_max', 'age_of_head_min',\n 'income_min', 'income_max']\n elif dirname == 'building_sqft_per_job':\n keys = ['zone_id', 'building_type_id']\n elif dirname == 'counties':\n keys = ['county_id']\n elif dirname == 'development_event_history':\n keys = ['building_id']\n elif dirname == 'target_vacancies':\n keys = ['building_type_id', 'year']\n else:\n keys = [dirname[:-1] + '_id']\n\n if dirname != 'annual_household_relocation_rates':\n df = df.set_index(keys)\n\n for colname in df.columns:\n if df[colname].dtype == np.float64:\n df[colname] = df[colname].astype(np.float32)\n elif df[colname].dtype == np.int64:\n df[colname] = df[colname].astype(np.int32)\n else:\n df[colname] = df[colname]\n\n df.info()\n print(os.linesep)\n store.put(dirname, df)\n\n store.close()",
"def detect_prepared_datasets(self):\n if utils.find('*target_ds_preprocessed.pkl', self.prepared_data_dir) and \\\n utils.find('*rf_ds_preprocessed.pkl', self.prepared_data_dir) and \\\n utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):\n print('Pickles (preprocessed) found.')\n for pkl in utils.find('*preprocessed.pkl', self.prepared_data_dir):\n if \"target_ds\" in pkl: self.target_ds_preprocessed_path = pkl\n elif \"rf_ds\" in pkl: self.rf_ds_preprocessed_path = pkl\n \n LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path))\n\n for pkl in utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):\n self.standardized_stacked_arr_path = pkl\n else:\n print('Pickles of pre-processed data incomplete. Proceeding to load & process raw dataset pickles.')\n self.target_ds_preprocessed_path, self.rf_ds_preprocessed_path = prepare.preprocess_time_series(self, self.prepared_data_dir, self.ALPHAs)\n\n LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path)) # generate new local model params\n\n self.standardized_stacked_arr_path = prepare.flatten_and_standardize_dataset(self, self.prepared_data_dir)\n print(f'--> Months for this dataset are: {self.month_names}')",
"def load_and_clean_data():\n\n def file2pd(path):\n\n # load a data file, remove comments, convert to list\n f = open(path, 'r').read().replace('# rsid', 'rsid').split('\\n')\n f = [x for x in f if len(x) and x[0] != '#']\n\n # get column names and values\n cols = f[0].split('\\t')\n f = [x.split('\\t') for x in f[1:]]\n\n # convert to DataFrame, convert position column to int\n df = pd.DataFrame(f, columns=cols)\n df['position'] = df['position'].astype(np.int64)\n\n return df\n\n return [file2pd(PATH_ONE), file2pd(PATH_TWO)]",
"def load_all_data() -> Tuple[pd.DataFrame, ...]:\n return tuple(\n pd.read_csv(path, sep='\\t') for path in (TARGETS_PATH, USER_INFO_PATH, INTERACTIONS_PATH, TRACK_INFO_PATH))",
"def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)",
"def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass",
"def prepare_data(args):\n logger.info('Loading dataframe from %s' % args.newspath)\n df = pd.read_csv(args.newspath, encoding='gb18030')\n logger.info('Dataframe size: %d observations %d features after loaded' % (df.shape[0], df.shape[1]))\n\n # exclude rows with column source == NaN\n logger.info('Data cleansing...')\n df = df[~pd.isna(df['source'])]\n logger.info('Dataframe size: %d observations %d features after data cleansing' % (df.shape[0], df.shape[1]))\n\n # split the dataframe into training set and test set\n logger.info('Making training set & test set...')\n train_set, test_set = split_data(df)\n logger.info('Traning set size: %d' % train_set.shape[0])\n logger.info('Test set size: %d' % test_set.shape[0])\n\n # save the train set and test set to picke files\n logger.info('Save dataframes to files...')\n train_set.to_pickle(args.trainpath)\n test_set.to_pickle(args.testpath)",
"def _prepare_raw_data(kwargs):\n path = kwargs.get(\"path\", None)\n output_path = kwargs.get(\"output_path\", None)\n data_source = DataSource.best_available_data_source()\n for job in data_source.jobs(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_job(data=job, path=output_path)\n for traffic in data_source.traffics(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_traffic(data=traffic, path=output_path)",
"def _create_folders(self, name: str) -> Tuple[Path, pd.DataFrame]:\n data_folder = self.output_dir / Path(name)\n data_folder.mkdir(exist_ok=True)\n for host_name in self.df[\"host\"].unique():\n Path(data_folder, host_name).mkdir(exist_ok=True)\n df_and_folder_list = (data_folder, self.df)\n return df_and_folder_list",
"def fix_filepaths(df):\n\n if platform in [\"linux\", \"linux2\", \"darwin\"]:\n pass\n else:\n raise NotImplementedError(\n \"OSes other than Linux and Mac are currently not supported.\"\n )\n return df",
"def prepare_dataset(fpath):\n raise NotImplementedError",
"def check_intermediate_file(cache_dir, pull_start_dates):\n previous_dfs = {}\n for test_type in TEST_TYPES:\n previous_dfs[test_type] = None\n if pull_start_dates[test_type] is not None:\n pull_start_dates[test_type] = datetime.strptime(\n pull_start_dates[test_type], '%Y-%m-%d')\n\n for filename in os.listdir(cache_dir):\n if \".csv\" in filename:\n test_type = \"_\".join(filename.split(\"_\")[:2])\n date_string = filename.split(\"_\")[4].split(\".\")[0]\n pull_start_dates[test_type] = datetime.strptime(date_string,\n '%Y%m%d') + timedelta(days=1)\n previous_dfs[test_type] = pd.read_csv(join(cache_dir, filename),\n sep=\",\", parse_dates=[\"timestamp\"])\n return previous_dfs, pull_start_dates",
"def _do_build ():\n if os.path.exists(\"./database\"):\n data_path = \"./database/\"\n elif os.path.exists(\"../database\"):\n data_path = \"../database/\"\n elif os.path.exists(\"../../database\"):\n data_path = \"../../database/\"\n else:\n data_path = \".\"\n\n dir_specs = {}\n databases = []\n\n # first pass over the databases to create complete tree:\n for dirpath, dirnames, filenames in os.walk(data_path):\n # all databases are stored\n for name in filenames:\n if name.endswith(\".db\"):\n databases.append(os.path.join(dirpath, name).replace(data_path, \"\"))\n # but we need to store specs here otherwise things could get a bit confusing\n elif name.endswith(\".spec\"):\n possible_dir = os.path.join(dirpath, name[:-5]+\".db\")\n if os.path.exists(possible_dir) and os.path.isdir(possible_dir):\n spec_name = possible_dir.replace(data_path, \"\")\n dir_specs[spec_name] = parse_spec(os.path.join(dirpath, name))\n\n # and we create DatabaseFolders for each subfolder\n for name in dirnames:\n if name.endswith(\".db\"):\n # dump the extension here too\n obj_name = name[:-3]\n this_folder = DatabaseFolder(obj_name)\n\n if dir_specs.has_key(name):\n this_folder.spec = dir_specs.pop(name)\n\n if dirpath != data_path:\n search = dirpath.replace(data_path, \"\").split(PATH_DELIM)\n try:\n top_folder = globals()[search[0]]\n except KeyError:\n raise DatabaseError, \"Subdirectory of a db folder without a DatabaseFolder?\"\n for p in search[1:]:\n if p == name:\n break\n try:\n top_folder = getattr(top_folder, p)\n except AttributeError:\n raise DatabaseError, \"Subdirectory of a db subfolder without a DatabaseFolder subfolder!\"\n top_folder.append(this_folder)\n else:\n globals()[obj_name] = this_folder\n\n for database in databases:\n build_from_file_name(database, data_path)",
"def run(self, data_file: Optional[str] = None):\n\n \n\n # Pandas DF of 'list' files\n #cpd_list_df = pd.read_csv(self.cpd_list, low_memory=False, sep='\\t')\n #path_list_df = pd.read_csv(self.path_list, low_memory=False, sep='\\t')\n #rn_list_df = pd.read_csv(self.rn_list, low_memory=False, sep='\\t')\n #ko_list_df = pd.read_csv(self.ko_list, low_memory=False, sep='\\t')\n #cpd_to_chebi_df = pd.read_csv(self.cpd2chebi, low_memory=False, sep='\\t')\n\n # Pandas DF of 'kegg-*.tsv' files\n \n path_df = self.prune_columns(pd.read_csv(self.full_path, low_memory=False, sep='\\t', usecols=['ENTRY', 'NAME', 'DBLINKS']), 'path')\n rn_df = self.prune_columns(pd.read_csv(self.full_rn, low_memory=False, sep='\\t', usecols=['ENTRY', 'DEFINITION', 'EQUATION', 'DBLINKS']), 'rn')\n ko_df = self.prune_columns(pd.read_csv(self.full_ko, low_memory=False, sep='\\t', usecols=['ENTRY', 'DEFINITION', 'DBLINKS']), 'ko')\n \n ## **********************************************************************\n # Establishing 1-to-1 relation between KO and XRefs (['DBLINKS'] column)\n ##***********************************************************************\n\n # Explode DBLINKS in ko_df to separate rows\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).split('|'))\n ko_df = ko_df.explode('DBLINKS')\n\n #ko_df['ID'] = ko_df['ID'].apply(lambda row : 'ko:'+str(row))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).replace('RN: ', 'KEGG.REACTION:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).strip().replace('COG: ', 'COG:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).strip().replace('GO: ', 'GO:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).strip().replace('TC: ', 'tcdb:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).strip().replace('CAZy: ', 'cazy:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row : str(row).strip().replace('UniProt: ', 'uniprot:'))\n ko_df['DBLINKS'] = ko_df['DBLINKS'].apply(lambda row: str(row).split(' '))\n # Add prefixes to all DBLINKS\n ko_df['DBLINKS'] = ko_df['DBLINKS'] \\\n .apply(lambda row: [str(row[0])]+[str(row[0])\n .split(':')[0] + ':'+ x \\\n for x in row \\\n if not str(x).startswith(str(row[0]).split(':')[0]+ ':')])\n\n ko_df['DBLINKS'] = ['|'.join(map(str, l)) for l in ko_df['DBLINKS']]\n # Roll up to consolidated rows\n ko_df = ko_df.groupby(['ID', 'DESCRIPTION'], as_index=False).agg({'DBLINKS': lambda x: '|'.join(x)})\n ##########################################################################\n\n node_dict: dict = defaultdict(int)\n edge_dict: dict = defaultdict(int)\n\n df_dict = {\n 'pathway': path_df,\n 'rn': rn_df,\n 'ko': ko_df\n }\n\n node_dict, edge_dict = self.post_data(self.path_cpd_link, node_dict, edge_dict, df_dict, 'w')\n node_dict, edge_dict = self.post_data(self.rn_cpd_link, node_dict, edge_dict, df_dict, 'a')\n node_dict, edge_dict = self.post_data(self.path_rn_link, node_dict, edge_dict, df_dict, 'a')\n node_dict, edge_dict = self.post_data(self.path_ko_link, node_dict, edge_dict, df_dict, 'a')\n node_dict, edge_dict = self.post_data(self.rn_ko_link, node_dict, edge_dict, df_dict, 'a')\n \n\n return None",
"def _resolve_xml_and_data_paths(self):\n\n supported_extensions = ['.dat', '.lfp', '.eeg']\n self.filename = Path(self.filename)\n self.binary_file = Path(self.binary_file) if self.binary_file is not None else None\n \n if self.filename.suffix == '.xml':\n xml_file_path = self.filename\n data_file_path = self.binary_file \n elif self.filename.suffix == '':\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.binary_file\n elif self.filename.suffix in supported_extensions:\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.filename\n else:\n raise KeyError(f\"Format {self.filename.suffix} not supported, filename format should be {supported_extensions} or .xml\")\n \n if data_file_path is None:\n possible_file_paths = (xml_file_path.with_suffix(extension) for extension in supported_extensions)\n data_file_path = next((path for path in possible_file_paths if path.is_file()), None)\n if data_file_path is None:\n raise FileNotFoundError(f\"data binary not found for file {xml_file_path.stem} with supported extensions: {supported_extensions}\")\n\n \n assert xml_file_path.is_file(), f\"xml file not found at the expected location {xml_file_path}\"\n assert data_file_path.is_file(), f\"binary file not found at the expected location {data_file_path}\"\n\n self.xml_file_path = xml_file_path\n self.data_file_path = data_file_path",
"def __init__(self, data_dirs, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.data_dirs = strax.to_str_tuple(data_dirs)\n\n for d in self.data_dirs:\n try:\n os.makedirs(d)\n except FileExistsError:\n pass\n else:\n self.log.debug(f\"Created data dir {d}\")",
"def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count",
"def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()",
"def parse_directory(dpath):\n print(\"Starting to parse\")\n print(dpath)\n selected_files = sel_files(dpath)\n print(\"Number of selected files\", len(selected_files))\n\n # iterave over selected files and build dataframe\n empties = 0\n idx = pd.DataFrame(columns=list(PATH_REGEX.keys()))\n for fpath in selected_files:\n m = FNAME_REGEX.search(fpath)\n if m is None:\n print(\"ERROR:\", fpath, FNAME_REGEX.pattern)\n continue\n row_head = {k: m.group(k) for k in PATH_REGEX.keys()}\n for i, (webpage_id, lengths) in enumerate(it_webpages(fpath)):\n if len(lengths) == 0:\n empties += 1\n continue\n row_head['fname'] = os.path.basename(fpath)\n row_head['class_label'] = webpage_id\n row_head['lengths'] = lengths\n idx = idx.append(row_head, ignore_index=True)\n print(i, 'sites in', fpath)\n print(\"Empty traces:\", empties)\n\n # fix some naming issues:\n idx['inst'] = idx.inst.fillna(0)\n idx['date'] = pd.to_datetime(idx.date.str.replace('-18', '-2018'),\n format='%d-%m-%Y')\n #idx['dev'] = idx.dev.replace('browse', 'desktop')\n #idx.loc[idx.sites == 'desktop', ['dev', 'sites']] = ['desktop', None]\n return idx",
"def __call__(self, path):\n\n # Iterates through a directory of raw sources and builds staging databases\n databases = self.process(path)\n\n # Output database file\n qafile = os.path.join(path, \"questions.db\")\n\n # Build consolidated SQLite questions database\n db2qa = DB2QA()\n db2qa(databases, qafile)",
"def prepare_dfs(json_path, get_CBS=False, only_converged=False):\n data_dict = read_json(json_path)\n df_qc = qc_dframe(data_dict, only_converged=only_converged)\n df_qats = qats_dframe(data_dict)\n if get_CBS:\n df_qc = add_cbs_extrap_qc_df(\n df_qc, cbs_basis_key='aug', basis_set_lower='aug-cc-pVTZ',\n basis_set_higher='aug-cc-pVQZ'\n )\n df_qats = add_cbs_extrap_qats_df(\n df_qc, df_qats, cbs_basis_key='aug',\n basis_set_higher='aug-cc-pVQZ'\n )\n return df_qc, df_qats",
"def folder_to_df(path):\n summary_df = pd.DataFrame(columns=[\"file_name\", \"invoice_nr\", \"address\", \"contract\", \"base_charge\"])\n \n for file in Path(path).glob(\"*.pdf\"):\n print(file)\n try: \n summary_df = summary_df.append({\n \"file_name\": pdf_to_test(file)[0],\n \"invoice_nr\": pdf_to_test(file)[1],\n \"address\": pdf_to_test(file)[2],\n \"contract\": pdf_to_test(file)[3],\n \"base_charge\": pdf_to_test(file)[4]}, \n ignore_index = True)\n except:\n summary_df = summary_df.append({\n \"file_name\": file.name,\n \"invoice_nr\": \"Could not read malformed PDF file\",\n \"address\": \"Could not read malformed PDF file\",\n \"contract\": \"Could not read malformed PDF file\",\n \"base_charge\": \"Could not read malformed PDF file\"}, \n ignore_index = True)\n return summary_df",
"def parse_directory_of_series_files(self):\n if self.series_base_dir is None or len(self.series_file_list) < 1:\n self.logger.warn('Fatal: Base Directory not set %s')\n raise Exception('Error Base Directory not set')\n\n self.logger.info('Parsing dir of files from %s' % self.series_base_dir)\n\n self.ref_series_df = pd.DataFrame([], columns=['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT',\n 'FRAG', 'MOL_ID', 'ACTIVITY'])\n\n required_col = ['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT', 'FRAG', 'MOL_ID', 'ACTIVITY']\n max_series_id = 0\n\n for series_file in self.series_file_list:\n\n # print series_file\n temp_df = pd.read_csv(series_file) # , index_col=False)\n # print temp_df.columns\n\n # sanity check the data table for the columns we need\n for col in required_col:\n if col not in temp_df.columns:\n raise Exception(\"Input CSV %s does not have required columns: %s\" % (series_file, col))\n\n # re-sequence the series ID's\n if max_series_id == 0:\n max_series_id = temp_df['SERIES_ID'].max()\n else:\n max_series_id = self.ref_series_df['SERIES_ID'].max()\n # print max_series_id\n\n temp_df['SERIES_ID'] = temp_df['SERIES_ID'] + max_series_id\n temp_df['SOURCE_FILE'] = os.path.basename(series_file)\n\n # py2>3 explicit sort=False added\n self.ref_series_df = self.ref_series_df.append(temp_df, sort=False)\n self.logger.info('Appended dataframe shape %s to master dataframe %s' %\n (str(temp_df.shape), str(self.ref_series_df.shape)))\n # print ('Appended dataframe shape %s to master dataframe %s' % (str(temp_df.shape),\n # str(self.ref_series_df.shape)))\n # print self.ref_series_df['SERIES_ID'].max()\n\n self.series_comparison_df = self.ref_series_df"
]
| [
"0.6140729",
"0.60189486",
"0.60069317",
"0.5918741",
"0.5903055",
"0.5874215",
"0.57365006",
"0.5694283",
"0.5647274",
"0.5642507",
"0.56362635",
"0.56183475",
"0.5613369",
"0.5595061",
"0.5594611",
"0.5551575",
"0.5548242",
"0.5546088",
"0.55329657",
"0.54976887",
"0.5494978",
"0.54883736",
"0.5466808",
"0.5458557",
"0.5457711",
"0.5447318",
"0.5442391",
"0.5436507",
"0.5432758",
"0.5422107"
]
| 0.6369519 | 0 |
Plot the Fourier power spectrum as a function of Fourier period (1/frequency) | def FourierPlot(tas):
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plt.plot(frq,abs(Y),'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')",
"def plotFFT(filename):\n fs_rate, signal = wavfile.read(filename)\n len_audio = len(signal.shape)\n print(signal.shape)\n print(signal[:][0])\n if len_audio == 2:\n signal = signal.sum(axis=1) / 2\n N = signal.shape[0]\n FFT = abs(scipy.fft(signal))\n FFT_side = FFT[range(N//2)]\n freqs = scipy.fftpack.fftfreq(signal.size, 1.0/fs_rate)\n fft_freqs = np.array(freqs)\n freqs_side = freqs[range(N//2)] # one side frequency range\n plt.plot(freqs_side, abs(FFT_side), \"b\") # plotting the complete fft spectrum\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Single-sided Amplitude')\n plt.show()",
"def plot_fft(x, y, th=1e-4):\n n = x.size\n Lx = x[-1]-x[0]\n yf = np.fft.rfft(y)\n xf = np.fft.rfftfreq(n, d=Lx/n)\n fig = plt.figure(figsize=[9, 9])\n ax = fig.add_subplot(211)\n ax.plot(x, y)\n plt.title('1) first component of ODE solution')\n\n ax = fig.add_subplot(223)\n yf = yf / (n/2)\n ii = (np.abs(yf) > th)\n ii[0] = False\n plt.plot(xf[ii], np.abs(yf[ii]))\n T0 = 1.0/np.mean(xf*np.abs(yf))\n plt.title('2) power spectrum')\n plt.draw()\n plt.pause(2)\n return T0",
"def plot_spectrum( data ):\n d = data[1]\n # rfft gives positive frequecies. Square to get power spectrum.\n fp = numpy.absolute( numpy.fft.rfft( d ) )**2\n freq = numpy.fft.fftfreq( d.shape[-1] )\n n = len(fp)\n\n # reshape stuff a bit. keep only positive freqs.\n fp = fp[1:-1]\n freq = freq[1:n-1]\n lrslope = linregress( numpy.log(freq[30:]), numpy.log(fp[30:]) )[0]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.loglog( freq, fp, label=\"Lin. reg.=\"+str(round( lrslope,1 )) )\n ax.legend( loc='lower left' )\n return fig",
"def plot_periodogram(trj, coord: str = \"y\", fs: int = 1, interactive: bool = True):\n from scipy import signal\n\n vals = trj[coord].values\n f, Pxx = signal.periodogram(vals, fs=fs, window=\"hanning\", scaling=\"spectrum\")\n plt.title(\"Power Spectrum\")\n plt.plot(f, Pxx)\n if interactive:\n plt.show()\n\n return plt.gcf()",
"def plot_fft(self):\r\n\r\n self.ipx = int(self.imageData.shape[1]/2.)\r\n\r\n self.ipy = int(self.imageData.shape[2]/2.)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,self.ipx-2:self.ipx+2,self.ipy-2:self.ipy+2])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'ko-', markersize=2.5)\r\n\r\n mpl.plot(self.freqs[self.freq_point], np.mean(np.mean(nearf,axis=1), axis=1)[self.freq_point], 'ro', markersize=5)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,-6:-1,-6:-1])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'c-', markersize=2.5)\r\n\r\n mpl.title('FFT center of image and corner')",
"def powerSpectrum(input, nfft):\n result = fft(input, nfft)\n result = np.power(np.absolute(result), 2)\n\n # myplot(result, 'Power Spectogram')\n\n return result",
"def make_spectrum_figure(f):\n nb_freqs = int(f.readline().split()[0])\n freqs = read_list(f, nb_freqs)\n fluxes = read_list(f, nb_freqs)\n plot_spectrum(freqs, fluxes)\n plt.show()",
"def FourierRect(N):\n x = np.zeros((1,N))\n x[:,0:30]=1;\n x=x.flatten();\n \n \n #compute the DFT coefficients\n r1=FourierSeries(x)\n #magnitude of DFT coefficients\n a1=cabs(r1)\n\n #plot the time domain signal\n subplot(2,1,1)\n plt.plot(range(0,len(x)),x)\n xlabel('Time')\n ylabel('Amplitude')\n title('time doman')\n plt.ylim(-2,2);\n \n #plot the DFT coefficients\n L=len(a1);\n fr=np.arange(0,L);\n subplot(2,1,2)\n plt.stem(fr,a1,'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')\n title('complete signal')\n ticks=np.arange(0,L+1,25);\n plt.xticks(ticks,ticks); \n show()",
"def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')",
"def display_fft(self, N=4096):\n if len(self.series) % N != 0:\n return\n\n h = self.series[-N:]\n H = fft(h)\n\n # the squared magnitude of the fft is an estimate of the\n # power spectral density\n\n # http://documents.wolfram.com/applications/timeseries/\n # UsersGuidetoTimeSeries/1.8.3.html\n # http://en.wikipedia.org/wiki/Power_spectral_density\n freq = range(N / 2 + 1)\n sdf = [Hn * Hn.conjugate() for Hn in H]\n sdf = [sdf[f].real for f in freq]\n loglog(freq, sdf)\n xlabel(\"frequency\")\n ylabel(\"power\")\n show()",
"def plot_timefrequency(z, time, f, signal=None, method=\"stft\"):\n\n if method == \"stft\":\n figure_title = \"Short-time Fourier Transform Magnitude\"\n fig, ax = plt.subplots()\n for i in range(len(time)):\n ax.plot(f, z[:, i], label=\"Segment\" + str(np.arange(len(time))[i] + 1))\n ax.legend()\n ax.set_title(\"Signal Spectrogram\")\n ax.set_ylabel(\"STFT Magnitude\")\n ax.set_xlabel(\"Frequency (Hz)\")\n\n elif method == \"cwt\":\n figure_title = \"Continuous Wavelet Transform Magnitude\"\n elif method == \"wvd\":\n figure_title = \"Wigner Ville Distrubution Spectrogram\"\n fig = plt.figure()\n plt.plot(time, signal)\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Signal\")\n\n elif method == \"pwvd\":\n figure_title = \"Pseudo Wigner Ville Distribution Spectrogram\"\n\n fig, ax = plt.subplots()\n spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap(\"magma\"), shading=\"auto\")\n plt.colorbar(spec)\n ax.set_title(figure_title)\n ax.set_ylabel(\"Frequency (Hz)\")\n ax.set_xlabel(\"Time (sec)\")\n return fig",
"def plot_spectrum(freqs, fluxes, min_lambda=3700, max_lambda=8000):\n plt.plot(freqs, fluxes)\n plt.xlim((min_lambda, max_lambda))\n plt.xlabel(r'$\\lambda\\, (\\AA)$', size=16)\n plt.ylabel(r'$Flux$', size=16)\n #plt.axes().minorticks_on()",
"def plot_freq(signal,\n fs,\n ax=None,\n scale='linear',\n mode='magnitude',\n stem=False,\n sides=None,\n title=None,\n **kwargs):\n result, freqs = _spectral_helper(\n signal, fs, scale=scale, mode=mode, **kwargs)\n\n if ax is None:\n ax = plt.gca()\n\n if scale == 'linear':\n ax.set_ylabel('Magnitude (linear)')\n elif scale == 'db':\n ax.set_ylabel('Magnitude / dB')\n else:\n raise NameError(\"Invalid scale\")\n if mode == 'magnitude':\n if title is not None:\n ax.set_title(title)\n else:\n ax.set_title('Magnitude Spectrum')\n elif mode == 'phase':\n if title is not None:\n ax.set_title(title)\n else:\n ax.set_title('Phase Spectrum')\n ax.set_ylabel('Phase / rad')\n elif mode == 'psd':\n if title is not None:\n ax.set_title(title)\n else:\n ax.set_title('Power Density Spectrum')\n ax.set_ylabel('dB / Hz')\n else:\n raise NameError(\"Invalid mode\")\n if stem is False:\n ax.plot(freqs, result, linewidth=1.4)\n else:\n ax.stem(freqs, result, linewidth=1.4)\n ax.set_xlabel('f / Hz')\n ax.grid(True)\n ax.ticklabel_format(useOffset=False)\n return ax",
"def fft_plot(x: np.ndarray, fs: Optional[int] = None,\n nfft: int = 2**18, onesided_flag: bool = None,\n mode: str = \"magnitude\", log_freq_flag: bool = False) -> go.Figure:\n\n # input validation\n assert x.ndim == 1, \"input must be a 1D array\"\n assert mode in [\"magnitude\", \"phase\", \"magnitude_phase\"], \\\n \"invalid mode, must be magnitude / phase / magnitude_phase\"\n if fs is None:\n fs = 2 * np.pi\n if onesided_flag is None:\n if all(np.isreal(x)):\n onesided_flag = True\n else:\n onesided_flag = False\n if log_freq_flag is True:\n assert onesided_flag is True, \\\n \"log scale can be plotted only if onesided_flag is True\"\n\n # calculate\n nfft = fft.next_fast_len(np.maximum(x.size, nfft))\n\n if onesided_flag:\n x_fft = fft.rfft(x, n=nfft)\n f_vec = fft.rfftfreq(nfft, 1/fs)\n else:\n x_fft = np.fft.fftshift(fft.fft(x, n=nfft))\n f_vec = np.fft.fftshift(fft.fftfreq(nfft, 1/fs))\n\n mag = 10*np.log10(np.abs(x_fft)**2)\n phase = np.angle(x_fft) * 180 / (np.pi)\n\n # plot\n freq_title = \"Frequency [rad]\" if fs == 2*np.pi else \"Frequency [Hz]\"\n\n if mode == \"magnitude\":\n fig = px.line(x=f_vec, y=mag, log_x=log_freq_flag)\n fig.update_xaxes(title_text=freq_title)\n fig.update_yaxes(title_text=\"Magnitude [dB]\")\n elif mode == \"phase\":\n fig = px.line(x=f_vec, y=phase, log_x=log_freq_flag)\n fig.update_xaxes(title_text=freq_title)\n fig.update_yaxes(title_text=\"Phase [degrees]\")\n elif mode == \"magnitude_phase\":\n fig = make_subplots(\n rows=2, cols=1,\n shared_xaxes=True)\n fig.add_trace(go.Scatter(x=f_vec, y=mag), row=1, col=1)\n fig.add_trace(go.Scatter(x=f_vec, y=phase), row=2, col=1)\n fig.update_xaxes(title_text=freq_title)\n if log_freq_flag:\n fig.update_xaxes(type=\"log\")\n fig.update_yaxes(title_text=\"Magnitude [dB]\", row=1, col=1)\n fig.update_yaxes(title_text=\"Phase [degrees]\", row=2, col=1)\n fig.update_layout(showlegend=False)\n\n fig.show()\n\n return fig",
"def plotDFT(x):\n \n X = DFTdirect(x)\n plt.plot([c.re for c in x], [c.im for c in x], 'ro')\n plt.plot([c.re for c in X], [c.im for c in X], 'bo')\n plt.show()",
"def plot_spectrum(wavetable: np.ndarray) -> None:\n ps = np.abs(np.fft.fft(wavetable)) ** 2\n\n time_step = 1 / 44100\n freqs = np.fft.fftfreq(wavetable.size, time_step)\n idx = np.argsort(freqs)\n\n plt.plot(freqs[idx], ps[idx])\n plt.show()",
"def plotfft(s, fmax):\n\n fs = abs(np.fft.fft(s))\n f = np.linspace(0, fmax // 2, len(s) // 2)\n return (f[1:len(s) // 2].copy(), fs[1:len(s) // 2].copy())",
"def show_spectrum(h, title=\"\"):\n H = fft2(h)\n\n # Remember to plot the abs of the fft2(h)\n plt.imshow(np.abs(H))\n plt.gray()\n plt.title(title)\n plt.show()",
"def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()",
"def plot_spectrum(self, ax, data, stat=None, label_axes=True):\n if len(data.shape) == 1:\n data = np.reshape(data, (1, len(data)))\n\n x_pow = np.abs(data)\n if stat == None:\n if self.scale_select.currentIndex() == 0:\n ax.plot(x_pow, label='linear')\n elif self.scale_select.currentIndex() == 1:\n ax.plot(10*np.log10(x_pow), label='decibels')\n elif self.scale_select.currentIndex() == 2:\n ax.plot(np.var(x_pow, axis=0), label='variance')\n elif self.scale_select.currentIndex() == 3:\n ax.plot(skew(x_pow, axis=0), label='skew')\n elif self.scale_select.currentIndex() == 4:\n ax.plot(kurtosis(x_pow, axis=0), label='kurtosis')\n else:\n \n if self.scale_select.currentIndex() == 1:\n x_pow = 10*np.log10(x_pow)\n if stat == 'median' or stat == 'med':\n ax.plot(np.median(x_pow, axis=0), label='median')\n if stat == 'min':\n ax.plot(np.min(x_pow, axis=0), label='min')\n if stat == 'max':\n ax.plot(np.max(x_pow, axis=0), label='max')\n \n plt.minorticks_on()\n if label_axes:\n self.updateFreqAxis(ax, n_ticks=10)\n plt.xlabel(\"Frequency\")\n plt.ylabel(\"Amplitude\")\n plt.legend()",
"def getFourierPower(data, freq=None, ax=1, t=None):\n\n if t==None:\n t = numpy.linspace(0.0,1.0,64)\n Fs = 1.0/t[1] \n L = t.shape[0]\n ft = numpy.fft.fft(data,axis=ax)\n freqs = numpy.fft.fftfreq(64,1.0/64.0)\n ftAmp = abs(ft)/data.shape[1]*2\n ftPhase = numpy.arctan2(ft.real, ft.imag)\n if freq==None:\n ii = numpy.argmax(abs(ft[:,1:])**2.0,axis=1)\n ii += 1\n else:\n fr = numpy.asarray([freq])\n fr = fr.flatten()\n ii = numpy.ones(fr.shape)\n for kk in range(0,fr.shape[0]):\n ii[kk] = numpy.argmin(abs(freqs-fr[kk]))\n \n jj = numpy.arange(0,ftAmp.shape[0])\n amp = ftAmp[jj,ii.astype('int')]\n phase = ftPhase[jj,ii.astype('int')]\n else:\n Fs = 1.0/t[1]\n L = t.shape[0]\n ft = numpy.fft.fft(data,n=NFFT,axis=ax)[1:]/L#skip the DC\n ftAmp = abs(ft)/len(t)*2\n ftPhase = numpy.arctan2(ft.real, ft.imag)\n if freq==None:\n ii = argmax(abs(ft)**2.0,0)\n else: \n ii = freq-1\n \n amp = ftAmp[:,ii]\n phase = ftPhase[:,ii]\n return amp, phase, ii",
"def plot_wavefunctions(self, num_levels=10):\n for ind, psi in enumerate(self.psis(num_levels)):\n plot(self.x, psi * sign(psi[1] - psi[0]), label=\"$\\psi_%d$\" % ind)",
"def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, \"time [ns]\",\n freq_list, \"f [GHz]\")\n hist.Draw()\n graph.Draw(\"sameL\")\n fit = ROOT.TF1(\"fit\", \"pol4\", 0, 20*1e6)\n fit.FixParameter(0, freq_list[0])\n graph.Fit(fit)\n canvas.Update()",
"def frfplot(freq, H, freq_min=0, freq_max=None, type=1, legend=[]):\n FLAG = type # Plot type, should libe renamed throughout.\n freq = freq.reshape(1, -1)\n lenF = freq.shape[1]\n if len(H.shape) is 1:\n H = H.reshape(1, -1)\n\n if H.shape[0] > H.shape[1]:\n H = H.T\n\n if freq_max is None:\n freq_max = np.max(freq)\n\n if freq_min is None:\n freq_min = np.min(freq)\n\n if freq_min < np.min(freq):\n freq_min = np.min(freq)\n\n if freq_min > freq_max:\n raise ValueError('freq_min must be less than freq_max.')\n\n # print(str(np.amin(freq)))\n inlow = int(lenF * (freq_min - np.amin(freq)\n ) // (np.amax(freq) - np.amin(freq)))\n\n inhigh = int(lenF * (freq_max - np.amin(freq)\n ) // (np.amax(freq) - np.amin(freq)) - 1)\n # if inlow<1,inlow=1;end\n # if inhigh>lenF,inhigh=lenF;end\n \"\"\"print('freq shape: {}'.format(freq.shape))\n print('H shape: {}'.format(H.shape))\n print('Index of low frequency: {}'.format(inlow))\n print('Index of high frequency: {}'.format(inhigh))\"\"\"\n H = H[:, inlow:inhigh]\n # print(H.shape)\n freq = freq[:, inlow:inhigh]\n mag = 20 * np.log10(np.abs(H))\n # print(mag)\n # print(mag.shape)\n minmag = np.min(mag)\n maxmag = np.max(mag)\n phase = np.unwrap(np.angle(H)) * 180 / np.pi\n # phmin_max=[min(phase)//45)*45 ceil(max(max(phase))/45)*45];\n phmin = np.amin(phase) // 45 * 45.0\n phmax = (np.amax(phase) // 45 + 1) * 45\n \"\"\"minreal = np.amin(np.real(H))\n maxreal = np.amax(np.real(H))\n minimag = np.amin(np.imag(H))\n maximag = np.amax(np.imag(H))\"\"\"\n\n if FLAG is 1:\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.plot(freq.T, mag.T)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Mag (dB)')\n ax1.grid()\n ax1.set_xlim(xmax=freq_max, xmin=freq_min)\n ax1.set_ylim(ymax=maxmag, ymin=minmag)\n\n ax2.plot(freq.T, phase.T)\n ax2.set_xlabel('Frequency (Hz)')\n ax2.set_ylabel('Phase (deg)')\n ax2.grid()\n ax2.set_xlim(xmax=freq_max, xmin=freq_min)\n ax2.set_ylim(ymax=phmax, ymin=phmin)\n ax2.set_yticks(np.arange(phmin, (phmax + 45), 45))\n fig.tight_layout()\n\n if len(legend) > 0:\n plt.legend(legend)\n ax = (ax1, ax2)\n else:\n print(\"Sorry, that option isn't supported yet\")\n return ax\n\n \"\"\"# elif FLAG==2:\n # subplot(2,1,1)\n # semilogx(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # % Fmin,Fmax,min(mag),max(mag)\n # axis([Fmin Fmax minmag maxmag])\n\n # subplot(2,1,2)\n # semilogx(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n\n # elif FLAG==3:\n # subplot(2,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F*2*pi,mag)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Wmin Wmax minmag maxmag])\n # zoom on\n # subplot(2,1,2)\n # semilogx(F*2*pi,phase)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Phase (deg)')\n # grid on\n # axis([Wmin Wmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n\n # elseif FLAG==4\n # subplot(2,1,1)\n # plot(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # subplot(2,1,2)\n # plot(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==5\n # subplot(1,1,1)\n # imax=round(length(F)*Fmax/max(F));\n # imin=round(length(F)*Fmin/max(F))+1;\n # 
plot(real(Xfer(imin:imax)),imag(Xfer(imin:imax)))\n # xlabel('Real')\n # ylabel('Imaginary')\n # grid on\n # zoom on\n # elseif FLAG==6\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # plot(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # elseif FLAG==7\n # subplot(1,1,1)\n # plot(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # elseif FLAG==8\n # subplot(1,1,1)\n # plot(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # elseif FLAG==9\n # subplot(1,1,1)\n # plot(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==10\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # elseif FLAG==11\n # subplot(1,1,1)\n # semilogx(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # elseif FLAG==12\n # subplot(1,1,1)\n # semilogx(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # elseif FLAG==13\n # subplot(1,1,1)\n # semilogx(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==14\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F*2*pi,mag)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Wmin Wmax minmag maxmag])\n # zoom on\n # elseif FLAG==15\n # subplot(1,1,1)\n # semilogx(F*2*pi,phase)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Phase (deg)')\n # grid on\n # axis([Wmin Wmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # else\n # subplot(2,1,1)\n # mag=20*log10(abs(Xfer));\n # plot(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # subplot(2,1,2)\n # plot(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n \"\"\"",
"def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")",
"def plot_frf(G, freq, p=0, m=0, sca=1, fig=None, ax=None, *args, **kwargs):\n fig, ax = fig_ax_getter(fig, ax)\n ax.plot(freq*sca, db(np.abs(G[:, p, m])), *args, **kwargs)\n\n if ax is None:\n ax.set_title('Nonparametric linear FRF')\n if sca == 1:\n xstr = '(Hz)'\n else:\n xstr = '(rad/s)'\n ax.set_xlabel('Frequency ' + xstr)\n ax.set_title(r'$G_{{{}{}}}$'.format(p, m))\n\n # For linear scale: 'Amplitude (m/N)'\n ax.set_ylabel('Amplitude (dB)')\n return fig, ax",
"def show_spectrum(image: np.ndarray):\n assert len(image.shape) == 2, 'image must be 2D'\n spectral = np.fft.fft2(image)\n spectral[0, 0] = 0 # Kill DC component\n spectrum = np.fft.fftshift(spectral) # Shift DC to center\n magnitude = np.log(np.abs(spectrum))\n plt.imshow(magnitude, cmap='gray')\n plt.show()",
"def powerSpectrum(input, nfft):\n freq = fft(input, nfft)\n return freq.real**2 + freq.imag**2",
"def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)"
]
| [
"0.7615863",
"0.7419003",
"0.7383069",
"0.7210782",
"0.71881646",
"0.71796924",
"0.7083896",
"0.7013927",
"0.69972104",
"0.69949406",
"0.69703716",
"0.6941302",
"0.6861467",
"0.6775772",
"0.67533875",
"0.67032903",
"0.6698539",
"0.6558969",
"0.65308404",
"0.6506726",
"0.64978397",
"0.64937395",
"0.64885324",
"0.6475332",
"0.64704764",
"0.6456125",
"0.64535713",
"0.64394265",
"0.6436807",
"0.6412766"
]
| 0.7869603 | 0 |
Check to see whether the annual cycle is dominant | def annual_cycle_dominant(tas):
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False",
"def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False",
"def duty_cycle(self):\n diff = np.diff(self.lc.time)\n t = np.median(diff)\n std = np.std(diff)\n mask = diff > (t + 3 * std)\n return (1 - np.sum(diff[mask]) / np.sum(diff))",
"def is_dominant(self, index_set = None, positive = True):\n return self.first_descent(index_set, not positive) is None",
"def new_year(dacycle):\n\n this_year = dacycle['time.start'].year\n prev_year = (dacycle['time.start']-dacycle['cyclelength']).year\n\n return (this_year != prev_year)",
"def test_dominance(self):\n c = array([1,0,2,5,2])\n self.assertFloatEqual(dominance(c), .34)\n d = array([5])\n self.assertEqual(dominance(d), 1)",
"def stationary(var, desc):\n print(\n f\"'{desc}' is stationary: \"\n f\"{adfuller(var)[0] < adfuller(var)[4]['1%']}.\")",
"def is_dominant_weight(self): # Or is_dominant_integral_weight?\n alphacheck = self.parent().simple_coroots()\n from sage.rings.semirings.non_negative_integer_semiring import NN\n return all(self.inner_product(alphacheck[i]) in NN\n for i in self.parent().index_set())",
"def get_dominant_cycle(tas):\n nt,nlat,nlon = tas.shape\n to_mask = MV.zeros((nlat,nlon))\n for i in range(nlat):\n for j in range(nlon):\n to_mask[i,j]=annual_cycle_dominant(tas[:,i,j])\n to_mask.setAxisList(tas.getAxisList()[1:])\n return to_mask",
"def has_convergence_delta(self) -> bool:\n return False",
"def test_for_discontinuity(a_n,b_n,c_n,d_n,x_n,x_n_plus_1,y_n_plus_1):\n\ty_n_final = a_n + b_n*(x_n_plus_1-x_n) + c_n*(x_n_plus_1-x_n)**2 + d_n*(x_n_plus_1-x_n)**3\n\tresult = abs(y_n_final-y_n_plus_1)<0.001\n\treturn(result)",
"def get_convergence_episode(self):\n values = self.stats['return_stats']['episode_totals']\n _, y, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n # The convergence is established as the first time the average return\n # is above the lower bounds of the final return.\n first_episode = max(np.argmax(y >= y_lower[-1]), 1)\n return first_episode",
"def test_yearly_resolution_perfect_model(monthly_initialized, monthly_obs):\n yearly_pm = monthly_initialized.resample(init=\"YS\").mean()\n yearly_obs = monthly_obs.resample(time=\"YS\").mean()\n yearly_pm.lead.attrs[\"units\"] = \"years\"\n assert compute_perfect_model(yearly_pm, yearly_obs).all()",
"def is_artificial(self):\n\t\treturn 0",
"def discrete_time(self):\n return bool(self._ll_tree_sequence.get_discrete_time())",
"def isLeapYear(self):\n if self.year % 400 == 0: return True\n elif self.year % 100 == 0: return False\n elif self.year % 4 == 0: return True\n return False",
"def is_bissextile(today):\n if (today.year % 4 == 0 and today.year % 100 != 0) or (today.year % 400 == 0):\n return True\n return False",
"def checkdeplaid(incidence):\n if incidence >= 95 and incidence <= 180:\n return 'night'\n elif incidence >=90 and incidence < 95:\n return 'night'\n elif incidence >= 85 and incidence < 90:\n return 'day'\n elif incidence >= 0 and incidence < 85:\n return 'day'\n else:\n return False",
"def _in_epsilon_cycle_(self, fsm=None):\n return self in self._epsilon_successors_(fsm)",
"def is_leap_year():",
"def test_centenary_positive():\n assert is_leap_year(2400) is True",
"def yearlyDepreciation():\n return .10",
"def is_stationary(self) -> bool:\n ad_fuller_result = adfuller(self.y.dropna(), autolag='AIC')\n p_value = ad_fuller_result[1]\n return p_value <= 0.5",
"def _is_dominated(teamA, teamB, novelty): \r\n if (teamB.fitness_ >= teamA.fitness_ and teamB.diversity_[novelty] >= teamA.diversity_[novelty] and \r\n ((teamB.fitness_ > teamA.fitness_ and not is_nearly_equal_to(teamA.fitness_, teamB.fitness_)) or\r\n (teamB.diversity_[novelty] > teamA.diversity_[novelty] and \r\n not is_nearly_equal_to(teamA.diversity_[novelty], teamB.diversity_[novelty])))):\r\n return True\r\n return False",
"def leapyear(year):\n\n # Return the answer\n return bool(calendar.isleap(year))",
"def _departure_on_duty(self) -> bool:\n return self._get_departure_shift().is_on_duty()",
"def discrete_observations(self) -> bool:\r\n return self.observations.discrete",
"def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990",
"def free_cookie_is_winner(person):\n now = timezone.now()\n discount_period = free_cookie_discount()\n\n if now < discount_period.begin or (discount_period.end and now > discount_period.end):\n # Discount period is not running at this moment.\n return False\n\n main_studies = Study.objects.filter(primary_study=True)\n bsc_main_studies = main_studies.filter(type='BSc')\n msc_main_studies = main_studies.filter(type='MSc')\n date = datetime.date(current_academic_year_strict() - 2, 9, 1)\n\n primary_study_member = Student.objects.filter(person=person, studyperiod__study__in=main_studies).exists()\n\n if not primary_study_member:\n return False\n\n older_years = Student.objects.filter(\n Q(studyperiod__study__in=bsc_main_studies, studyperiod__begin__lte=date) | Q(\n studyperiod__study__in=msc_main_studies\n ), person=person\n ).exists()\n\n if older_years:\n chance = settings.COOKIE_CORNER_FREE_COOKIE_DISCOUNT_RATE_HIGH\n else:\n chance = settings.COOKIE_CORNER_FREE_COOKIE_DISCOUNT_RATE_LOW\n\n if random.random() > chance:\n # Did not win\n return False\n\n already_used = discount_period.discount_set.all().aggregate(Sum('amount'))['amount__sum'] or 0\n if already_used >= settings.COOKIE_CORNER_FREE_COOKIE_DISCOUNT_LIMIT:\n # Limit has been reached\n return False\n\n # You won!\n return True",
"def compute_confidence_interval(self) -> bool:\n return False"
]
| [
"0.62297946",
"0.62297946",
"0.5960085",
"0.5867683",
"0.57064927",
"0.5630493",
"0.5588978",
"0.5522075",
"0.55003005",
"0.5482426",
"0.54632854",
"0.53641206",
"0.53202057",
"0.529571",
"0.52799577",
"0.52640945",
"0.52534276",
"0.52385813",
"0.52203447",
"0.5218158",
"0.52179176",
"0.5205857",
"0.51999307",
"0.51921713",
"0.51806515",
"0.5169679",
"0.5161445",
"0.51564276",
"0.51480657",
"0.5147904"
]
| 0.7054014 | 0 |
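Read as data, the record above pairs the docstring "Check to see whether the annual cycle is dominant" with an FFT-based function that actually returns the dominant period. Below is a minimal, self-contained sketch of the same idea on synthetic monthly data; the function name `dominant_period`, the synthetic series, and the explicit zeroing of the DC bin are illustrative assumptions, not part of the dataset row.

```python
import numpy as np
from scipy import signal

def dominant_period(x):
    # Detrend, take the FFT, and return the period (in samples) of the
    # largest-magnitude component, ignoring the zero-frequency bin.
    detrended = signal.detrend(x)
    freqs = np.fft.fftfreq(len(x))
    mag = np.abs(np.fft.fft(detrended))
    mag[0] = 0.0
    return 1.0 / np.abs(freqs[np.argmax(mag)])

# Synthetic monthly series: 30 years of an annual cycle plus noise.
rng = np.random.default_rng(0)
months = np.arange(360)
tas = 10.0 * np.sin(2 * np.pi * months / 12.0) + rng.normal(0.0, 1.0, months.size)
print(round(dominant_period(tas)))  # 12 -> the annual cycle dominates
```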
Return the tangent of the phase associated with Fourier mode | def get_tan_phase(tas,period=12):
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
phase = Im/R
return phase[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tangent(self, p):\n p = array(p, float)\n v = (p - self.o)\n v /= norm(v)\n b = self.o + ((cross(v, self.N) - v) / 3)*self.r\n mb = _mirror_(self.o, p, b) \n mbb = mb - b\n return mbb/norm(mbb)",
"def position_to_Fourier(self):\n #TODO Try to do it with FFT \n U = self.alphas @ self.positions\n \n return U",
"def phase(self):\n return -self.attrs['RFphase']*2*np.pi",
"def estimate_phase(img_ft, sim_frq, dxy):\n ny, nx = img_ft.shape\n fx = tools.get_fft_frqs(nx, dxy)\n fy = tools.get_fft_frqs(ny, dxy)\n\n phase = np.mod(np.angle(tools.get_peak_value(img_ft, fx, fy, sim_frq, 2)), 2*np.pi)\n\n return phase",
"def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))",
"def tan(self):\n\t\t# Ensure that no values in self.val are of the form (pi/2 + k*pi) \n\t\tvalues = map(lambda x: ((x / np.pi) - 0.5) % 1 == 0.0, self.val)\n\t\tif any(values):\n\t\t\traise ValueError(\"Tangent not valid at pi/2, -pi/2.\")\n\t\tval = np.tan(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.power(1 / np.cos(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = np.multiply(to_multiply, self.der)\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)",
"def phaseangle(complexr):\n return numpy.arctan2(complexr.imag,complexr.real)",
"def det_to_tanp(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix - 1\n x, y = self._wcs.pix2foc(x, y, 0)\n x -= crpix1\n y -= crpix2\n return x, y",
"def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase",
"def det_to_tanp(self, x, y):\n return x, y",
"def calc_phase(p, t):\n\n return (t % p)/p",
"def tand(A):\n Arad = np.deg2rad(A)\n x = np.tan(Arad) \n return x",
"def phireturn(xhat0, tof):\n\t\n\t\tstoptime = tof\n\t\tnumpoints = 2\n\t\t#Integration time array:\n\t\tt = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]\n\t\t\n\t\txsol = twomode.intfull(xhat0, t, abserror=1.0e-14, relerror=1.0e-12)\n\t\t#Phase of the first mode is the slice phase\n\t\tphi = np.angle(xsol[1,0] + 1j*xsol[1,1]) \t\n\t\t\n\t\treturn -phi",
"def ft(x):\n y = np.fft.rfft(x)\n\n phi = 2 * np.pi * np.random.random(len(y))\n\n phi[0] = 0.0\n if len(x) % 2 == 0:\n phi[-1] = 0.0\n\n y = y * np.exp(1j * phi)\n return np.fft.irfft(y, n=len(x))",
"def phase_swap_operator(self, x1, x2):\r\n return np.fft.ifft(np.abs(np.fft.fft(x1))*np.angle(np.fft.fft(x2)))",
"def tanh(self):\r\n getcontext().prec += 2\r\n re2 = 2 * self._real\r\n im2 = 2 * self._imag\r\n den = cosh(re2) + cos(im2)\r\n ans = self.__class__(sinh(re2) / den, sin(im2) / den)\r\n getcontext().prec -= 2\r\n return +ans",
"def chord_and_tangent(F, P):\n # check the input\n R = F.parent()\n if not is_MPolynomialRing(R):\n raise TypeError('equation must be a polynomial')\n if R.ngens() != 3:\n raise TypeError('%s is not a polynomial in three variables' % F)\n if not F.is_homogeneous():\n raise TypeError('%s is not a homogeneous polynomial' % F)\n x, y, z = R.gens()\n if len(P) != 3:\n raise TypeError('%s is not a projective point' % P)\n K = R.base_ring()\n try:\n P = [K(c) for c in P]\n except TypeError:\n raise TypeError('cannot coerce %s into %s' % (P, K))\n if F(P) != 0:\n raise ValueError('%s is not a point on %s' % (P, F))\n\n # find the tangent to F in P\n dx = K(F.derivative(x)(P))\n dy = K(F.derivative(y)(P))\n dz = K(F.derivative(z)(P))\n # if dF/dy(P) = 0, change variables so that dF/dy != 0\n if dy == 0:\n if dx != 0:\n g = F.substitute({x: y, y: x})\n Q = [P[1], P[0], P[2]]\n R = chord_and_tangent(g, Q)\n return [R[1], R[0], R[2]]\n elif dz != 0:\n g = F.substitute({y: z, z: y})\n Q = [P[0], P[2], P[1]]\n R = chord_and_tangent(g, Q)\n return [R[0], R[2], R[1]]\n else:\n raise ValueError('%s is singular at %s' % (F, P))\n\n # t will be our choice of parmeter of the tangent plane\n # dx*(x-P[0]) + dy*(y-P[1]) + dz*(z-P[2])\n # through the point P\n t = rings.PolynomialRing(K, 't').gen(0)\n Ft = F(dy*t+P[0], -dx*t+P[1], P[2])\n if Ft == 0: # (dy, -dx, 0) is projectively equivalent to P\n # then (0, -dz, dy) is not projectively equivalent to P\n g = F.substitute({x: z, z: x})\n Q = [P[2], P[1], P[0]]\n R = chord_and_tangent(g, Q)\n return [R[2], R[1], R[0]]\n # Ft has a double zero at t=0 by construction, which we now remove\n Ft = Ft // t**2\n\n # first case: the third point is at t=infinity\n if Ft.is_constant():\n return projective_point([dy, -dx, K(0)])\n # second case: the third point is at finite t\n else:\n assert Ft.degree() == 1\n t0 = Ft.roots()[0][0]\n return projective_point([dy*t0+P[0], -dx*t0+P[1], P[2]])",
"def phase_lifetime(r, freq=1):\n return np.tan(np.angle(r)) / (2 * np.pi * freq)",
"def T_fourier(shape, T, is_fft_shifted = True):\n # make i, j, k for each pixel\n i = np.fft.fftfreq(shape[0]) \n j = np.fft.fftfreq(shape[1])\n k = np.fft.fftfreq(shape[2])\n i, j, k = np.meshgrid(i, j, k, indexing='ij')\n\n if is_fft_shifted is False :\n i = np.fft.ifftshift(i)\n j = np.fft.ifftshift(j)\n k = np.fft.ifftshift(k)\n\n phase_ramp = np.exp(- 2J * np.pi * (i * T[0] + j * T[1] + k * T[2]))\n return phase_ramp",
"def rhs(y,t):\n return math.cos(t)",
"def tand(x):\n if isinstance(x, numpy.ndarray):\n return numpy.tan(math.pi * x / 180.0)\n return math.cos(math.radians(x))",
"def tanh(x):\n return (1 - e ** (-2*x))/ (1 + e ** (-2*x))",
"def _tand(v):\n return math.tan(math.radians(v))",
"def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)",
"def prolatesize(f, phase):\n # 2012-05-04 11:19 IJMC: Created.\n\n return np.sqrt(np.cos(phase)**2 + f**2 * np.sin(phase)**2)",
"def _coherency_phase_delay(f, fxy):\r\n\r\n return np.angle(fxy) / (2 * np.pi * f)",
"def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])",
"def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])",
"def calc_torsion_phi(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n paC = prev_res.get_atom('C')\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n return AtomMath.calc_torsion_angle(paC, aN, aCA, aC)",
"def dphi(x):\n return -kf(\n flapping_wing_frequency, flapping_angular_velocity_amplitude,\n flapping_acceleration_time_fraction, flapping_delay_time_fraction,\n x)"
]
| [
"0.6427801",
"0.63849",
"0.6319369",
"0.62911284",
"0.6175167",
"0.6103598",
"0.61004394",
"0.5953742",
"0.59293723",
"0.5875864",
"0.5871563",
"0.5863087",
"0.5850907",
"0.5847475",
"0.58400255",
"0.5815378",
"0.5795082",
"0.5776325",
"0.5728028",
"0.5683427",
"0.5675834",
"0.56730944",
"0.56611526",
"0.56602025",
"0.56564033",
"0.56363946",
"0.5628084",
"0.56034863",
"0.5594388",
"0.55917954"
]
| 0.6825005 | 0 |
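For the record above, a short worked check of what the returned Im/R quantity means: for a pure shifted cosine it equals the tangent of the phase of the chosen Fourier coefficient. The 360-sample series, the shift of 60 samples, and the use of `np.arctan2` are assumptions made only for this illustration.

```python
import numpy as np

n, shift = 360, 60
t = np.arange(n)
x = np.cos(2 * np.pi * (t - shift) / n)   # one cycle over the series, shifted by 60

coeff = np.fft.fft(x)[1] / n              # bin 1 = one cycle per series
tan_phase = coeff.imag / coeff.real       # the quantity the record's function returns
phase = np.arctan2(coeff.imag, coeff.real)

print(np.isclose(np.tan(phase), tan_phase))   # True: Im/R is tan(phase)
print(round(-phase * n / (2 * np.pi)))        # 60: the shift recovered from the phase
```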
Convert phase to day of the year | def phase_to_day(phase):
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))",
"def day_of_year(self):\n return int(self.date.strftime('%j'))",
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))",
"def day_of_year(date=datetime.datetime.now()):\n return date.strftime(\"Its the %j day of %Y'th year.\")",
"def _four_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return str(year)",
"def day_of_year(time):\n if isinstance(time, datetime) or isinstance(time, date):\n return int(time.strftime(\"%j\"))\n elif isinstance(time, np.datetime64):\n doy = time.astype(\"datetime64[D]\")\n doy = doy - doy.astype(\"datetime64[Y]\") + np.timedelta64(1, \"D\")\n return doy.astype(\"i\")\n elif isinstance(time, str):\n return day_of_year(np.datetime64(time))\n elif np.size(time) > 0:\n if isinstance(time, list) or isinstance(time, tuple):\n return np.array([day_of_year(t) for t in time])\n else:\n doy = np.atleast_1d(time)\n # Truncate on daily resolution\n doy = doy.astype(\"datetime64[D]\")\n doy = doy - doy.astype(\"datetime64[Y]\") + np.timedelta64(1, \"D\")\n return doy.astype(\"i\")",
"def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day",
"def Anchor_day(year):\r\n day = (5 * ((year // 100) % 4) + 2) % 7\r\n return day",
"def dia_independencia(year):\n return year, SEP, 16",
"def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy",
"def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')",
"def _two_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return \"'%02d\" % (year % 100)",
"def day_of_year(self):\n if self._day_of_year is None:\n cumul_days_in_month_nonleap = tf.math.cumsum(\n _DAYS_IN_MONTHS_NON_LEAP, exclusive=True)\n cumul_days_in_month_leap = tf.math.cumsum(\n _DAYS_IN_MONTHS_LEAP, exclusive=True)\n days_before_month_non_leap = tf.gather(cumul_days_in_month_nonleap,\n self.month() - 1)\n days_before_month_leap = tf.gather(cumul_days_in_month_leap,\n self.month() - 1)\n days_before_month = tf.where(\n date_utils.is_leap_year(self.year()), days_before_month_leap,\n days_before_month_non_leap)\n self._day_of_year = days_before_month + self.day()\n return self._day_of_year",
"def get_year(self):\n\n # First we get the first 8 bits stored in the yqr register\n year_bcd = self.__read_register(_REGISTER_YEAR)\n\n # Then we extract the digits and the tens\n tens = (year_bcd & 0xF0) >> 4 # 0xF0 = 0b11110000\n digit = (year_bcd & 0x0F) # 0x0F = 0b00001111\n\n # We return year value shifted in range [1970..2129]\n return (10 * (tens) + digit) + 1970",
"def sectoFracYear(stime):\n\n ltime = convertCtimeToYdate(stime)\n atemp = re.split(':', ltime)\n year= int(atemp[0])\n ydate = int(atemp[1])\n hours = int(atemp[2])\n minutes = int(atemp[3])\n seconds = int(atemp[4])\n \n chk = 4.0 * int(0.25 * year)\n if chk == year:\n base = 366\n else:\n base = 365\n \n day = ydate + hours / 24.0 + minutes / 1440.0 + seconds / 86400.0\n \n return year + day / base",
"def make_year(res):\n return str(res['issued']['date-parts'][0][0])",
"def _ord2ymd(n):\n n -= 1\n n400, n = divmod(n, _DI400Y)\n year = n400 * 400 + 1 # ..., -399, 1, 401, ...\n n100, n = divmod(n, _DI100Y)\n n4, n = divmod(n, _DI4Y)\n n1, n = divmod(n, 365)\n year += n100 * 100 + n4 * 4 + n1\n if n1 == 4 or n100 == 4:\n assert n == 0\n return year - 1, 12, 31\n leapyear = n1 == 3 and (n4 != 24 or n100 == 3)\n assert leapyear == _is_leap(year)\n month = (n + 50) >> 5\n preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)\n if preceding > n: # estimate is too large\n month -= 1\n preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)\n n -= preceding\n assert 0 <= n < _days_in_month(year, month)\n return year, month, n + 1",
"def dayOfYear(self):\n d = int(self._d + (_tzoffset(self._tz, self._t) / 86400.0))\n return int((d + jd1901) - _julianday(self._year, 1, 0))",
"def days_to_years(datum):\n return datum/DAYS_PER_YEAR",
"def day_of_year(time_string):\n SECONDS_IN_DAY = 60 * 60 * 24.0\n time = parse_time(time_string)\n time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)\n return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1",
"def jovian_year(cls, date):\n return amod(quotient(cls.hindu_day_count(date), cls.ARYA_JOVIAN_PERIOD / 12) + 27, 60)",
"def yearMonthDay() :\n timeDateValue = time.asctime(time.gmtime()).lower().split()\n if int(timeDateValue[2]) < 10 : timeDateValue[2] = str('0'+str(timeDateValue[2]))\n return '%s%s%s' % (timeDateValue[4],timeDateValue[1],timeDateValue[2])",
"def yy(self):\n return str(self._year)[-2:]",
"def datetime_to_decimal_year(time):\n if not isinstance(time, datetime):\n raise TypeError(\"The input must be a datetime object.\")\n\n year_start = datetime(year=time.year, month=1, day=1)\n next_year_start = datetime(year=time.year+1, month=1, day=1)\n\n year_elapsed = (time - year_start).total_seconds()\n year_total = (next_year_start - year_start).total_seconds()\n\n return time.year + year_elapsed / year_total",
"def normalise_two_digit_year(y):\r\n if y[0] == \"'\":\r\n y = y[1:]\r\n if int(y) < 39:\r\n return '%04d' % (int(y) + 2000)\r\n elif int(y) < 100:\r\n return '%04d' % (int(y) + 1900)\r\n else:\r\n return '%04d' % int(y[:4])",
"def decade(year):\r\n # get the first 3 digits of the year\r\n partial = (year[0]//10).item()\r\n # add a 0 to the end, return as decade\r\n return partial * 10",
"def date_year(date):\n return date.year",
"def get_year(parameters_dictionary):\n if \"start-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"start-year\"])\n return str(year) + str(year + 1)\n elif \"end-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"end-year\"])\n return str(year - 1) + str(year)\n else:\n return str(THIS_YEAR - 1) + str(THIS_YEAR)",
"def dia_revolucion(year):\n return nth_day_of_month(3, MON, NOV, year)",
"def get_year(date):\n return date.strftime('%Y')"
]
| [
"0.66281295",
"0.656976",
"0.65153605",
"0.63975745",
"0.6394729",
"0.63570565",
"0.62479067",
"0.62440753",
"0.62306625",
"0.6158912",
"0.61399734",
"0.61389637",
"0.613164",
"0.6119462",
"0.61124986",
"0.6075899",
"0.60695523",
"0.60540843",
"0.60523653",
"0.60377324",
"0.5994341",
"0.5987959",
"0.5981708",
"0.59585166",
"0.5932668",
"0.59135",
"0.5906045",
"0.58921593",
"0.5882068",
"0.5853771"
]
| 0.816147 | 1 |
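The record above rescales a Fourier phase in radians to a day of the year on a 365-day calendar, wrapping negative phases first. A quick worked check reusing the record's own formula (the helper name and the printed examples are for illustration only):

```python
import numpy as np

def phase_to_day(phase):
    # Wrap negative phases into [0, 2*pi), then rescale radians to days.
    if phase < 0:
        phase += 2 * np.pi
    return phase * (365.0 / (2 * np.pi))

print(phase_to_day(np.pi / 2))    # ~91.25  (a quarter of the year)
print(phase_to_day(-np.pi / 2))   # ~273.75 (wraps to three quarters of the year)
```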
Convert phase to day of the year | def phase_to_day(phase):
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))",
"def day_of_year(self):\n return int(self.date.strftime('%j'))",
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))",
"def day_of_year(date=datetime.datetime.now()):\n return date.strftime(\"Its the %j day of %Y'th year.\")",
"def _four_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return str(year)",
"def day_of_year(time):\n if isinstance(time, datetime) or isinstance(time, date):\n return int(time.strftime(\"%j\"))\n elif isinstance(time, np.datetime64):\n doy = time.astype(\"datetime64[D]\")\n doy = doy - doy.astype(\"datetime64[Y]\") + np.timedelta64(1, \"D\")\n return doy.astype(\"i\")\n elif isinstance(time, str):\n return day_of_year(np.datetime64(time))\n elif np.size(time) > 0:\n if isinstance(time, list) or isinstance(time, tuple):\n return np.array([day_of_year(t) for t in time])\n else:\n doy = np.atleast_1d(time)\n # Truncate on daily resolution\n doy = doy.astype(\"datetime64[D]\")\n doy = doy - doy.astype(\"datetime64[Y]\") + np.timedelta64(1, \"D\")\n return doy.astype(\"i\")",
"def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day",
"def Anchor_day(year):\r\n day = (5 * ((year // 100) % 4) + 2) % 7\r\n return day",
"def dia_independencia(year):\n return year, SEP, 16",
"def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy",
"def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')",
"def _two_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return \"'%02d\" % (year % 100)",
"def day_of_year(self):\n if self._day_of_year is None:\n cumul_days_in_month_nonleap = tf.math.cumsum(\n _DAYS_IN_MONTHS_NON_LEAP, exclusive=True)\n cumul_days_in_month_leap = tf.math.cumsum(\n _DAYS_IN_MONTHS_LEAP, exclusive=True)\n days_before_month_non_leap = tf.gather(cumul_days_in_month_nonleap,\n self.month() - 1)\n days_before_month_leap = tf.gather(cumul_days_in_month_leap,\n self.month() - 1)\n days_before_month = tf.where(\n date_utils.is_leap_year(self.year()), days_before_month_leap,\n days_before_month_non_leap)\n self._day_of_year = days_before_month + self.day()\n return self._day_of_year",
"def get_year(self):\n\n # First we get the first 8 bits stored in the yqr register\n year_bcd = self.__read_register(_REGISTER_YEAR)\n\n # Then we extract the digits and the tens\n tens = (year_bcd & 0xF0) >> 4 # 0xF0 = 0b11110000\n digit = (year_bcd & 0x0F) # 0x0F = 0b00001111\n\n # We return year value shifted in range [1970..2129]\n return (10 * (tens) + digit) + 1970",
"def sectoFracYear(stime):\n\n ltime = convertCtimeToYdate(stime)\n atemp = re.split(':', ltime)\n year= int(atemp[0])\n ydate = int(atemp[1])\n hours = int(atemp[2])\n minutes = int(atemp[3])\n seconds = int(atemp[4])\n \n chk = 4.0 * int(0.25 * year)\n if chk == year:\n base = 366\n else:\n base = 365\n \n day = ydate + hours / 24.0 + minutes / 1440.0 + seconds / 86400.0\n \n return year + day / base",
"def make_year(res):\n return str(res['issued']['date-parts'][0][0])",
"def _ord2ymd(n):\n n -= 1\n n400, n = divmod(n, _DI400Y)\n year = n400 * 400 + 1 # ..., -399, 1, 401, ...\n n100, n = divmod(n, _DI100Y)\n n4, n = divmod(n, _DI4Y)\n n1, n = divmod(n, 365)\n year += n100 * 100 + n4 * 4 + n1\n if n1 == 4 or n100 == 4:\n assert n == 0\n return year - 1, 12, 31\n leapyear = n1 == 3 and (n4 != 24 or n100 == 3)\n assert leapyear == _is_leap(year)\n month = (n + 50) >> 5\n preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)\n if preceding > n: # estimate is too large\n month -= 1\n preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)\n n -= preceding\n assert 0 <= n < _days_in_month(year, month)\n return year, month, n + 1",
"def dayOfYear(self):\n d = int(self._d + (_tzoffset(self._tz, self._t) / 86400.0))\n return int((d + jd1901) - _julianday(self._year, 1, 0))",
"def days_to_years(datum):\n return datum/DAYS_PER_YEAR",
"def day_of_year(time_string):\n SECONDS_IN_DAY = 60 * 60 * 24.0\n time = parse_time(time_string)\n time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)\n return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1",
"def jovian_year(cls, date):\n return amod(quotient(cls.hindu_day_count(date), cls.ARYA_JOVIAN_PERIOD / 12) + 27, 60)",
"def yearMonthDay() :\n timeDateValue = time.asctime(time.gmtime()).lower().split()\n if int(timeDateValue[2]) < 10 : timeDateValue[2] = str('0'+str(timeDateValue[2]))\n return '%s%s%s' % (timeDateValue[4],timeDateValue[1],timeDateValue[2])",
"def yy(self):\n return str(self._year)[-2:]",
"def datetime_to_decimal_year(time):\n if not isinstance(time, datetime):\n raise TypeError(\"The input must be a datetime object.\")\n\n year_start = datetime(year=time.year, month=1, day=1)\n next_year_start = datetime(year=time.year+1, month=1, day=1)\n\n year_elapsed = (time - year_start).total_seconds()\n year_total = (next_year_start - year_start).total_seconds()\n\n return time.year + year_elapsed / year_total",
"def normalise_two_digit_year(y):\r\n if y[0] == \"'\":\r\n y = y[1:]\r\n if int(y) < 39:\r\n return '%04d' % (int(y) + 2000)\r\n elif int(y) < 100:\r\n return '%04d' % (int(y) + 1900)\r\n else:\r\n return '%04d' % int(y[:4])",
"def decade(year):\r\n # get the first 3 digits of the year\r\n partial = (year[0]//10).item()\r\n # add a 0 to the end, return as decade\r\n return partial * 10",
"def date_year(date):\n return date.year",
"def get_year(parameters_dictionary):\n if \"start-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"start-year\"])\n return str(year) + str(year + 1)\n elif \"end-year\" in parameters_dictionary.keys():\n year = int(parameters_dictionary[\"end-year\"])\n return str(year - 1) + str(year)\n else:\n return str(THIS_YEAR - 1) + str(THIS_YEAR)",
"def dia_revolucion(year):\n return nth_day_of_month(3, MON, NOV, year)",
"def get_year(date):\n return date.strftime('%Y')"
]
| [
"0.66281295",
"0.656976",
"0.65153605",
"0.63975745",
"0.6394729",
"0.63570565",
"0.62479067",
"0.62440753",
"0.62306625",
"0.6158912",
"0.61399734",
"0.61389637",
"0.613164",
"0.6119462",
"0.61124986",
"0.6075899",
"0.60695523",
"0.60540843",
"0.60523653",
"0.60377324",
"0.5994341",
"0.5987959",
"0.5981708",
"0.59585166",
"0.5932668",
"0.59135",
"0.5906045",
"0.58921593",
"0.5882068",
"0.5853771"
]
| 0.816147 | 0 |
Given a list of observers of the same type, aggregates and returns their results. | def aggregate_results(observers):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aggregate(observables, aggregator, window=1, name=None):\r\n return MultiFold(\r\n observables,\r\n UpdatableLookback(aggregator, window, name))",
"def aggregate(objs):\n summed = OrderedDict()\n for obj in objs:\n pps = [obj] if isinstance(obj, types.PlayPlayer) else obj.play_players\n for pp in pps:\n if pp.player_id not in summed:\n summed[pp.player_id] = pp._copy()\n else:\n summed[pp.player_id]._add(pp)\n return summed.values()",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def aggregate(self, xs: List[Tensor]):\n if self.aggr == \"concat\":\n return torch.cat(xs, dim=-1)\n\n x = torch.stack(xs, dim=-1)\n if self.aggr == \"add\":\n return x.sum(dim=-1)\n elif self.aggr == \"mean\":\n return x.mean(dim=-1)\n elif self.aggr == \"max\":\n return x.max(dim=-1)[0]\n elif self.aggr == \"mul\":\n return x.prod(dim=-1)[0]",
"def aggregate(predictions, aggfunc):\n return [aggfunc(sublist) for sublist in np.transpose(predictions)]",
"def merge_all(self):\n sources = self\n\n def subscribe(observer):\n group = CompositeDisposable()\n is_stopped = False\n m = SingleAssignmentDisposable()\n group.add(m)\n \n def on_next(inner_source):\n inner_subscription = SingleAssignmentDisposable()\n group.add(inner_subscription)\n\n def on_next(x):\n observer.on_next(x)\n \n def on_completed():\n group.remove(inner_subscription)\n if is_stopped and group.length == 1:\n observer.on_completed()\n \n inner_subscription.disposable = inner_source.subscribe(on_next, observer.on_error, on_completed)\n \n def on_completed():\n is_stopped = True\n if len(group) == 1:\n observer.on_completed()\n\n m.disposable = sources.subscribe(on_next, observer.on_error, on_completed)\n return group\n\n return AnonymousObservable(subscribe)",
"def add_observers(simenv, period = 0.05):\n\n data_collector = cSimulationResultsContainer()\n\n new_obs = []\n for thr_i in simenv.threads:\n if hasattr(thr_i, \"snoozed\") and thr_i.__class__.__name__ != 'cBox':\n new_obs += [cSnoozeObserver(thr_i, data_collector, period)]\n if hasattr(thr_i, \"storage\"):\n new_obs += [cCargoObserver(thr_i, data_collector, period)]\n\n simenv.start_threads(new_obs)\n\n return data_collector",
"def combine_latest(cls, *args):\n \n if args and isinstance(args[0], list):\n args = args[0]\n else:\n args = list(args)\n \n result_selector = args.pop()\n \n def subscribe(observer):\n n = len(args)\n has_value = [False] * n\n has_value_all = False\n is_done = [False] * n\n values = [None] * n\n\n def next(i):\n nonlocal has_value_all\n has_value[i] = True\n \n if has_value_all or all(has_value):\n try:\n print(\"selector: \", values)\n res = result_selector(*values)\n except Exception as ex:\n observer.on_error(ex)\n return\n \n observer.on_next(res)\n elif all([x for j, x in enumerate(is_done) if j != i]):\n observer.on_completed()\n\n has_value_all = all(has_value)\n\n def done(i):\n print(\"done: \", i)\n is_done[i] = True\n if all(is_done):\n observer.on_completed()\n \n subscriptions = [None] * n\n def func(i):\n subscriptions[i] = SingleAssignmentDisposable()\n \n def on_next(x):\n print(\"on_next: \", x)\n values[i] = x\n next(i)\n \n def on_completed():\n print(\"on_completed\")\n done(i)\n \n subscriptions[i].disposable = args[i].subscribe(on_next, observer.on_error, on_completed)\n\n for idx in range(n):\n func(idx)\n return CompositeDisposable(subscriptions)\n return AnonymousObservable(subscribe)",
"def merge_measurements(measurements_list: List[Measurements]) -> \\\n Tuple[Measurements, List[MetricName]]:\n summed_metrics: Measurements = {}\n\n all_metrics_names = set() # Sum of set of names.\n for measurements in measurements_list:\n all_metrics_names.update(measurements.keys())\n\n for metric_name in all_metrics_names:\n if metric_name in METRICS_METADATA:\n\n if METRICS_METADATA[metric_name].type == MetricType.GAUGE:\n operation = lambda values: sum(values) / len(values) # noqa\n else:\n assert METRICS_METADATA[metric_name].type == MetricType.COUNTER\n operation = sum\n\n else:\n log.debug('By default, unknown metric %r uses \"sum\" as merge operation.', metric_name)\n operation = sum\n\n summed_metrics[metric_name] = operation(\n [measurements[metric_name] for measurements in measurements_list\n if metric_name in measurements])\n\n return summed_metrics",
"def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics",
"def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate",
"def aggregate_msgs(self, connected_msgs_list):\n msg_num = len(connected_msgs_list)\n agg_msg = connected_msgs_list[0]\n for i in range(1, msg_num):\n agg_msg += connected_msgs_list[i]\n\n if self.msg_aggrgt == 'AVG':\n return agg_msg / msg_num\n elif self.msg_aggrgt == 'SUM':\n return agg_msg",
"def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics",
"def aggregateAll(exprs, typography):\n aggr = aggregateBySubject(aggregateByPredicate(exprs))\n return wrapStatement(typography, aggr)",
"def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])",
"def aggregate_adata(file_list: list) -> AnnData:\n\n import anndata\n from anndata import AnnData\n\n if type(file_list[0]) == anndata._core.anndata.AnnData:\n adata_list = file_list\n elif type(file_list[0]) == str:\n adata_list = [anndata.read(i) for i in file_list]\n\n valid_cells = reduce(lambda a, b: a.intersection(b), [i.obs_names for i in adata_list])\n valid_genes = reduce(lambda a, b: a.intersection(b), [i.var_names for i in adata_list])\n\n if len(valid_cells) == 0 or len(valid_genes) == 0:\n raise Exception(\n f\"we don't find any gene or cell names shared across different adata objects.\" f\"Please check your data. \"\n )\n\n layer_dict = {}\n for i in adata_list[0].layers.keys():\n layer_dict[i] = reduce(\n lambda a, b: a + b,\n [adata[valid_cells, valid_genes].layers[i] for adata in adata_list],\n )\n\n agg_adata = anndata.AnnData(\n X=reduce(\n lambda a, b: a + b,\n [adata[valid_cells, valid_genes].X for adata in adata_list],\n ),\n obs=adata_list[0][valid_cells, valid_genes].obs,\n var=adata_list[0][valid_cells, valid_genes].var,\n layers=layer_dict,\n )\n\n return agg_adata",
"def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type):\n tracker = MetricTracker(input_to_tracker)\n for _ in range(5):\n tracker.increment()\n for _ in range(5):\n preds, target = torch.randn(100, 2), torch.randn(100, 2)\n tracker.update(preds, target)\n all_res = tracker.compute_all()\n assert isinstance(all_res, assert_type)\n best_metric, which_epoch = tracker.best_metric(return_step=True)\n if isinstance(best_metric, dict):\n for v in best_metric.values():\n assert v is None\n for v in which_epoch.values():\n assert v is None\n else:\n assert best_metric is None\n assert which_epoch is None",
"def aggregate(self, **aggregations):\n # Before we iterate, reset the aggregations\n for _, agg in aggregations.items():\n agg.reset()\n # Do the accumulation\n for attrs in self:\n for _, agg in aggregations.items():\n agg.accumulate(attrs)\n # Return the results\n return {name: agg.result for name, agg in aggregations.items()}",
"def get_agg(self, x, ids):\n \n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_agg = torch.mean(x[ids == i], 0).repeat(sample_size, 1)\n \n # concatenate each group of aggregated data\n if i == 0:\n agg = sample_agg \n else:\n agg = torch.cat((agg, sample_agg), dim=0)\n \n return agg",
"def add_observers(self, count, date_observed):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n data = {\n 'count': count,\n 'dateObserved': self.util.any_to_datetime(date_observed).strftime('%Y-%m-%dT%H:%M:%SZ'),\n }\n\n return self.tc_requests.add_observations(\n self.api_type, self.api_branch, self.unique_id, data, owner=self.owner\n )",
"def test_top_observers(self):\n self._create_stars()\n self._create_observations()\n expected = [\n {\n \"observer_id\": self.observer.id,\n \"observer__user__username\": self.observer.user.username,\n \"observer__aavso_code\": self.observer.aavso_code,\n \"observations_count\": 15,\n },\n {\n \"observer_id\": self.observer2.id,\n \"observer__user__username\": self.observer2.user.username,\n \"observer__aavso_code\": self.observer2.aavso_code,\n \"observations_count\": 3,\n },\n ]\n top_observers = list(Observation.objects.top_observers())\n self.assertEqual(top_observers, expected)",
"def aggregate(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.aggregate(m.collection, *args, **kwargs)",
"def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results",
"def merge_observable(self):\n sources = self\n\n def subscribe(observer):\n m = SingleAssignmentDisposable()\n group = CompositeDisposable()\n is_stopped = False\n group.add(m)\n \n def on_next(inner_source):\n inner_subscription = SingleAssignmentDisposable()\n group.add(inner_subscription)\n\n def on_complete():\n nonlocal group\n \n group.remove(inner_subscription)\n if is_stopped and group.length == 1:\n observer.on_completed()\n \n disposable = inner_source.subscribe(\n observer.on_next,\n observer.on_error, \n on_complete)\n \n inner_subscription.disposable = disposable\n \n def on_complete():\n nonlocal is_stopped\n\n is_stopped = True\n if group.length == 1:\n observer.on_completed()\n \n m.disposable = sources.subscribe(on_next, observer.on_error, on_complete)\n return group\n \n return AnonymousObservable(subscribe)",
"def aggregated_iou_score(result_list):\n\n pred, gt, intersections, hist = result_list\n\n aggregated_intersection, aggregated_union, used_nuclei_pred, iou = 0, 0, [], []\n\n for i in hist: # start from 1 to exclude the background matches\n\n if i != 0:\n best_intersection_nucleus = np.argmax(intersections[i, 1:]) + 1\n best_intersection = intersections[i, best_intersection_nucleus]\n aggregated_intersection += best_intersection\n union = np.sum((gt == i) | (pred == best_intersection_nucleus))\n aggregated_union += np.sum((gt == i) | (pred == best_intersection_nucleus))\n used_nuclei_pred.append(best_intersection_nucleus)\n iou.append(best_intersection / union)\n\n return [aggregated_intersection, aggregated_union, used_nuclei_pred, iou]",
"def collect(results, **kwargs):\n l = kwargs.get('logger')\n l.info(\n u'#{} Collect ADD.'.format(u'-' * 8)\n )\n\n l.info(\n u'#{} {} results from {} total items.'.format(\n u'-' * 12, len(results), sum([x['items_processed'] for x in results])\n )\n )\n \n final_result = sum([x['result'] for x in results])\n\n l.info(\n u'#{} Final result: {}.'.format(\n u'-' * 12, final_result\n )\n )\n\n return final_result",
"def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass",
"def get_observers_from_reward_fns(reward_fn):\n defaults = get_default_kwargs(reward_fn)\n return [v for _, v in sorted(defaults.items()) if isinstance(v, Observer)]",
"def __update_observers(self):\n for observer in self.__observers: \n # print(\"hello\")\n observer.update(self)",
"def notify_subscribers(self, event_type, *event_args, **event_kwargs):\n subscribers = self.get_all(event_type, default=())\n if subscribers:\n subscribers = sorted(subscribers, key=Subscriber.sorter)\n event = event_type(*event_args, **event_kwargs)\n for subscriber in subscribers:\n subscriber.func(event, **subscriber.args)\n if subscriber.once:\n self.remove(event_type, subscriber)"
]
| [
"0.5721193",
"0.5561385",
"0.55577785",
"0.5462068",
"0.5439121",
"0.5361046",
"0.5298299",
"0.5273142",
"0.52542335",
"0.51171476",
"0.5113394",
"0.5094156",
"0.5029211",
"0.4950974",
"0.49225077",
"0.4893143",
"0.48639148",
"0.4856655",
"0.47869506",
"0.4766969",
"0.47594115",
"0.47449365",
"0.47322387",
"0.47316182",
"0.47172764",
"0.47011843",
"0.47004914",
"0.46934932",
"0.46761718",
"0.46759418"
]
| 0.7154875 | 0 |
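The positive document in the record above is only a stub (it returns None), so nothing in the row shows what the aggregation would look like. Purely as a hypothetical sketch — the `results()` method, the toy `CountObserver`, and the mean aggregation are all assumptions — one way the documented behaviour could be realised:

```python
class CountObserver:
    # Toy observer that just counts how many events it has seen.
    def __init__(self):
        self.count = 0

    def update(self):
        self.count += 1

    def results(self):
        return self.count

def aggregate_results(observers):
    # Average the per-observer results; assumes every observer in the list
    # exposes a results() method returning a number.
    if not observers:
        return None
    return sum(obs.results() for obs in observers) / len(observers)

observers = [CountObserver() for _ in range(3)]
for i, obs in enumerate(observers):
    for _ in range(i + 1):
        obs.update()
print(aggregate_results(observers))   # 2.0 (mean of counts 1, 2 and 3)
```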
Initialization of graph class with specification of the style used, default is 'ggplot' | def __init__(self, Style='ggplot'):
fig_var = ['style', 'Plots', 'title', 'xlabel', 'ylabel',
'Xmin',
'Xmax',
'Ymin', 'Ymax']
self.data = dict.fromkeys(fig_var)
self.data['style'] = Style
self.data['Plots'] = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, graph_format='png'):\n\n self.graph = Digraph(\"Dependencies\", format=graph_format, filename='dependency_graph.gv', node_attr={'color': 'lightblue2', 'style': 'filled'})\n self.graph.attr(size='8000,8000')",
"def new_graph(self, data, name=None, style='bar', color=colors.lightgreen,\n altcolor=colors.darkseagreen, linewidth=1, center=None,\n colour=None, altcolour=None, centre=None):\n #Let the UK spelling (colour) override the USA spelling (color)\n if colour is not None:\n color = colour\n if altcolour is not None:\n altcolor = altcolour\n if centre is not None:\n center = centre\n\n id = self._next_id # get id number\n graph = GraphData(id, data, name, style, color, altcolor, center)\n graph.linewidth = linewidth\n self._graphs[id] = graph # add graph data\n self._next_id += 1 # increment next id\n return graph",
"def define_plot_style(self, **kwargs):\n if not kwargs:\n options = DotDict(PLOT_DEFAULTS)\n else:\n options = DotDict(kwargs)\n if \"style\" not in options:\n options.style = PLOT_DEFAULTS[\"style\"]\n if \"manual\" not in options:\n options.manual = PLOT_DEFAULTS[\"manual\"]\n self._plot_options = options",
"def __init__(self, graph=None, prefix=\"\", node_class=ParsedNode):\n if graph is None:\n self.graph = dict()\n else:\n self.graph = graph\n self.prefix = prefix\n self.node_class = node_class",
"def __init__(self,**options):\n defaults={\"graph_name\":\"StringGraph\",\n \"node_names\":['n1','n2'],\n \"node_descriptions\":{'n1':\"A plain string\",\n 'n2':\"A list of strings with no \\\\n, created with string.splitlines()\"},\n \"current_node\":'n1',\n \"state\":[1,0],\n \"data\":\"This is a test string\\n it has to have multiple lines \\n and many characters 34%6\\n^\",\n \"edge_2_to_1\":edge_2_to_1,\n \"edge_1_to_2\":edge_1_to_2\n }\n self.options={}\n for key,value in defaults.iteritems():\n self.options[key]=value\n for key,value in options.iteritems():\n self.options[key]=value\n Graph.__init__(self,**self.options)",
"def __init__(self, plot_design):\n self.plot_design = plot_design\n\n text_color = self.plot_design.text_color\n text_font = self.plot_design.text_font\n sns.set_style(\"white\",\n {\n \"axes.edgecolor\": text_color,\n \"axes.labelcolor\": text_color,\n \"text.color\": text_color,\n \"font.sans-serif\": [text_font],\n \"xtick.color\": text_color,\n \"ytick.color\": text_color,\n }\n )",
"def mpl_switch_style(style=\"ggplot\"):\n # this import was moved here because ths code is executed when\n # the module is imported and for some reasons, it overides some of the settings\n # sphinx is doing and graphs are not part of the documentation but show up\n # in a separate window\n if \"plt\" not in sys.modules:\n import matplotlib.pyplot as plt # pylint: disable=C0415\n plt.style.use(style)",
"def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()",
"def _set_graph_style_callback(self, *args, **kwargs):\n try:\n self.logger.debug(\"Callback: graph style callback started.\")\n self.viewer.set_graph_style(*args, **kwargs)\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in when setting graph style.\")",
"def __init__(self, selector, plot_classes, allow_mismatch=False):\n self.selector = selector\n self.plot_classes = OrderedDict(plot_classes)\n interface = self._define_interface(self.plot_classes.values(), allow_mismatch)\n self.style_opts, self.plot_options = interface",
"def __init__(self, *args, **kwargs):\n _gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))",
"def __init__(self, network=None):\n\n if network is None:\n self.graph = nx.Graph()\n self.graph.graph['graph_type'] = 'generic'\n # extent is the extent defined by pores surfaces\n self.graph.graph['extent'] = None\n self.graph.graph['bbox'] = None\n self.geom_complete = False\n self.pores_volume = 0\n self.throats_volume = 0\n else:\n self.graph = network\n self.compute_geometry()",
"def __init__(self, style=None, unit=\"px\"):\n self._meta = SVGHelper(self)\n self.layers = OrderedDict()\n self.style = defaultdict(dict)\n self.stock = SVGStockObjects\n self.unit = unit\n self.defs = []\n self.elements = []\n self.masks = {}\n self.add_style(style or {})",
"def __init__(self, parent=None):\n super().__init__()\n\n self.parent = parent\n\n # plot object, can be 2D or 3D\n self.plt = None",
"def __init__(self, my_graph: VehicleRoutingProblemGraph, path_queue: MPQueue):\r\n self.my_graph = my_graph\r\n self.nodes = my_graph.graph_nodes\r\n self.warehouse_index = my_graph.warehouse_index\r\n self.figure = plt.figure(figsize=(10, 10))\r\n self.figure_ax = self.figure.add_subplot(1, 1, 1)\r\n self.path_queue = path_queue\r\n self.warehouse_color = 'darkblue'\r\n self._customer_color = 'crimson'\r\n self._line_color = 'darksalmon'\r\n self.line_color_list = ['lime', 'gold', \r\n 'deepskyblue', 'orangered', 'magenta', 'blueviolet', \r\n 'royalblue', 'lawngreen', 'indigo', 'deeppink',\r\n 'darkturquoise', 'springgreen', 'aquamarine', 'darkorange',\r\n 'mediumslateblue', 'aqua']",
"def use(style):\r\n plt.style.use(_paths[style])",
"def __init__(self, g, msg):\n self.graph = g\n self.message = 'Graph ' + repr(self.graph) + ' error: ' + msg",
"def __init__(self, text=None, **style):\n\n self.lines = []\n if text is not None:\n self.append(text)\n Figure.__init__(self, style)",
"def __init__(self, graph_dict=None):\n if graph_dict == None:\n graph_dict = {}\n self.graph_dict = graph_dict",
"def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()",
"def apply_styles(graph, styles):\n graph.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n graph.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n graph.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n return graph",
"def set_plot_props(self):\n \n if self.type == \"gas\":\n self.marker = \"v\"\n self.color = \"cyan\"\n \n elif self.type == \"cluster\":\n self.marker = \"o\"\n self.color = \"maroon\"\n \n elif self.type == \"spiral\":\n self.marker = \"*\"\n self.color = \"green\"\n \n elif self.type == \"loop\":\n self.marker = \"o\"\n self.color = \"maroon\"\n \n elif self.type == \"giant\":\n self.marker = \"s\"\n self.color = \"red\"\n \n return",
"def _plot_init(self):\n pass",
"def _plot_init(self):\n pass",
"def _define_graphic(self, graphic_type='', graphic_props={}):\n # type: (str, dict) -> None\n if graphic_type:\n graphic_type = graphic_type.lower()\n self.settings['_type'] = graphic_type\n if graphic_type == 'text':\n self._set_graphic(graphics.Text(graphic_props))\n self.type_def['order'] = 'auto'\n elif graphic_type == 'line':\n self._set_graphic(graphics.Line(graphic_props))\n else:\n self._set_graphic(graphics.Shape(graphic_type, graphic_props))",
"def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()",
"def __init__(self, graph_dict=None):\r\n if graph_dict == None:\r\n graph_dict = {}\r\n self.__graph_dict = graph_dict",
"def __init__(self, graph_dict=None):\n if graph_dict == None:\n graph_dict = {}\n self.__graph_dict = graph_dict",
"def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title",
"def init(self, g, element_cls):\n self.g = g\n self.element_cls = element_cls"
]
| [
"0.7160513",
"0.6328354",
"0.6294184",
"0.6196716",
"0.6181779",
"0.61812836",
"0.6128171",
"0.6123356",
"0.60171515",
"0.5940059",
"0.5715224",
"0.5710166",
"0.5687517",
"0.5683037",
"0.56678903",
"0.56520355",
"0.5641087",
"0.56200397",
"0.56195146",
"0.56164175",
"0.5608128",
"0.5595557",
"0.55926543",
"0.55926543",
"0.55866253",
"0.5570794",
"0.5570274",
"0.555999",
"0.555087",
"0.5520163"
]
| 0.7229065 | 0 |
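A small usage sketch tying the dictionary-backed `__init__` in the record above to the `setlimits` method that appears in the next record; the class name `Graph`, the `render` helper, and the matplotlib calls are assumptions added for the example.

```python
import matplotlib.pyplot as plt

class Graph:
    def __init__(self, Style='ggplot'):
        # One dict holds the style name, the per-curve data and the axis limits.
        fig_var = ['style', 'Plots', 'title', 'xlabel', 'ylabel',
                   'Xmin', 'Xmax', 'Ymin', 'Ymax']
        self.data = dict.fromkeys(fig_var)
        self.data['style'] = Style
        self.data['Plots'] = {}

    def setlimits(self, Xlim, Ylim):
        # Same bookkeeping as the setlimits record that follows.
        self.data['Xmin'], self.data['Xmax'] = Xlim
        self.data['Ymin'], self.data['Ymax'] = Ylim

    def render(self):
        # Hypothetical helper: apply the stored style and limits to a new figure.
        plt.style.use(self.data['style'])
        fig, ax = plt.subplots()
        ax.set_xlim(self.data['Xmin'], self.data['Xmax'])
        ax.set_ylim(self.data['Ymin'], self.data['Ymax'])
        return fig, ax

g = Graph()
g.setlimits(Xlim=[0, 10], Ylim=[-1, 1])
fig, ax = g.render()
```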
define the X and Y axis limits | def setlimits(self, Xlim=[], Ylim=[]):
self.data['Xmin'] = Xlim[0]
self.data['Xmax'] = Xlim[1]
self.data['Ymin'] = Ylim[0]
self.data['Ymax'] = Ylim[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xylim(xmin=None, xmax=None, ymin=None, ymax=None):\n plt.axis(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)",
"def set_plot_limits(self) -> None:\n matplotlib.pyplot.xlim(0, self.imgsz[0])\n matplotlib.pyplot.ylim(self.imgsz[1], 0)",
"def setXYLimit(self, xmin=None, xmax=None, ymin=None, ymax=None):\n self._myCanvas.axes.set_xlim([xmin, xmax])\n self._myCanvas.axes.set_ylim([ymin, ymax])\n\n self._myCanvas.draw()\n\n return",
"def _set_axes_limits(ax, parameter, axis=\"x\"):\n\n lims = list(ax.get_xlim()) if axis == \"x\" else list(ax.get_ylim())\n\n if \"low\" in DEFAULT_BOUNDS[parameter]:\n low = DEFAULT_BOUNDS[parameter][\"low\"]\n if lims[0] < low:\n lims[0] = DEFAULT_BOUNDS[parameter][\"low\"]\n if \"high\" in DEFAULT_BOUNDS[parameter]:\n high = DEFAULT_BOUNDS[parameter][\"high\"]\n if lims[1] > high:\n lims[1] = DEFAULT_BOUNDS[parameter][\"high\"]\n\n if axis == \"x\":\n ax.set_xlim(lims)\n else:\n ax.set_ylim(lims)",
"def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y",
"def set_limits(xlim=None, ylim=None, ax=None):\n if ax is None:\n ax = plt.gca()\n if ylim is not None:\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])",
"def xlim(left=None, right=None):\n impl.xlim(**locals())",
"def set_xlim(self, xlim):\n # x coordinate of center of leftmost pixel\n self.xmin = xlim[0]\n # x coordinate of center of rightmost pixel\n self.xmax = xlim[1]\n self.delta_x = (self.xmax-self.xmin)/float(self.cols-1)",
"def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])",
"def get_xlim(self):\n return (self._frame.GetXaxis().GetXmin(), self._frame.GetXaxis().GetXmax())",
"def xlim(self, left=None, right=None):\r\n for ax in self._subaxes:\r\n ax.set_xlim(left, right)\r\n self.figure.canvas.draw()",
"def set_axis_limits(*args):\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n current_tab = pm.tabLayout('limits_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'position_limits_tab':\n set_position_limits()\n elif current_tab == 'velocity_limits_tab':\n set_deriv_limits('Velocity')\n elif current_tab == 'accel_limits_tab':\n set_deriv_limits('Accel')\n elif current_tab == 'jerk_limits_tab':\n set_deriv_limits('Jerk')",
"def getXLimit(self):\n return self.axes.get_xlim()",
"def _set_plot_limits(self, dist):\n\n plt.xlim(self._Position.xPos - dist * 1000.0, self._Position.xPos + dist * 1000.0)\n plt.ylim(self._Position.zPos - dist * 1000.0, self._Position.zPos + dist * 1000.0)",
"def set_lim(values, scale):\n\n v_min, v_max = min(values), max(values)\n margin = (v_max - v_min) * scale\n v_min, v_max = v_min - margin, v_max + margin\n\n return v_min, v_max",
"def update_limits(self):\n if len(self) == 0:\n self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])\n else:\n x_min, x_max = self.buf[self.rear][0], self.buf[self.front][0]\n y_min, y_max = self.slmm.get_minmax()\n self.limits = np.array([[x_min, y_min], [x_max, y_max]])",
"def set_axis_limit(axis_number, min_max):\n robots = get_robot_roots()\n\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n try:\n val = float(pm.textField('t_A{}{}'.format(axis_number, min_max),\n query=True,\n text=True))\n robot_list_str = ''\n for robot in robots:\n pm.setAttr(get_target_ctrl_path(robot)\n + '.axis{}{}'.format(axis_number, min_max),\n val)\n robot_list_str += robot + ' '\n except:\n pass\n\n pm.headsUpMessage('Axis Position Limits for {} set successfuly!'.format(robot_list_str))",
"def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()",
"def __createLimits(self):\r\n self.lowerXLabel = QLabel(\"lower limits of (x)\")\r\n self.lowerXField = QLineEdit(self)\r\n self.lowerXField.setPlaceholderText(\"-10\")\r\n\r\n self.upperXLabel = QLabel(\"upper limits of (x)\")\r\n self.upperXField = QLineEdit(self)\r\n self.upperXField.setPlaceholderText(\"10\")",
"def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_params.get('lon_0', 0)\n x_range, y_range = type(self)._axes_domain(self, *args, **kwargs)\n x_range = np.asarray(x_range) + lon_0\n return x_range, y_range",
"def _calc_limits(self, points):\n # TODO: what should limits be if there are no points?\n if len(points) == 0:\n self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])\n else:\n x_vals, y_vals = points.T\n self.limits = np.array([(x_vals.min(), y_vals.min()),\n (x_vals.max(), y_vals.max())])",
"def set_axis1_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this view.\")\n\n self.axis1_limits = start, end",
"def set_xlim(self, left=None, right=None):\n if right is None and np.iterable(left):\n left, right = left\n\n if left is None or right is None:\n old_left, old_right = self.get_xlim()\n if left is None:\n left = old_left\n if right is None:\n right = old_right\n\n if left == right:\n warnings.warn(\n \"Attempting to set identical left == right == {} x-axis limits\".format(\n left\n ),\n stacklevel=2,\n )\n\n if left > right:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if right <= 0 and self._logx:\n warnings.warn(\n \"Attempting to set non-positive right xlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n right = self.get_xlim()[1]\n\n elif left <= 0 and self._logx:\n warnings.warn(\n \"Attempting to set non-positive left xlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n left = self.get_xlim()[0]\n\n if isinstance(self._frame, root.TH1F):\n self._frame.GetXaxis().SetLimits(left, right)\n else:\n self._frame.GetXaxis().SetRangeUser(left, right)\n\n self._pad.Modified() # Draw the updated axes\n\n return (left, right)",
"def printLimits():\n print(\"MinX:\",Drawable._minX)\n print(\"MaxX:\",Drawable._maxX)\n print(\"MinY:\",Drawable._minY)\n print(\"MaxY:\",Drawable._maxY)",
"def set_lim(x, y, **kws):\n per = kws['per']\n min_per = 50 - per/2\n max_per = per/2 + 50\n xper = np.nanpercentile(x,[min_per,max_per])\n yper = np.nanpercentile(y,[min_per,max_per])\n ax = plt.gca()\n ax.set_xlim(xper)\n ax.set_ylim(yper)",
"def xlim(self):\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim",
"def set_axis2_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this view.\")\n\n self.axis2_limits = start, end",
"def render_limits(\n origin: tuple[float, float],\n size_in_inches: tuple[float, float],\n scale: float,\n) -> tuple[float, float, float, float]:\n min_x, min_y = origin\n max_x = min_x + size_in_inches[0] * scale\n max_y = min_y + size_in_inches[1] * scale\n return min_x, min_y, max_x, max_y"
]
| [
"0.7596813",
"0.7430736",
"0.73098594",
"0.73097605",
"0.72856957",
"0.7243441",
"0.71406424",
"0.7069249",
"0.70277447",
"0.6997044",
"0.6965993",
"0.6949617",
"0.69271195",
"0.69196284",
"0.68260384",
"0.68236095",
"0.6807305",
"0.6789631",
"0.6702974",
"0.6700313",
"0.66997075",
"0.6647352",
"0.6624457",
"0.66219544",
"0.6618001",
"0.65632695",
"0.650377",
"0.6439732",
"0.64257824",
"0.6405126"
]
| 0.8024198 | 0 |
Converts stoplist text file into list of tokens | def load_stop_list():
stop_list = []
with open(STOP_LIST, "r") as f:
lines = f.readlines()
stop_list = [word.strip() for word in lines]
return stop_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readStopList():\n f = None\n try:\n f = open('documents/stoplist.txt', 'r')\n except FileNotFoundError:\n print(\"ERROR: File not found.\")\n exit(-1)\n if f is None:\n print(\"ERROR: Error loading stoplist\")\n exit(-1)\n\n return str(f.read()).split()",
"def getstopwords():\n file = open('stopWords.txt', 'r')\n stoplist = []\n for word in file.readlines():\n word = word.strip('\\n')\n stoplist.append(word)\n return stoplist",
"def load_stop_words(stop_word_file):\n stop_words = []\n for line in open(stop_word_file):\n if line.strip()[0:1] != \"#\":\n for word in line.split(): # in case more than one per line\n stop_words.append(word)\n return stop_words",
"def load_stop_words():\n with open('../data/stop_words.txt', 'r') as stop_words_file:\n return stop_words_file.read().split()",
"def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result",
"def file_to_tokens(ifiles: list) -> list:\n if not ifiles:\n return []\n #\n # derived from https://stackoverflow.com/questions/16710076\n # regex to split a string preserving quoted fields\n #\n rpat = re.compile(\n r\"\"\" #\n (?:[^\\s\"]+)| # match non-delimiter\n (?<=\\W)\"(?:\\\\.|[^\"]*)\"(?=\\W)| # match double quoted\n (?<=\\W)'(?:\\\\.|.*?)'(?=\\W) # match single quoted\n \"\"\",\n re.X,\n )\n #\n if not isinstance(ifiles, list):\n ifiles = [ifiles]\n #\n for ffile in ifiles:\n try:\n ff = open(ffile, mode=\"r\")\n except OSError:\n logging.debug(\"open(%s) failed on %s\", ffile)\n else:\n for line in ff:\n if re.match(r\"^\\s*#\", line): # skip block comment\n continue\n if re.match(r\"^\\s*$\", line): # skip white space line\n continue\n # strip inline cmnt '<space(s)># to end of line'\n sline = re.sub(r\"\"\"(\\s*#[^\\'\"]*$)\"\"\", \"\", line)\n # tokenize what remains\n yield [\"\".join(t) for t in rpat.findall(sline)]\n ff.close()\n else:\n return []",
"def load_stop_words() -> list:\r\n with open(f'{ENGINE}/stop_words.txt', 'r') as i:\r\n stop_words = i.read().splitlines()\r\n stop_words = list(map(lambda x: x.upper(), stop_words)) # Force all stop words to UPPER case.\r\n return stop_words",
"def stokenize(txt, StopWords):\n Tokens = tokenize(txt)\n UnStopped = [t for t in Tokens if t not in StopWords]\n Stokens = [ps.stem(w) for w in UnStopped] # Stokens = Stemmed Tokens, list of all stokens in the txt\n \n return Stokens",
"def _stopwords():\n global _stopword_set\n if _stopword_set:\n return _stopword_set\n f_name = \"stopword.list\"\n if os.path.isfile(f_name):\n res = set()\n with open(f_name) as f:\n for line in f:\n res.add(line.strip())\n _stopword_set = res\n return res\n else:\n error(\"stop words - not a file: %s\" % f_name)",
"def compile_tokens(args, files):\n # Compile list of lists of tokens\n texts = []\n print(timestamp() + \" Compiling tokens.\", file=sys.stderr)\n for i in range(len(files)):\n file = files[i]\n with open(os.path.join(args.corpus_dir, file)) as f:\n text = f.read().lower().replace(\"\\n\", \" \").split(\" \")\n\n # Changed: Also remove stop words from Mallet version`\n # stop_words = stop.modified_stop_words\n # text = [word for word in text if word not in stop_words]\n texts.append(text)\n return texts",
"def make_list(list_file):\r\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]",
"def stopwords(self):\n with open(STOPWORDS_LIST, 'r') as content:\n return content.read().splitlines()",
"def make_list(list_file):\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]",
"def make_word_list(fin):\n\tword_list = []\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tword_list.append(word)\n\treturn word_list",
"def __init__(self,dir_stopwords):\n \n arc = open(dir_stopwords, \"r\", encoding='utf-8')\n self.stp_wrds = [line.strip() for line in arc]\n arc.close()",
"def get_token_list():\n token_list = []\n tokens_dir_path = os.path.join(BASE_DIR, TOKENS_DIR)\n for dir, dirs, files in os.walk(tokens_dir_path):\n for file_name in files:\n file = open(os.path.join(tokens_dir_path, file_name), 'r')\n token_list.append(file.read().strip())\n file.close()\n return token_list",
"def _load_file(file_path: str) -> List[str]:\n with open(file_path, 'r') as f:\n file_content = f.read()\n\n # Removes all comments\n str_without_comments = re.sub('\\/\\*[\\s\\S]*?\\*\\/|([^\\\\:]|^)\\/\\/.*$', '', file_content, flags=re.MULTILINE)\n str_without_new_lines = str_without_comments.lstrip('\\n').replace('\\n', ' ').replace('\\t', ' ') # Remove new lines and tabs\n return JackTokenizer._split_keep_seperators(str_without_new_lines) # Splits the string by symbols and spaces",
"def file_to_word_list(input_file):\n with open(input_file) as f:\n file_without_header = advance_past_header(f)\n word_list = []\n for line in file_without_header.readlines():\n dirty_words = [dirty_word.lower() for dirty_word in line.split()]\n clean_words = [remove_punctuation(dirty_word) for dirty_word in dirty_words]\n word_list.extend(clean_words)\n return word_list",
"def filter_tokens(tokens):\n token_list = []\n for token in tokens:\n if len(token) > 2 and token not in STOPWORDS:\n token_list.append(token)\n else:\n continue\n \n return token_list",
"def tokenize_file(infilename):\n# reg expss\n\timport re\n\timport sys\n\timport fileinput\n\timport math\n\n#\n# open and parse input \n#\n\n\ttry:\n\t\tfp = open (infilename, 'r')\n\texcept IOError:\n\t\tprint \"Error opening file\"\n\t\traise\n\n\tlines = fp.readlines ()\n\n#\n# put all tokens into tokens and remove comments\n#\n\ttokens = []\n\tfor line in lines:\n\t\ttmp = re.split ('[ \\t\\n]*',line)\n#\t\tprint \"tmp = \", tmp\n\t\tfor tok in tmp:\n\t\t\tif (tok != ''):\n\t\t\t\tif (re.compile('[#!][.]*').match(tok)):\n\t\t\t\t\tbreak\n\t\t\t\ttokens.append(tok)\n#\tprint \"tokens = \", tokens\n\n\tfp.close()\n\n\treturn tokens",
"def get_token_list(text):\n return text.split()",
"def remove_stops(tokens, stops):\n return [(token, pos) for token, pos in tokens if token not in stops.value]",
"def tokenize(review):\n\n token = strip_multiple_whitespaces(strip_punctuation(review))\n return [token.split() for token in simple_preprocess(token) if token not in STOPWORDS]",
"def read_tokens(file_path):\n\n text = open(file_path).read().lower()\n\n tokens = []\n for token in text.split():\n if token.isalpha():\n tokens.append(token)\n\n return tokens",
"def remove_stop_words(stop_list, tokens):\n return [t for t in tokens if len(t) > 2 and not t in stop_list]",
"def file_to_list_of_parsed(nameoffile):\n a = Grammar()\n b = a.syntax()\n file1 = open(nameoffile,'r')\n parsed = []\n for line in file1:\n parsed.append(b.parseString(line))\n return parsed",
"def convert_input_to_list():\n\n f = open('pizza_source.txt', 'r')\n file_to_list = f.read().split('\\n')\n\n return file_to_list",
"def make_stopwords(filepath='stopwords.txt'):\n sw = open(filepath, \"r\")\n my_stopwords = sw.read()\n my_stopwords = my_stopwords.split(\", \")\n sw.close()\n\n all_stopwords = stopwords.words('english')\n all_stopwords.extend(my_stopwords)\n return all_stopwords",
"def read_list_words(infile):\n\twords = []\n\tfin = open(infile)\n\tfor line in fin:\n\t\twords.append(line.strip())\n\treturn words",
"def tokenizer(docs, stop, stemmer):\n init = time.time()\n import re\n regex = re.compile('^\\d*[.,]?\\d*$')\n \n tok_docs = []\n for doc in docs:\n toks = [stemmer.stem(tok) for tok in word_tokenize(doc)\n if not tok in stop\n if len(tok)>2\n if regex.match(tok)==None]\n toks = [tok.upper() for tok in toks]\n tok_docs.append(toks)\n\n finish = time.time() - init\n logger_global.info('%s records tokenized succesfully in %s s.' % \n (str(len(docs)), str(finish)))\n\n return tok_docs"
]
| [
"0.7023497",
"0.69153845",
"0.6750235",
"0.6572064",
"0.65620124",
"0.65080184",
"0.6481563",
"0.6412093",
"0.6406209",
"0.6372647",
"0.6366648",
"0.6364497",
"0.62950796",
"0.62819886",
"0.6274022",
"0.62412465",
"0.618894",
"0.6159963",
"0.61280495",
"0.6125582",
"0.6113427",
"0.61113346",
"0.6093952",
"0.6080951",
"0.6073543",
"0.6058263",
"0.60571545",
"0.6036682",
"0.60228485",
"0.60064757"
]
| 0.749372 | 0 |
Sort any dictionary nested in a complex structure in a specified order. Especially useful for sorting a JSON file in a meaningful order. | def make_custom_sort(orders):
orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders]
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in stuff.items()]
keys = set(stuff)
order = max(orders, key=lambda order: len(keys.intersection(order)))
order.update({key:i for (i, key) in enumerate(sorted(keys.difference(order), key=to_ascii), 1)})
return OrderedDict(sorted(l, key=lambda x: order[x[0]]))
if isinstance(stuff, list):
return [process(x) for x in stuff]
return stuff
return process | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dictsort_complex_sorting_key(self):\n data = [\n {\"foo\": {\"bar\": 1, \"baz\": \"c\"}},\n {\"foo\": {\"bar\": 2, \"baz\": \"b\"}},\n {\"foo\": {\"bar\": 3, \"baz\": \"a\"}},\n ]\n sorted_data = dictsort(data, \"foo.baz\")\n\n self.assertEqual([d[\"foo\"][\"bar\"] for d in sorted_data], [3, 2, 1])",
"def sort(self, order):\r\n params = base.get_params(None, locals())\r\n url = '{0}/sort'.format(self.get_url())\r\n\r\n request = http.Request('PUT', url, params)\r\n\r\n return request, parsers.parse_json",
"def sort_nested_dict(d):\n for x in d:\n for y in d[x]:\n d[x][y] = sorted(d[x][y].items(), key=lambda z:z[0])\n return d",
"def _order_json(self, json_string):\n\n if isinstance(json_string, dict):\n return sorted((k, self._order_json(v)) for k, v in json_string.items())\n if isinstance(json_string, list):\n return sorted(self._order_json(x) for x in json_string)\n else:\n return json_string",
"def sort_json_policy_dict(policy_dict):\n ...",
"def make_order(self, root):\n order = []\n if root and isinstance(root[0], dict):\n keys = set()\n for item in root:\n for key in item.keys():\n keys.add(key)\n for key in args.order or []:\n key = self.get_key(key, keys)\n keys.remove(key)\n order.append(key)\n order += sorted(list(keys))\n return order",
"def dictsort_by(d, order, items = False, skip=True):\n _d = d.copy()\n while _d:\n for ko in order:\n if ko in _d:\n v = _d.pop(ko)\n yield ko if not items else (ko, v)\n if not skip:\n # return the rest of the dict\n yield _d.popitem()[0] if not items else _d.popitem()\n else:\n return",
"def sort_object_info(results, sortkey):\n\n if sortkey == \"unsorted\":\n return results\n elif sortkey == \"name\":\n return sorted(results, key=lambda r: r[\"name\"])\n elif sortkey == \"ext\":\n def _get_ext(n):\n # Get extension for sorting\n if n[\"type\"] == \"dataobject\":\n return n[\"name\"].split(\".\")[-1]\n else:\n # Use name for sorting collections\n return n[\"name\"]\n\n return sorted(results, key=_get_ext)\n elif sortkey == \"size\":\n return sorted(results, key=lambda k: k.get(\"size\", 0))\n elif sortkey == \"date\":\n return sorted(results, key=lambda k: k.get(\"modify_time\", 0))\n else:\n exit_with_error(\"Sort option {} not supported.\".format(sortkey))",
"def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")",
"def sort_entries(entries: List[CapTableEntry], order_by: str, order_direction: str):\n if order_by == \"balance\":\n key = lambda entry: entry.balance\n elif order_by == \"name\":\n key = lambda entry: entry.name\n elif order_by == \"updated\":\n key = lambda entry: entry.updated_at\n elif order_by == \"address\":\n key = lambda entry: entry.address\n else:\n raise TypeError(\"Unknown sort order\")\n\n if order_direction == \"asc\":\n entries.sort(key=key)\n elif order_direction == \"desc\":\n entries.sort(key=key, reverse=True)\n else:\n raise TypeError(\"Unknown sort direction\")",
"def sort_json(dp):\n def get_sort_key(d, ks: list):\n res = []\n for k in ks:\n if isinstance(d[k], (list, tuple)):\n for v in d[k]:\n res.append(v)\n elif d[k] is None:\n res.append('')\n else:\n res.append(d[k])\n return res\n\n def proc(x, ks):\n return sorted(x, key=partial(get_sort_key, ks=ks))\n\n if 'resources' in dp.keys():\n dp['resources'] = proc(dp['resources'], ['path'])\n\n if 'ddfSchema' in dp.keys():\n schema = dp['ddfSchema']\n for t in ['concepts', 'entities', 'datapoints']:\n if t in schema.keys():\n for v in schema[t]:\n v['resources'] = sorted(v['resources'])\n schema[t] = proc(schema[t], ['value', 'primaryKey'])\n\n dp['ddfSchema'] = schema\n\n return dp",
"def sort_nested_dict(sort_me, item_key, start_return=0, count=0, lambda_on_value=lambda x: x, descending=False):\n counter = 0\n ret = list()\n for key in sorted(sort_me.keys(), key=lambda x: lambda_on_value(sort_me[x][item_key]), reverse=descending):\n counter += 1\n if start_return and counter < start_return:\n continue\n if count and count < counter:\n break\n ret.append((key, sort_me[key]))\n return ret",
"def order_projections(model, connection_order):\n connection_list = [el if isinstance(el, tuple) else (el, None)\n for el in connection_order]\n\n for spec in model.projections:\n matches = [(i, el) for i, el in enumerate(connection_list)\n if el[0] == spec.matchname]\n if len(matches) == 0:\n raise Exception(\"Could not order projection %r\" % spec)\n elif len(matches) == 1:\n (i, (k, v)) = matches[0]\n spec.sort_precedence = i\n continue\n\n property_keys = [pdict.keys() for (_, (_, pdict)) in matches]\n if not all(len(pkeys)==1 for pkeys in property_keys):\n raise Exception(\"Please specify only a single property to sort on\")\n if not all(pkey[0]==property_keys[0][0] for pkey in property_keys):\n raise Exception(\"Please specify only a single property to sort on\")\n\n key = property_keys[0][0]\n spec_property_value = spec.src.properties[key]\n match = [ind for (ind, (_, pdict)) in matches if pdict[key] == spec_property_value]\n if len(match) != 1:\n raise Exception(\"Could not order projection %r by property %r\" % (spec, key))\n spec.sort_precedence = match[0]",
"def sort(self, *keys):\n s = self._clone()\n s._sort = []\n for k in keys:\n if isinstance(k, str) and k.startswith('-'):\n k = {k[1:]: {\"order\": \"desc\"}}\n s._sort.append(k)\n return s",
"def order_dict(dictionary):\n return sorted(dictionary.items(), key=lambda kv: kv[1],\n reverse=True)",
"def sort_dict(self, variants_exposed, inplace=False):\n if inplace:\n variants_exposed.order_by(key=self.key_condition_dict, reverse=self.reverse)\n else:\n return sorted(variants_exposed, key=self.key_condition_dict, reverse=self.reverse)",
"def recursive_sort(obj):\n\n if isinstance(obj, dict):\n for key, val in obj.iteritems():\n obj[key] = recursive_sort(val)\n _sorted = obj\n\n elif isinstance(obj, list):\n new_list = []\n for val in obj:\n new_list.append(recursive_sort(val))\n _sorted = sorted(new_list)\n\n else:\n _sorted = obj\n\n return _sorted",
"def _compile_order(self, orderings):\n to_apply = []\n for o in orderings:\n descending = False\n if o.startswith(\"-\"):\n descending = True\n o = o[1:]\n to_apply.append((o, descending))\n\n def compare(res1, res2):\n # res1 and res2 are attribute dictionaries\n # Apply each comparison in order\n # Note that we consider None to be bigger than anything else (i.e.\n # in an ascending sort, None comes after everything else)\n for attr, descending in to_apply:\n if descending:\n x, y = res2.get(attr, []), res1.get(attr, [])\n else:\n x, y = res1.get(attr, []), res2.get(attr, [])\n if x < y:\n return -1\n elif x > y:\n return 1\n return 0\n\n return compare",
"def ordered(obj):\n if isinstance(obj, dict):\n return sorted((k, ordered(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(ordered(x) for x in obj)\n else:\n return obj",
"def sort_key(self, order=None):\n\n # XXX: remove this when issue 5169 is fixed\n def inner_key(arg):\n if isinstance(arg, Basic):\n return arg.sort_key(order)\n else:\n return arg\n\n args = self._sorted_args\n args = len(args), tuple([inner_key(arg) for arg in args])\n return self.class_key(), args, S.One.sort_key(), S.One",
"def sort(self, field=None, asc=True, castFunction=None):\n if field is None or field == '':\n # sort the keys\n self._sequence.sort()\n return\n\n def cast(value):\n if not castFunction:\n return value\n else:\n return castFunction(value)\n\n def innerCmp(a,b):\n order = 1\n if asc == False:\n order = -1\n return cmp(cast(self.dictionary[a][field]), cast(self.dictionary[b][field])) * order\n\n self._sequence.sort(innerCmp)",
"def sort_tree(data_list, sort_key_path):\n result = {}\n for elem in data_list:\n temp_element = copy.deepcopy(elem)\n sort_name = get_sub_value(temp_element, sort_key_path)\n if sort_name not in result:\n result.update({sort_name: {}})\n\n while temp_element:\n val, keys = _remove_deepest(temp_element)\n if keys:\n _add_sub_value(result[sort_name], keys, val)\n\n return result",
"def dictorder():\n d = {1: 'a', 3: 'c', 2: 'b'}\n d.sort()\n print(d)",
"def sorted_options(sort_options):\n return [\n {\n \"title\": v[\"title\"],\n \"value\": (\n \"-{0}\".format(k)\n if v.get(\"default_order\", \"asc\") == \"desc\"\n else k\n ),\n }\n for k, v in sorted(\n sort_options.items(), key=lambda x: x[1].get(\"order\", 0)\n )\n ]",
"def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data",
"def custom_sort(pseudo):\n # Unpack\n pred = pseudo[\"pred_list\"]\n lab = pseudo[\"lab_list\"]\n name = pseudo[\"name_list\"]\n \n # Sort\n sorted_list = list(zip(pred, lab, name))\n sorted_list.sort(key=lambda x: x[0], reverse=True)\n \n pred_sorted = [row[0] for row in sorted_list]\n lab_sorted = [row[1] for row in sorted_list]\n name_sorted = [row[2] for row in sorted_list]\n \n # Re-pack\n pseudo = {\n \"pred_list\": pred_sorted,\n \"lab_list\": lab_sorted,\n \"name_list\": name_sorted\n }\n \n return pseudo",
"def _ordered_dictionary_sort(d, key=None):\n\n items = [(k, d[k]) for k in sorted(d, key=key)]\n\n d.clear()\n\n d.update(items)",
"def sort(self, order=None, *, reverse=False):\n self[:] = self.sorted(order=order, reverse=reverse)",
"def getSorted(self,column,reverse):\n data = self.data\n items = data.keys()\n if column == 'Package':\n items.sort(reverse=reverse)\n elif column == 'Files':\n items.sort(key=lambda x: len(data[x].fileSizeCrcs),reverse=reverse)\n else:\n items.sort()\n attr = column.lower()\n if column in ('Package','Group'):\n getter = lambda x: object.__getattribute__(data[x],attr).lower()\n items.sort(key=getter,reverse=reverse)\n else:\n getter = lambda x: object.__getattribute__(data[x],attr)\n items.sort(key=getter,reverse=reverse)\n #--Special sorters\n if settings['bash.installers.sortStructure']:\n items.sort(key=lambda x: data[x].type)\n if settings['bash.installers.sortActive']:\n items.sort(key=lambda x: not data[x].isActive)\n if settings['bash.installers.sortProjects']:\n items.sort(key=lambda x: not isinstance(data[x],InstallerProject))\n return items",
"def dic_sort(list_of_dicts, key):\n for passnum in range(len(list_of_dicts) - 1, 0, -1):\n is_sorted = True\n for idx in range(passnum):\n if list_of_dicts[idx][key] > list_of_dicts[idx + 1][key]:\n temp = list_of_dicts[idx]\n list_of_dicts[idx] = list_of_dicts[idx + 1]\n list_of_dicts[idx + 1] = temp\n is_sorted = False\n if is_sorted:\n return"
]
| [
"0.653086",
"0.6437128",
"0.63729537",
"0.632864",
"0.62446153",
"0.6146052",
"0.61061865",
"0.60864496",
"0.6072396",
"0.58841217",
"0.5882846",
"0.5782154",
"0.57771605",
"0.57738656",
"0.57239145",
"0.57142466",
"0.5708071",
"0.56768537",
"0.56660247",
"0.5647559",
"0.5618754",
"0.5615515",
"0.5585855",
"0.5568841",
"0.5557799",
"0.55553806",
"0.5549938",
"0.55482465",
"0.55436856",
"0.5520933"
]
| 0.65726316 | 0 |
Return statistics about the current state of this lock. | def statistics(self) -> LockStatistics:
return LockStatistics(self.locked(), self._owner_task, len(self._waiters)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)",
"def stats(self):\n\n res = self.read_block(REG_STATS, 9)\n\n ret = {\n \"completed_cycles\": (res[1] << 8) + (res[0] << 0),\n \"last_boot\": {\n \"retries\": res[2],\n \"duration\": (res[6] << 24) + (res[5] << 16) + (res[4] << 8) + (res[3] << 0)\n },\n \"forced_shutdowns\": (res[8] << 8) + (res[7] << 0)\n }\n\n return ret",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def statistics(self):\n return self._statistics",
"def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)",
"def statistics(self):\n return self._queue.statistics(self._name)",
"def statistics(self):\n return self.get_statistics()",
"def status(self):\n self._refresh_state()\n return self._data.get('status')",
"def status(self):\n return self.state",
"def stats(self):\n return self._stats",
"def state(self):\n return self.coordinator.data[METER_DEVICE_TYPE][self.base_unique_id][METER_STATE]",
"def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status",
"def stats(self):\n return self.rpc.call(MsfRpcMethod.CoreModuleStats)",
"def get_statistics(self):\n\n return (self.func_id, self.instruction_count)",
"def status(self):\n now = int(time())\n return {\n 'smrt': {\n 'smrt_version': '1.0.0',\n 'app_loaded': True,\n 'uptime': now - self._started\n },\n 'application': {\n 'name': 'Cogsworth',\n 'status': 'OK',\n 'version': '0.0.1'\n },\n 'server_time': now,\n 'status': {\n 'amount_successful': self._requests_successful,\n 'amount_warning': self._requests_warning,\n 'amount_error': self._requests_error,\n 'amount_bad': self._requests_bad,\n 'amount_total': (self._requests_successful\n + self._requests_warning\n + self._requests_error\n + self._requests_bad)\n }\n }",
"def stats(self):\n ret = super(DiskCache, self).stats()\n ret[\"root\"] = (self.__env.stat(),)\n for name, database in self.__databases.items():\n with self.__env.begin(database, write=False) as txn:\n ret[name] = txn.stat(database)\n\n return ret",
"def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()",
"def __getstate__(self):\n state = self.__dict__\n state['_lock'] = None\n return state",
"def state(self):\n return self._measure",
"def get_stats(self):\n return self.stats",
"def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status",
"def get_state(self):\n if not self._variable.get():\n return \"Locked\"\n\n elif self._variable.get():\n return \"Unlocked\"",
"def get_state(self) -> Dict:\n return {\n \"patience\": self.patience,\n \"cooldown\": self.cooldown,\n \"cooldown_counter\": self.cooldown_counter,\n \"mode\": self.mode,\n \"threshold\": self.threshold,\n \"threshold_mode\": self.threshold_mode,\n \"best\": self.best,\n \"num_bad_epochs\": self.num_bad_epochs,\n \"mode_worse\": self.mode_worse,\n \"last_epoch\": self.last_epoch,\n }",
"def statistics(self) -> ConditionStatistics:\n return ConditionStatistics(len(self._waiters), self._lock.statistics())",
"def getstatus(self):\n status = dict(state=self.getstate(), runningcmd=None,\n current_exposure=self.current_exposure,\n max_exposures=self.max_exposures,\n statustime=str(datetime.now())[:-7],\n lastfile=self.lastfile)\n if self.process:\n status['lastcmd'] = self.process.args[0]\n status['lastreturn'] = self.process.poll()\n if status['state'] == 'running':\n status['runningcmd'] = path.basename(self.process.args[0])\n try:\n with open(self.logfilename, newline='') as logfile:\n ts = datetime.fromtimestamp(path.getmtime(self.logfilename))\n status['cmdoutput'] = f\"Last output: {str(ts)[:-7]}\\n\"\n status['cmdoutput'] += '#'*80+'\\n'\n lines = logfile.readlines()\n if lines and lines[-1][-1] == '\\r':\n lines[-1] = lines[-1][:-1]\n for line in lines:\n if not line.endswith('\\r'):\n status['cmdoutput'] += line\n except FileNotFoundError:\n status['cmdoutput'] = \"\"\n \n # info for the lastimg to update\n status['lastimg'] = self.lastimgpath\n try:\n status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)\n except FileNotFoundError:\n status['lastimg_timestamp'] = 0\n return status",
"def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. ({count:,}/{total:,})\")",
"def session_state():\n\n return state.summary()",
"def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def event_stats(self):\n return self.base_stats"
]
| [
"0.70566237",
"0.6653602",
"0.66055226",
"0.6495829",
"0.643085",
"0.6398969",
"0.6388215",
"0.6358421",
"0.63350755",
"0.6322747",
"0.6304496",
"0.62816703",
"0.6240129",
"0.61882067",
"0.61520123",
"0.6146637",
"0.6145223",
"0.614285",
"0.6135839",
"0.61339086",
"0.61233103",
"0.61212796",
"0.61194354",
"0.6119094",
"0.611053",
"0.61096096",
"0.61016214",
"0.6075444",
"0.60676354",
"0.60664237"
]
| 0.7832995 | 0 |
Notify exactly n listeners. | def notify(self, n: int = 1) -> None:
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_until_callbacks_invoked(n=10, loop=None):\n\n if loop is None:\n loop = get_event_loop()\n\n for i in range(n):\n _run_until_callbacks_invoked_once(loop=loop)",
"def notify_all(self, event: GameEvent):\n for listener in self._listeners:\n listener.notify(event)",
"def notify(self) -> None:\n for s in self.subscribers:\n s()",
"def notify(self, event):\n for observer in self.observers:\n observer.on_notify(event)",
"def notify(self, event):\n for observer in self.observers:\n observer.on_notify(event)",
"def notify(self, event):\n for o in self.observers:\n o.on_notify(event)",
"def _notify_all(self, event_data):\n for subs in self._subscribers:\n subs.notify(event_data)",
"def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)",
"def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)",
"async def notify(event):\n for subscriber in syncsubscribers:\n subscriber(event)\n for subscriber in asyncsubscribers:\n await subscriber(event)",
"def wait_for_n_keypresses(self, key, n=1):\n my_const = \"key_consumed\"\n counter = 0\n\n def keypress_listener(e): return my_const \\\n if e.type == pygame.KEYDOWN and e.key == key \\\n else EventConsumerInfo.DONT_CARE\n\n while counter < n:\n if self.listen(keypress_listener) == my_const:\n counter += 1",
"def instantiateAddCallbacksNoResult(n):\n d = defer.Deferred()\n def f(result):\n return result\n for i in xrange(n):\n d.addCallback(f)\n d.addErrback(f)\n d.addBoth(f)\n d.addCallbacks(f, f)",
"def newrelic_notify(event):\n for subscriber in subscribers:\n nr_relic_subscriber = newrelic.agent.FunctionTraceWrapper(subscriber, event.__class__.__name__, 'Zope/Dispatch')\n nr_relic_subscriber(event)",
"def notify(self, arg=None):\n for observer in self._observers:\n observer.notify(arg)",
"def instantiateAddCallbacksAfterResult(n):\n d = defer.Deferred()\n def f(result):\n return result\n d.callback(1)\n for i in xrange(n):\n d.addCallback(f)\n d.addErrback(f)\n d.addBoth(f)\n d.addCallbacks(f)",
"def __call__(self, iEpisode, nEpisodes, *args, **kwargs):\n self._notifyIfNotTooFrequent(iEpisode, nEpisodes)",
"def test_subscribe_many_listeners(self):\n def listener():\n pass\n\n def listener1():\n pass\n\n def listener2():\n pass\n\n EVENT_MANAGER.subscribe('test_listeners', listener, listener1, listener2)\n\n self.assertIn(listener, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener1, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener2, EVENT_MANAGER._listeners['test_listeners'])",
"def _notifies(self):\r\n\r\n # starts the counter that is going to be used to count\r\n # the number of processed notifications, start at zero\r\n count = 0\r\n\r\n # iterates while there are pending notifications to be\r\n # processed, the complete set of bind callables will be\r\n # called for each of the notifications\r\n while self._notified:\r\n event, data = self._notified.pop(0)\r\n binds = self._events.pop(event, [])\r\n for callable in binds: callable(data)\r\n count += 1\r\n\r\n # returns the number of processed notifications to the\r\n # the caller method\r\n return count",
"def _notify_killed_players(self, players_to_notify):\n for player in players_to_notify:\n player.notify(self._format_move_result_notification(None, Moveresult.EJECT, name=player.name))",
"def _notify_worklist_update_listeners(self, updates: List[ClientWorklistItemUpdate]):\n for listener in self.__worklist_update_listeners:\n try:\n listener(updates)\n except Exception as e:\n print(\"Caught exception while notifying listener:\", e)\n traceback.print_exc()",
"def _do_dispatch(self, listeners, event_type, details):\n possible_calls = len(listeners)\n call_failures = 0\n for listener in listeners:\n try:\n listener(event_type, details.copy())\n except Exception:\n self._logger.warn(\n \"Failure calling listener %s to notify about event\"\n \" %s, details: %s\", listener, event_type, details,\n exc_info=True)\n call_failures += 1\n return _Notified(possible_calls,\n possible_calls - call_failures,\n call_failures)",
"def notifyObservers(self):",
"def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]",
"def notify(self, error_code, *args, **kwargs):\n for listener in self.listeners_for(error_code):\n listener.notify(error_code, *args, **kwargs)",
"def notify_all(self):\n for voter in self.registered_voters:\n voter.notify(self, None, None, None, final_call=1)\n Legislation.open_legislation.remove(self)",
"def on_notify(self, name):\r\n pass",
"def notify(*values):\r\n data = {\"value\"+str(i+1): value for i, value in enumerate(values[:3])}\r\n\r\n response = requests.request(\"POST\", notification_url, data=data)\r\n response.raise_for_status()",
"def test_process_unsubscribes_after_3(self):\r\n p = Publisher()\r\n for i in range(6):\r\n newsub = SimpleSubscriber(\"Sub\"+str(i), p)\r\n p.publish(str(i))\r\n if i == 0:\r\n self.assertEqual(len(p.subscribers), 1)\r\n self.assertTrue(str(p.subscribers[0]).endswith(\"Sub0>\"))\r\n elif i == 1:\r\n self.assertEqual(len(p.subscribers), 2)\r\n self.assertTrue(str(p.subscribers[0]).endswith(\"Sub0>\"))\r\n self.assertTrue(str(p.subscribers[-1]).endswith(\"Sub1>\"))\r\n else:\r\n # should never have more than 3 in current settings\r\n self.assertEqual(len(p.subscribers), 3)\r\n self.assertTrue(str(p.subscribers[0]).endswith(\"Sub{0}>\".format(i-2)))\r\n self.assertTrue(str(p.subscribers[1]).endswith(\"Sub{0}>\".format(i-1)))\r\n self.assertTrue(str(p.subscribers[2]).endswith(\"Sub{0}>\".format(i)))",
"def set_n_sample_callback(self, n_samples, cb_func):\n self.cb_nSamples = n_samples\n self.cb_func = cb_func\n self.task.EveryNCallback = cb_func\n self.task.AutoRegisterEveryNSamplesEvent(\n everyNsamplesEventType=mx.DAQmx_Val_Acquired_Into_Buffer, \n nSamples=self.cb_nSamples,\n options=0)",
"def send_new_images(self, n_new):\r\n if n_new == 1:\r\n if self.next_data_has_pump:\r\n #self.pump_probe_data -= self.background\r\n self.new_pump_probe.emit(self.wavelen_arr,\r\n self.pump_probe_data - self.background)\r\n self.next_data_has_pump = False\r\n else:\r\n self.new_probe_only.emit(self.wavelen_arr,\r\n self.probe_only_data - self.background)\r\n self.next_data_has_pump = True\r\n else: # n_new == 2\r\n self.new_probe_only.emit(self.wavelen_arr,\r\n self.probe_only_data - self.background)\r\n self.new_pump_probe.emit(self.wavelen_arr,\r\n self.pump_probe_data - self.background)"
]
| [
"0.6379929",
"0.60313714",
"0.5906169",
"0.585487",
"0.585487",
"0.58078414",
"0.5751406",
"0.57240236",
"0.56408536",
"0.55741954",
"0.55261433",
"0.5521699",
"0.55093133",
"0.5486637",
"0.5445235",
"0.5404716",
"0.5376677",
"0.5353639",
"0.5348436",
"0.5333781",
"0.5329927",
"0.5324387",
"0.5309427",
"0.52382445",
"0.52260154",
"0.52174145",
"0.5208492",
"0.5200503",
"0.5188214",
"0.518797"
]
| 0.7488429 | 0 |
Return statistics about the current state of this condition. | def statistics(self) -> ConditionStatistics:
return ConditionStatistics(len(self._waiters), self._lock.statistics()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def state(self):\n return self._measure",
"def get_state(self) -> Dict:\n return {\n \"patience\": self.patience,\n \"cooldown\": self.cooldown,\n \"cooldown_counter\": self.cooldown_counter,\n \"mode\": self.mode,\n \"threshold\": self.threshold,\n \"threshold_mode\": self.threshold_mode,\n \"best\": self.best,\n \"num_bad_epochs\": self.num_bad_epochs,\n \"mode_worse\": self.mode_worse,\n \"last_epoch\": self.last_epoch,\n }",
"def statistics(self):\n return self._statistics",
"def state(self):\n result = self.getResult()\n return result.state",
"def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time",
"def state_summary(self) -> np.ndarray:\n return np.array([len(self.current_clear_nodes), self.n_current_infected, len(self.current_isolated_nodes),\n len(self.current_immune_nodes), len(self.current_alive_nodes)])",
"def statistics(self):\n return self.get_statistics()",
"def status(self):\n return self.state",
"def get_state(self):\r\n return self.currentObservation",
"def state(self):\n return self.coordinator.data[METER_DEVICE_TYPE][self.base_unique_id][METER_STATE]",
"def stats(self):\n return self._stats",
"def state(self):\n return self.probe.get_data(self.variable)",
"def state(self):\n return self.probe.get_data(self.variable)",
"def session_state():\n\n return state.summary()",
"def state(self):\n return self.device.status(station=self.station_number)",
"def state(self) -> float:\n return self.value",
"def state(self):\n return self._attributes['status']",
"def get_state(self):\n return self.env.sim.get_state()",
"def get_state(self):\n return self.fmu._get_continuous_states()",
"def getstate(self):\r\n return GPBase.getstate(self) + [self.Z,\r\n self.num_inducing,\r\n self.has_uncertain_inputs,\r\n self.X_variance]",
"def state(self):\n return self.get_state()",
"def device_state_attributes(self):\n return self._device.status",
"def event_stats(self):\n return self.base_stats",
"def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)",
"def state(self):\n appliance = self._coordinator.data[\"appliances\"][self._appliance_id]\n smart_meter = appliance[\"smart_meter\"]\n echonetlite_properties = smart_meter[\"echonetlite_properties\"]\n measured_instantaneous = next(\n value[\"val\"] for value in echonetlite_properties if value[\"epc\"] == 231\n )\n _LOGGER.debug(\"Current state: %sW\", measured_instantaneous)\n return measured_instantaneous",
"def status(self):\n self._refresh_state()\n return self._data.get('status')",
"def state(self):\n return self._current_value",
"def getstate(self):\r\n return Parameterized.getstate(self) + \\\r\n [self.priors, self.optimization_runs,\r\n self.sampling_runs, self.preferred_optimizer]",
"def current_state(self):\n curr_state = dict(\n logfile=os.path.basename(self.logfile),\n time=self.time,\n converged=self.converged,\n solve_completed=self.solve_completed,\n converged_time=self.converged_time,\n failed=self.failed,\n fields=list(self.res_files.keys()),\n bounding_fields=list(self.bound_files.keys()))\n return curr_state",
"def getStati(self):\n raise \"not implemented\""
]
| [
"0.71586376",
"0.6778751",
"0.6764442",
"0.67526835",
"0.6747207",
"0.6739327",
"0.66986823",
"0.66798747",
"0.664067",
"0.6600649",
"0.6561397",
"0.6514252",
"0.6514252",
"0.64937204",
"0.64810133",
"0.6475574",
"0.6457649",
"0.645014",
"0.6437783",
"0.643425",
"0.64169234",
"0.6404102",
"0.6390011",
"0.63861644",
"0.6377934",
"0.6374754",
"0.6373325",
"0.6344361",
"0.6341064",
"0.63371193"
]
| 0.71001214 | 1 |
Return statistics about the current state of this semaphore. | def statistics(self) -> SemaphoreStatistics:
return SemaphoreStatistics(len(self._waiters)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def statistics(self):\n return self._queue.statistics(self._name)",
"def statistics(self):\n return self._statistics",
"def statistics(self) -> LockStatistics:\n return LockStatistics(self.locked(), self._owner_task, len(self._waiters))",
"def statistics(self):\n return self.get_statistics()",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)",
"def stats(self):\n return self._stats",
"def stats(self):\n return self.rpc.call(MsfRpcMethod.CoreModuleStats)",
"def stats(self):\n\n res = self.read_block(REG_STATS, 9)\n\n ret = {\n \"completed_cycles\": (res[1] << 8) + (res[0] << 0),\n \"last_boot\": {\n \"retries\": res[2],\n \"duration\": (res[6] << 24) + (res[5] << 16) + (res[4] << 8) + (res[3] << 0)\n },\n \"forced_shutdowns\": (res[8] << 8) + (res[7] << 0)\n }\n\n return ret",
"def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status",
"def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)",
"def event_stats(self):\n return self.base_stats",
"def stats(self):\n return {\"size\": 0, \"maxsize\": 0, \"hits\": 0, \"miss\": 0}",
"def get_stats(self):\n return self.stats",
"def statistics(self) -> ConditionStatistics:\n return ConditionStatistics(len(self._waiters), self._lock.statistics())",
"def get_statistics(self):\n\n return (self.func_id, self.instruction_count)",
"def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def getState(self):\r\n self._update('getState')\r\n\r\n state = self.supervisord.options.mood\r\n statename = getSupervisorStateDescription(state)\r\n data = {\r\n 'statecode':state,\r\n 'statename':statename,\r\n }\r\n return data",
"def getStats(self):\n\n raise NotImplementedError",
"def get_statistics(self):\n\t\treturn Job(SDK.PrlVm_GetStatistics(self.handle)[0])",
"def stats(self) -> Dict:\n return self._stats",
"def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time",
"def state(self):\n return self._measure",
"def processStats(self):\n return self._processes.itervalues()",
"def session_state():\n\n return state.summary()",
"def QueueStatistics(self):\n return self._get_attribute('queueStatistics')",
"def status_counts(self):\n return self._status_counts",
"def state_summary(self) -> np.ndarray:\n return np.array([len(self.current_clear_nodes), self.n_current_infected, len(self.current_isolated_nodes),\n len(self.current_immune_nodes), len(self.current_alive_nodes)])",
"def _base_stats(self):\n usage = resource.getrusage(resource.RUSAGE_SELF)\n return {'host': self.application.host,\n 'port': self.application.port,\n 'requests': self.application.counters,\n 'timestamp': int(time.time()),\n 'block': {'input': usage.ru_inblock,\n 'output': usage.ru_oublock},\n 'context_switches': usage.ru_nvcsw + usage.ru_nivcsw,\n 'cpu_time': {'user': usage.ru_utime,\n 'system': usage.ru_stime},\n 'memory_usage': usage.ru_maxrss,\n 'page_faults': {'minor': usage.ru_minflt,\n 'major': usage.ru_majflt},\n 'page_size': resource.getpagesize(),\n 'signals_received': usage.ru_nsignals,\n 'swap_outs': usage.ru_nswap}"
]
| [
"0.67205733",
"0.6648728",
"0.65306365",
"0.6503779",
"0.6471665",
"0.644764",
"0.6430983",
"0.6360429",
"0.6350041",
"0.6304903",
"0.62686765",
"0.6248332",
"0.62444067",
"0.6222603",
"0.61656606",
"0.61591345",
"0.61494213",
"0.61186785",
"0.6094856",
"0.60945207",
"0.60824746",
"0.60678774",
"0.6046742",
"0.60459834",
"0.6018051",
"0.5985649",
"0.5971396",
"0.59626275",
"0.5955625",
"0.5937485"
]
| 0.7326038 | 0 |
Create an asynchronous lock. | def create_lock() -> Lock:
return Lock() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lock_object(self):\n return gevent.thread.allocate_lock()",
"def createLock(self):\n self.lock = stdoutlock",
"def acquire(self, blocking=True):\n ops = fcntl.LOCK_EX\n if not blocking:\n ops |= fcntl.LOCK_NB\n fcntl.flock(self.lock_file, ops)",
"def get_lock():\n\n return multiprocessing.Lock()",
"def acquire(self):\n retries = [0]\n self._acquire_start_seconds = self._reactor.seconds()\n\n def log_lock_acquired(result):\n self._lock_acquired_seconds = self._reactor.seconds()\n seconds = self._lock_acquired_seconds - self._acquire_start_seconds\n self._log.msg('Acquired lock in {0} seconds'.format(seconds),\n lock_acquire_time=seconds, **self._log_kwargs)\n return result\n\n def acquire_lock():\n d = self._write_lock()\n d.addCallback(self._read_lock)\n d.addCallback(self._verify_lock)\n if self._log:\n d.addCallback(log_lock_acquired)\n d.addErrback(lock_not_acquired)\n return d\n\n def lock_not_acquired(failure):\n failure.trap(BusyLockError, NoLockClaimsError)\n retries[0] += 1\n if retries[0] <= self._max_retry:\n return task.deferLater(self._reactor, self._retry_wait, acquire_lock)\n else:\n return failure\n\n def log_lock_acquire_failure(failure):\n if self._log:\n seconds = self._reactor.seconds() - self._acquire_start_seconds\n self._log.msg(\n 'Could not acquire lock in {0} seconds due to {1}'.format(seconds, failure),\n lock_acquire_fail_time=seconds, reason=failure, **self._log_kwargs)\n return failure\n\n return acquire_lock().addErrback(log_lock_acquire_failure)",
"def acquire(self, blocking=True, shared=False):",
"def get_lock():\n fh = None\n # We don't do anything unless --synchronous_name is set.\n if args.synchronous_name is not None:\n if not os.path.isdir(args.synchronization_dir):\n log('--synchronization_dir does not exist, attempting to create')\n os.mkdir(args.synchronization_dir)\n\n lock = os.path.join(args.synchronization_dir, args.synchronous_name)\n fh = open(lock, 'w')\n log('Acquiring lock on %s' % lock)\n if args.nonblocking:\n try:\n fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n log('We did not get the lock but --nonblocking is true; '\n 'exiting successfully')\n fh.close()\n sys.exit(0)\n else:\n # Wait indefinitely. Hopefully there is a timeout on the synchro.py\n # holding the lock.\n fcntl.flock(fh, fcntl.LOCK_EX)\n log('Lock acquired')\n return fh",
"async def acquire(self) -> None:\n await self._lock.acquire()\n self._owner_task = get_current_task()",
"async def async_lock(self, **kwargs: Any) -> None:\n if not await self._node.secure_lock():\n raise HomeAssistantError(f\"Unable to lock device {self._node.address}\")",
"def get_lock(self, name, try_=False):\n lock = Lock(self, name, try_)\n with lock as got_lock:\n yield got_lock",
"def acquire_lock(self):\n self._multistore._lock()",
"def create_lock(self, resource, **kwargs):\n lock = DistLock(resource=resource, created_by_factory=True, **kwargs)\n lock.redis_nodes = self.redis_nodes\n lock.quorum = self.quorum\n lock.factory = self\n return lock",
"def create_lock(self):\n self.check_lock()\n lockfile = open(self._lockfilename, \"w\")\n lockfile.write(str(os.getpid()))",
"def acquire_lock(self):\n if self.lock:\n self.lock.acquire()",
"async def __aenter__(self):\n self.acquired = True\n return self",
"def acquire(self):\n start_time = time.time()\n while True:\n try:\n self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n break\n except (OSError,) as e:\n if e.errno != errno.EEXIST:\n raise\n if (time.time() - start_time) >= self.timeout:\n raise FileLockException(f\"{self.lockfile}: Timeout occurred.\")\n time.sleep(self.delay)\n\n self.is_locked = True",
"def api_acquire(self):\n\n self._api_acquire_lock_with_timer()",
"def acquire_lock (self):\n\n self._exec (self.select)\n self.locked = True",
"def lock(self, name, timeout=None, sleep=0.1):\n return Lock(self, name, timeout=timeout, sleep=sleep)",
"def acquire_nowait(self) -> None:\n self._lock.acquire_nowait()\n self._owner_task = get_current_task()",
"def acquire(self):\n start_time = time.time()\n while True:\n # 当前文件锁对象未有加锁,执行加锁\n if self.fd is None:\n try:\n # 独占式打开文件\n lock_dir = os.path.dirname(self.lockfile)\n if not os.path.isdir(lock_dir):\n os.makedirs(lock_dir, exist_ok=True)\n self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n # 超时\n if (time.time() - start_time) >= self.timeout:\n raise FileLockException(\"Timeout occured.\")\n # 本次加锁失败,需要等待\n time.sleep(self.delay)\n self.is_locked = True",
"def acquire_lock_1(force, lock_file=None):\n if lock_file is None:\n lock_file = config.LOCK_FILE\n lock = Lock(lock_file, LOCK_LIFETIME)\n try:\n lock.lock(timedelta(seconds=0.1))\n return lock\n except TimeOutError:\n if not force:\n raise\n # Force removal of lock first.\n lock.disown()\n hostname, pid, tempfile = lock.details\n os.unlink(lock_file)\n return acquire_lock_1(force=False)",
"def task_schedule_new_fetch_and_lock():\n task_fetch_and_lock.delay()",
"def acquire_nowait(self) -> None:\n task = get_current_task()\n if self._owner_task == task:\n raise RuntimeError('Attempted to acquire an already held Lock')\n\n if self._owner_task is not None:\n raise WouldBlock\n\n self._owner_task = task",
"def rlock_object(self):\n return RLock()",
"def create_lock(self, lock_name):\n path = '/locks/create/%s' % lock_name\n response = self.rest.request(method='post',\n content_type='text/plain', path=path)\n return response.text",
"def lock(*args):",
"def test_require_lock():\n\n class TestClient(object):\n def __init__(self):\n # Lock attribute\n self._lock = Lock()\n\n @require_lock\n def action(self):\n return 42\n\n client = TestClient()\n client._lock.acquire()\n assert client.action() == 42",
"def lock(self):\n raise NotImplementedError",
"async def acquire(self) -> None:\n try:\n self.acquire_nowait()\n except WouldBlock:\n event = create_event()\n self._waiters.append(event)\n try:\n await event.wait()\n except BaseException:\n if not event.is_set():\n self._waiters.remove(event)\n\n raise\n\n self.acquire_nowait()"
]
| [
"0.6727348",
"0.65719485",
"0.6558888",
"0.6537412",
"0.6400549",
"0.63881797",
"0.6383415",
"0.63614863",
"0.6335667",
"0.6314012",
"0.6301484",
"0.6290461",
"0.62749565",
"0.62582034",
"0.62369066",
"0.6198255",
"0.6179671",
"0.6177639",
"0.6171515",
"0.6148447",
"0.6141186",
"0.61259323",
"0.6105548",
"0.60792685",
"0.60361505",
"0.6023775",
"0.5999889",
"0.5978455",
"0.59661746",
"0.59625375"
]
| 0.77168024 | 0 |
Create an asynchronous condition. | def create_condition(lock: Optional[Lock] = None) -> Condition:
return Condition(lock=lock) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_condition(callback, *args,\n sleep_step=0.001, max_wait_time=30, **kwargs):\n event = Event()\n condition = ConditionWaiter(event, callback, *args,\n sleep_step=sleep_step, **kwargs)\n condition.start()\n result = event.wait(max_wait_time)\n condition.stop()\n\n return result",
"def await_condition(description, condition_eval_callable, on_failure=lambda: True, timeout=10, poll_s=0.1):\n start_time = time.time()\n\n def should_continue():\n return time.time() - start_time < timeout\n\n while not condition_eval_callable():\n if not should_continue():\n on_failure()\n raise AssertionError(\n \"Awaiting condition {0} has timed out after {1} seconds\".format(description, timeout)\n )\n time.sleep(poll_s)",
"def await_condition(condition, timeout=2000):\n\n for _ in range(timeout):\n if condition():\n return True\n time.sleep(0.001)\n return False",
"def wait_for(self, condition, *args):\n start_time = int(time.time())\n while True:\n try:\n condition(*args)\n except Exception:\n pass\n else:\n return\n if int(time.time()) - start_time >= self.build_timeout:\n condition(*args)\n return\n time.sleep(self.build_interval)",
"def make_waitable(self):\n if not self.is_waitable():\n self._condition = threading.Condition()",
"def blocking_condition(self):\n return self.__blocking_condition",
"def _wait_until(cond: Callable[[], bool], timeout: float = 15, interval: float = 0.1):\n start = time()\n end = start + timeout\n while time() <= end:\n if cond() is True:\n return\n sleep(interval)\n\n raise AssertionError(\"Condition not true in {} seconds\".format(timeout))",
"def __init__(self, condition: typing.Callable[..., bool]):\n super().__init__()\n self.condition = condition",
"def condition(self) -> global___Expression:",
"def condition(self) -> global___Expression:",
"def wait_until(self, condition, timeout=None):\n if condition():\n return True\n t_start = time.time()\n while not condition():\n if timeout is not None and time.time() > t_start + timeout:\n return False\n if threading.current_thread() is self.kernel.parent.control_thread:\n # Wait for a reply on the comm channel.\n self.poll_one()\n else:\n # Wait 10ms for a reply\n time.sleep(0.01)\n return True",
"def _wait_on_condition(self, timeout):\n self.__condition.wait(timeout)",
"def __await__(self):\n return self.waiter.__await__()",
"def __await__(self):\n return self.waiter.__await__()",
"def _wait_for(self, check_func, desc, result=False, timeout=200):\r\n if result:\r\n return Promise(check_func, desc, timeout=timeout).fulfill()\r\n else:\r\n return EmptyPromise(check_func, desc, timeout=timeout).fulfill()",
"async def wait_for(self, predicate, timeout=None):\n await self._event.acquire()\n await asyncio.wait_for(\n self._event.wait_for(lambda: predicate(self)),\n timeout=timeout,\n loop=self._loop,\n )\n self._event.release()",
"def wait_for_condition(self,\n namespace,\n name,\n expected_conditions=[],\n timeout=datetime.timedelta(days=365),\n polling_interval=datetime.timedelta(seconds=30),\n status_callback=None):\n end_time = datetime.datetime.now() + timeout\n while True:\n try:\n results = self.client.get_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, name)\n except Exception as e:\n logger.error(\"There was a problem waiting for %s/%s %s in namespace %s; Exception: %s\",\n self.group, self.plural, name, namespace, e)\n raise\n\n if results:\n if status_callback:\n status_callback(results)\n expected, condition = self.is_expected_conditions(results, expected_conditions)\n if expected:\n logger.info(\"%s/%s %s in namespace %s has reached the expected condition: %s.\",\n self.group, self.plural, name, namespace, condition)\n return results\n else:\n if condition:\n logger.info(\"Current condition of %s/%s %s in namespace %s is %s.\",\n self.group, self.plural, name, namespace, condition)\n\n if datetime.datetime.now() + polling_interval > end_time:\n raise Exception(\n \"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the \"\n \"conditions {4}.\".format(self.group, self.plural, name, namespace, expected_conditions))\n\n time.sleep(polling_interval.seconds)",
"def create_event() -> abc.Event:\n return get_asynclib().Event()",
"def until_true(condition, timeout, error_msg):\n timeout = timestr_to_secs(timeout)\n max_wait = time.time() + timeout\n while True:\n if condition():\n break\n if time.time() > max_wait:\n raise AssertionError(error_msg)\n time.sleep(0.1)",
"async def wait_for(\n self,\n event: str,\n *,\n check: Callable[..., bool] = return_true,\n timeout: float | None = None,\n ) -> Any:\n future = self.loop.create_future()\n\n event_lower = event.lower()\n try:\n listeners = self._listeners[event_lower]\n except KeyError:\n listeners = []\n self._listeners[event_lower] = listeners\n\n listeners.append((future, check))\n return await asyncio.wait_for(future, timeout)",
"def _wait_until(self, condition, timeout: TimeoutType = DEFAULT_TIMEOUT):\n if not timeout:\n timeout = 0\n return wait.WebDriverWait(self._webdriver, timeout).until(condition)",
"def execute(self, condition=None, timeout=90):\n if not self.stopped:\n if timeout:\n def timeout_func():\n try:\n raise Exception('Async operation timed out after {} seconds'.format(timeout))\n except:\n self.stop(failure=sys.exc_info())\n\n self.io_loop.add_timeout(time.time() + timeout, timeout_func)\n while True:\n self.running = True\n with NullContext():\n # Wipe out the StackContext that was established in\n # self.run() so that all callbacks executed inside the\n # IOLoop will re-run it.\n self.io_loop.start()\n if (self.failure is not None or\n condition is None or condition()):\n break\n assert self.stopped\n self.stopped = False\n if self.failure is not None:\n raise self.failure[0], self.failure[1], self.failure[2]\n result = self.return_value\n self.return_value = None\n return result",
"def wait_fluently(condition: Callable, timeout: TimeoutType, err_msg: str):\n if timeout is None:\n timeout = 0\n start_time = time.time()\n while True:\n res = condition()\n if res:\n return res\n if time.time() - start_time >= timeout:\n raise TimeoutException(err_msg)\n time.sleep(0.3)",
"def test_condition_waiter(self):\n\n class Holder(object):\n def __init__(self, start_value, max_value):\n self.value = start_value\n self.max_value = max_value\n self.num_calls = 0\n\n def my_check(holder):\n holder.num_calls += 1\n if holder.value == holder.max_value:\n return True\n holder.value += 1\n\n holder = Holder(1, 10)\n event = Event()\n condition = ConditionWaiter(event, my_check, holder)\n condition.start()\n self.assertTrue(event.wait(2))\n condition.stop()\n self.assertEqual(holder.num_calls, 10 - 1 + 1)\n\n holder = Holder(4, 10)\n event = Event()\n condition = ConditionWaiter(event, my_check, holder)\n condition.start()\n self.assertTrue(event.wait(3))\n condition.stop()\n self.assertEqual(holder.num_calls, 10 - 4 + 1)\n\n holder = Holder(1, 10)\n event = Event()\n condition = ConditionWaiter(event, my_check, holder)\n condition.start()\n self.assertFalse(event.wait(0.0001))\n condition.stop()",
"def create_condition(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n\n create_condition = {\n u\"type\": \"REQUEST\",\n u\"comment\": \"\",\n u\"name\": \"condition\",\n u\"version\": service_version,\n u\"service_id\": service_id,\n u\"statement\": request_dict['statement'],\n u\"priority\": request_dict['priority']\n }\n\n if 'condition_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['condition_list'] = []\n\n self.fastly_cache[service_id][\n 'condition_list'].append(create_condition)\n return create_condition",
"def Condition(condition_type,\n status=\"Unknown\",\n severity=\"\",\n msg=\"\",\n reason=\"\",\n last_transition_time=\"\"):\n condition = run_v1_messages.GoogleCloudRunV1Condition()\n condition.type = condition_type\n condition.status = status\n condition.severity = severity\n condition.message = msg\n condition.reason = reason\n condition.lastTransitionTime = last_transition_time\n return condition",
"def _blocking(self, timeout, func):\n ret = func(True)\n if ret is not None or self._in_transaction:\n return ret\n if timeout:\n deadline = time.time() + timeout\n else:\n deadline = None\n while True:\n timeout = deadline - time.time() if deadline is not None else None\n if timeout is not None and timeout <= 0:\n return None\n # Python <3.2 doesn't return a status from wait. On Python 3.2+\n # we bail out early on False.\n if self._db.condition.wait(timeout=timeout) is False:\n return None # Timeout expired\n ret = func(False)\n if ret is not None:\n return ret",
"def wait_condition(cond, timeout=1, sleeptime=.01):\n # NOTE Increasing sleeptime can dramatically increase testsuite runtime\n # It also reduces CPU load significantly\n if timeout is None:\n timeout = 1\n\n if timeout < sleeptime:\n print(\"Warning, timeout cannot be smaller than\", sleeptime)\n timeout = sleeptime\n\n # Max number of attempts until giving up\n tries = int(timeout / sleeptime)\n\n for i in range(tries):\n val = cond()\n\n if val is not None:\n break\n\n sleep(sleeptime)\n\n return val",
"def wait_condition(cond, timeout=1, sleeptime=.01):\n # NOTE Increasing sleeptime can dramatically increase testsuite runtime\n # It also reduces CPU load significantly\n if timeout is None:\n timeout = 1\n\n if timeout < sleeptime:\n print(\"Warning, timeout cannot be smaller than\", sleeptime)\n timeout = sleeptime\n\n # Max number of attempts until giving up\n tries = int(timeout / sleeptime)\n\n for i in range(tries):\n val = cond()\n\n if val is not None:\n break\n\n sleep(sleeptime)\n\n return val",
"def condition(self) -> ExpressionNode:\n return self.__condition"
]
| [
"0.63410366",
"0.62055475",
"0.6199077",
"0.6119846",
"0.60336334",
"0.59146494",
"0.58612967",
"0.5782969",
"0.56079465",
"0.56079465",
"0.5525224",
"0.5494368",
"0.54022723",
"0.54022723",
"0.53985673",
"0.53892046",
"0.53815883",
"0.5368199",
"0.53287435",
"0.5314211",
"0.53081465",
"0.5299201",
"0.52765477",
"0.52753115",
"0.52522373",
"0.5234079",
"0.5224385",
"0.52080965",
"0.52080965",
"0.5205053"
]
| 0.6838926 | 0 |